From 053cd9a55b3305b4eb4553f0c4ec44ed5334eaef Mon Sep 17 00:00:00 2001 From: Marc Vef Date: Fri, 7 Feb 2020 16:12:31 +0100 Subject: [PATCH 01/25] Code Maintenance: Configurations, definitions, adafs to gkfs Restructuring code w.r.t. configurations and definitions: - #defines have been mostly removed from configurations - a dedicated config file has been added for configurations with constexpr - past configure file is now only a cmake wrapper - wrapping global functions into namespaces Removed all adafs and ifs occurrences. Now called gkfs --- CHANGELOG.md | 2 +- CMakeLists.txt | 4 +- include/client/adafs_functions.hpp | 82 --------- include/client/env.hpp | 2 +- include/client/gkfs_functions.hpp | 89 ++++++++++ include/client/hooks.hpp | 10 +- include/client/intcp_functions.hpp | 6 +- include/client/intercept.hpp | 6 +- include/client/open_dir.hpp | 6 +- include/client/open_file_map.hpp | 6 +- include/client/preload_context.hpp | 6 +- include/client/preload_util.hpp | 26 +-- include/client/rpc/hg_rpcs.hpp | 28 +-- include/client/rpc/ld_rpc_data_ws.hpp | 19 ++- include/client/rpc/ld_rpc_management.hpp | 8 +- include/client/rpc/ld_rpc_metadentry.hpp | 8 +- include/config.hpp | 72 ++++++++ include/daemon/backend/data/chunk_storage.hpp | 6 +- include/daemon/backend/exceptions.hpp | 14 +- include/daemon/backend/metadata/db.hpp | 10 +- include/daemon/env.hpp | 2 +- include/daemon/main.hpp | 4 +- include/daemon/ops/metadentry.hpp | 6 +- include/daemon/util.hpp | 28 +++ include/global/chunk_calc_util.hpp | 6 +- include/global/cmake_configure.hpp.in | 22 +++ include/global/configure.hpp.in | 76 --------- include/global/global_defs.hpp | 60 ++++--- include/global/log_util.hpp | 8 +- include/global/metadata.hpp | 2 +- include/global/path_util.hpp | 6 +- include/global/rpc/distributor.hpp | 6 +- include/global/rpc/rpc_utils.hpp | 7 +- src/client/CMakeLists.txt | 45 ++--- ...adafs_functions.cpp => gkfs_functions.cpp} | 161 +++++++++--------- src/client/hooks.cpp | 52 +++--- src/client/open_file_map.cpp | 26 +-- src/client/preload.cpp | 5 +- src/client/preload_context.cpp | 20 ++- src/client/preload_util.cpp | 21 +-- src/client/resolve.cpp | 1 - src/client/rpc/ld_rpc_data_ws.cpp | 134 +++++++-------- src/client/rpc/ld_rpc_metadentry.cpp | 45 +++-- src/daemon/CMakeLists.txt | 5 +- src/daemon/backend/metadata/db.cpp | 11 +- src/daemon/handler/h_data.cpp | 134 ++++++++------- src/daemon/handler/h_metadentry.cpp | 109 ++++++------ src/daemon/handler/h_preload.cpp | 22 +-- src/daemon/main.cpp | 158 ++++++++--------- src/daemon/ops/metadentry.cpp | 22 +-- src/daemon/util.cpp | 40 +++++ src/global/CMakeLists.txt | 1 + src/global/metadata.cpp | 54 +++--- 53 files changed, 910 insertions(+), 799 deletions(-) delete mode 100644 include/client/adafs_functions.hpp create mode 100644 include/client/gkfs_functions.hpp create mode 100644 include/config.hpp create mode 100644 include/daemon/util.hpp create mode 100644 include/global/cmake_configure.hpp.in delete mode 100644 include/global/configure.hpp.in rename src/client/{adafs_functions.cpp => gkfs_functions.cpp} (78%) create mode 100644 src/daemon/util.cpp diff --git a/CHANGELOG.md b/CHANGELOG.md index 1f6e9b906..1090f98d5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -86,7 +86,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Intercept I/O syscalls instead of GlibC function using [syscall intercept library](https://github.com/pmem/syscall_intercept) ## [0.4.0] - 2019-04-18 -First GekkoFS pubblic release +First GekkoFS public 
release This version provides a client library that uses GLibC I/O function interception. diff --git a/CMakeLists.txt b/CMakeLists.txt index f76b08d1b..64f54933a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -114,7 +114,7 @@ option(USE_SHM "Use shared memory for intra-node communication" OFF) message(STATUS "[gekkofs] Shared-memory communication: ${USE_SHM}") option(CREATE_CHECK_PARENTS "Check parent directory existance before creating child node" ON) -message(STATUS "Create checks parents: ${CREATE_CHECK_PARENTS}") +message(STATUS "[gekkofs] Create checks parents: ${CREATE_CHECK_PARENTS}") option(SYMLINK_SUPPORT "Compile with support for symlinks" ON) if(SYMLINK_SUPPORT) @@ -146,7 +146,7 @@ add_definitions(-DLIBGKFS_LOG_MESSAGE_SIZE=${CLIENT_LOG_MESSAGE_SIZE}) message(STATUS "[gekkofs] Maximum log message size in the client library: ${CLIENT_LOG_MESSAGE_SIZE}") mark_as_advanced(CLIENT_LOG_MESSAGE_SIZE) -configure_file(include/global/configure.hpp.in include/global/configure.hpp) +configure_file(include/global/cmake_configure.hpp.in include/global/cmake_configure.hpp) # Imported target add_library(RocksDB INTERFACE IMPORTED GLOBAL) diff --git a/include/client/adafs_functions.hpp b/include/client/adafs_functions.hpp deleted file mode 100644 index 0ebb2c3da..000000000 --- a/include/client/adafs_functions.hpp +++ /dev/null @@ -1,82 +0,0 @@ -/* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany - - This software was partially supported by the - EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). - - This software was partially supported by the - ADA-FS project under the SPPEXA project funded by the DFG. - - SPDX-License-Identifier: MIT -*/ - -#ifndef IFS_ADAFS_FUNCTIONS_HPP -#define IFS_ADAFS_FUNCTIONS_HPP - -#include -#include - -std::shared_ptr adafs_metadata(const std::string& path, bool follow_links = false); - -int adafs_open(const std::string& path, mode_t mode, int flags); - -int check_parent_dir(const std::string& path); - -int adafs_mk_node(const std::string& path, mode_t mode); - -int check_parent_dir(const std::string& path); - -int adafs_rm_node(const std::string& path); - -int adafs_access(const std::string& path, int mask, bool follow_links = true); - -int adafs_stat(const std::string& path, struct stat* buf, bool follow_links = true); - -int adafs_statvfs(struct statvfs* buf); - -int adafs_statfs(struct statfs* buf); - -off64_t adafs_lseek(unsigned int fd, off64_t offset, unsigned int whence); - -off64_t adafs_lseek(std::shared_ptr adafs_fd, off64_t offset, unsigned int whence); - -int adafs_truncate(const std::string& path, off_t offset); - -int adafs_truncate(const std::string& path, off_t old_size, off_t new_size); - -int adafs_dup(int oldfd); - -int adafs_dup2(int oldfd, int newfd); - -#ifdef HAS_SYMLINKS -int adafs_mk_symlink(const std::string& path, const std::string& target_path); -int adafs_readlink(const std::string& path, char *buf, int bufsize); -#endif - - -ssize_t adafs_pwrite(std::shared_ptr file, - const char * buf, size_t count, off64_t offset); -ssize_t adafs_pwrite_ws(int fd, const void* buf, size_t count, off64_t offset); -ssize_t adafs_write(int fd, const void * buf, size_t count); -ssize_t adafs_pwritev(int fd, const struct iovec *iov, int iovcnt, off_t offset); -ssize_t adafs_writev(int fd, const struct iovec * iov, int iovcnt); - -ssize_t adafs_pread(std::shared_ptr file, char * buf, size_t count, off64_t offset); -ssize_t adafs_pread_ws(int 
fd, void* buf, size_t count, off64_t offset); -ssize_t adafs_read(int fd, void* buf, size_t count); - - -int adafs_opendir(const std::string& path); - -int getdents(unsigned int fd, - struct linux_dirent *dirp, - unsigned int count); - -int getdents64(unsigned int fd, - struct linux_dirent64 *dirp, - unsigned int count); - -int adafs_rmdir(const std::string& path); - -#endif //IFS_ADAFS_FUNCTIONS_HPP diff --git a/include/client/env.hpp b/include/client/env.hpp index 59e6b81a5..054596cf9 100644 --- a/include/client/env.hpp +++ b/include/client/env.hpp @@ -14,7 +14,7 @@ #ifndef GKFS_CLIENT_ENV #define GKFS_CLIENT_ENV -#include +#include #define ADD_PREFIX(str) CLIENT_ENV_PREFIX str diff --git a/include/client/gkfs_functions.hpp b/include/client/gkfs_functions.hpp new file mode 100644 index 000000000..7750e9e7d --- /dev/null +++ b/include/client/gkfs_functions.hpp @@ -0,0 +1,89 @@ +/* + Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + + This software was partially supported by the + EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). + + This software was partially supported by the + ADA-FS project under the SPPEXA project funded by the DFG. + + SPDX-License-Identifier: MIT +*/ + +#ifndef GEKKOFS_GKFS_FUNCTIONS_HPP +#define GEKKOFS_GKFS_FUNCTIONS_HPP + +#include +#include + +std::shared_ptr gkfs_metadata(const std::string& path, bool follow_links = false); + +int check_parent_dir(const std::string& path); + +int gkfs_open(const std::string& path, mode_t mode, int flags); + +int gkfs_mk_node(const std::string& path, mode_t mode); + +int gkfs_rm_node(const std::string& path); + +int gkfs_access(const std::string& path, int mask, bool follow_links = true); + +int gkfs_stat(const std::string& path, struct stat* buf, bool follow_links = true); + +int gkfs_statvfs(struct statvfs* buf); + +int gkfs_statfs(struct statfs* buf); + +off64_t gkfs_lseek(unsigned int fd, off64_t offset, unsigned int whence); + +off64_t gkfs_lseek(std::shared_ptr gkfs_fd, off64_t offset, unsigned int whence); + +int gkfs_truncate(const std::string& path, off_t offset); + +int gkfs_truncate(const std::string& path, off_t old_size, off_t new_size); + +int gkfs_dup(int oldfd); + +int gkfs_dup2(int oldfd, int newfd); + +#ifdef HAS_SYMLINKS + +int gkfs_mk_symlink(const std::string& path, const std::string& target_path); + +int gkfs_readlink(const std::string& path, char* buf, int bufsize); + +#endif + + +ssize_t gkfs_pwrite(std::shared_ptr file, + const char* buf, size_t count, off64_t offset); + +ssize_t gkfs_pwrite_ws(int fd, const void* buf, size_t count, off64_t offset); + +ssize_t gkfs_write(int fd, const void* buf, size_t count); + +ssize_t gkfs_pwritev(int fd, const struct iovec* iov, int iovcnt, off_t offset); + +ssize_t gkfs_writev(int fd, const struct iovec* iov, int iovcnt); + +ssize_t gkfs_pread(std::shared_ptr file, char* buf, size_t count, off64_t offset); + +ssize_t gkfs_pread_ws(int fd, void* buf, size_t count, off64_t offset); + +ssize_t gkfs_read(int fd, void* buf, size_t count); + + +int gkfs_opendir(const std::string& path); + +int getdents(unsigned int fd, + struct linux_dirent* dirp, + unsigned int count); + +int getdents64(unsigned int fd, + struct linux_dirent64* dirp, + unsigned int count); + +int gkfs_rmdir(const std::string& path); + +#endif //GEKKOFS_GKFS_FUNCTIONS_HPP diff --git a/include/client/hooks.hpp b/include/client/hooks.hpp index 159e67cb7..98057e3b5 100644 --- 
a/include/client/hooks.hpp +++ b/include/client/hooks.hpp @@ -11,8 +11,8 @@ SPDX-License-Identifier: MIT */ -#ifndef IFS_HOOKS_HPP -#define IFS_HOOKS_HPP +#ifndef GEKKOFS_HOOKS_HPP +#define GEKKOFS_HOOKS_HPP #include #include @@ -20,10 +20,14 @@ int hook_openat(int dirfd, const char *cpath, int flags, mode_t mode); int hook_close(int fd); + int hook_stat(const char* path, struct stat* buf); + int hook_lstat(const char* path, struct stat* buf); + int hook_fstat(unsigned int fd, struct stat* buf); -int hook_fstatat(int dirfd, const char * cpath, struct stat * buf, int flags); + +int hook_fstatat(int dirfd, const char* cpath, struct stat* buf, int flags); int hook_read(unsigned int fd, void* buf, size_t count); int hook_pread(unsigned int fd, char * buf, size_t count, loff_t pos); int hook_write(unsigned int fd, const char * buf, size_t count); diff --git a/include/client/intcp_functions.hpp b/include/client/intcp_functions.hpp index 83d74ef92..22f850ff0 100644 --- a/include/client/intcp_functions.hpp +++ b/include/client/intcp_functions.hpp @@ -11,8 +11,8 @@ SPDX-License-Identifier: MIT */ -#ifndef IFS_INTCP_FUNCTIONS_HPP -#define IFS_INTCP_FUNCTIONS_HPP +#ifndef GEKKOFS_INTCP_FUNCTIONS_HPP +#define GEKKOFS_INTCP_FUNCTIONS_HPP #include @@ -75,6 +75,6 @@ strong_alias(intcp_statvfs, statvfs) int intcp_fstatvfs(int fd, struct statvfs *buf) noexcept; strong_alias(intcp_fstatvfs, fstatvfs) -#endif // IFS_INTCP_FUNCTIONS_HPP +#endif // GEKKOFS_INTCP_FUNCTIONS_HPP } // extern C diff --git a/include/client/intercept.hpp b/include/client/intercept.hpp index f3b590d92..d49d248d0 100644 --- a/include/client/intercept.hpp +++ b/include/client/intercept.hpp @@ -11,14 +11,14 @@ SPDX-License-Identifier: MIT */ -#ifndef IFS_INTERCEPT_HPP -#define IFS_INTERCEPT_HPP +#ifndef GEKKOFS_INTERCEPT_HPP +#define GEKKOFS_INTERCEPT_HPP int internal_hook_guard_wrapper(long syscall_number, long arg0, long arg1, long arg2, long arg3, long arg4, long arg5, - long *syscall_return_value); + long* syscall_return_value); int hook_guard_wrapper(long syscall_number, diff --git a/include/client/open_dir.hpp b/include/client/open_dir.hpp index 33b43fd50..583c122b9 100644 --- a/include/client/open_dir.hpp +++ b/include/client/open_dir.hpp @@ -12,8 +12,8 @@ */ -#ifndef IFS_OPEN_DIR_HPP -#define IFS_OPEN_DIR_HPP +#ifndef GEKKOFS_OPEN_DIR_HPP +#define GEKKOFS_OPEN_DIR_HPP #include #include @@ -46,4 +46,4 @@ class OpenDir: public OpenFile { }; -#endif //IFS_OPEN_DIR_HPP +#endif //GEKKOFS_OPEN_DIR_HPP diff --git a/include/client/open_file_map.hpp b/include/client/open_file_map.hpp index aeb188833..7d76b780f 100644 --- a/include/client/open_file_map.hpp +++ b/include/client/open_file_map.hpp @@ -12,8 +12,8 @@ */ -#ifndef IFS_OPEN_FILE_MAP_HPP -#define IFS_OPEN_FILE_MAP_HPP +#ifndef GEKKOFS_OPEN_FILE_MAP_HPP +#define GEKKOFS_OPEN_FILE_MAP_HPP #include #include @@ -115,4 +115,4 @@ public: }; -#endif //IFS_OPEN_FILE_MAP_HPP +#endif //GEKKOFS_OPEN_FILE_MAP_HPP diff --git a/include/client/preload_context.hpp b/include/client/preload_context.hpp index 3a4d288a3..cd9dcfeb5 100644 --- a/include/client/preload_context.hpp +++ b/include/client/preload_context.hpp @@ -11,8 +11,8 @@ SPDX-License-Identifier: MIT */ -#ifndef IFS_PRELOAD_CTX_HPP -#define IFS_PRELOAD_CTX_HPP +#ifndef GEKKOFS_PRELOAD_CTX_HPP +#define GEKKOFS_PRELOAD_CTX_HPP #include #include @@ -130,5 +130,5 @@ class PreloadContext { }; -#endif //IFS_PRELOAD_CTX_HPP +#endif //GEKKOFS_PRELOAD_CTX_HPP diff --git a/include/client/preload_util.hpp b/include/client/preload_util.hpp index 
4879aec76..bb7b8b973 100644 --- a/include/client/preload_util.hpp +++ b/include/client/preload_util.hpp @@ -12,15 +12,16 @@ */ -#ifndef IFS_PRELOAD_UTIL_HPP -#define IFS_PRELOAD_UTIL_HPP +#ifndef GEKKOFS_PRELOAD_UTIL_HPP +#define GEKKOFS_PRELOAD_UTIL_HPP #include #include -// third party libs + #include #include #include +#include struct MetadentryUpdateFlags { bool atime = false; @@ -60,14 +61,19 @@ extern hg_id_t rpc_mk_symlink_id; #endif // function definitions +namespace gkfs { + namespace client { + template + constexpr typename std::underlying_type::type to_underlying(E e) { + return static_cast::type>(e); + } -int metadata_to_stat(const std::string& path, const Metadata& md, struct stat& attr); - -std::vector> load_hosts_file(const std::string& lfpath); + int metadata_to_stat(const std::string& path, const Metadata& md, struct stat& attr); -hg_addr_t get_local_addr(); + std::vector> load_hostfile(const std::string& lfpath); -void load_hosts(); -bool lookup_all_hosts(); + void load_hosts(); + } +} -#endif //IFS_PRELOAD_UTIL_HPP +#endif //GEKKOFS_PRELOAD_UTIL_HPP diff --git a/include/client/rpc/hg_rpcs.hpp b/include/client/rpc/hg_rpcs.hpp index 240a82433..00465cecb 100644 --- a/include/client/rpc/hg_rpcs.hpp +++ b/include/client/rpc/hg_rpcs.hpp @@ -77,7 +77,7 @@ struct fs_config { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = hg_tag::fs_config; + constexpr static const auto name = gkfs::hg_tag::fs_config; // requires response? constexpr static const auto requires_response = true; @@ -256,7 +256,7 @@ struct create { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = hg_tag::create; + constexpr static const auto name = gkfs::hg_tag::create; // requires response? constexpr static const auto requires_response = true; @@ -367,7 +367,7 @@ struct stat { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = hg_tag::stat; + constexpr static const auto name = gkfs::hg_tag::stat; // requires response? constexpr static const auto requires_response = true; @@ -481,7 +481,7 @@ struct remove { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = hg_tag::remove; + constexpr static const auto name = gkfs::hg_tag::remove; // requires response? constexpr static const auto requires_response = true; @@ -583,7 +583,7 @@ struct decr_size { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = hg_tag::decr_size; + constexpr static const auto name = gkfs::hg_tag::decr_size; // requires response? constexpr static const auto requires_response = true; @@ -693,7 +693,7 @@ struct update_metadentry { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = hg_tag::update_metadentry; + constexpr static const auto name = gkfs::hg_tag::update_metadentry; // requires response? constexpr static const auto requires_response = true; @@ -955,7 +955,7 @@ struct get_metadentry_size { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = hg_tag::get_metadentry_size; + constexpr static const auto name = gkfs::hg_tag::get_metadentry_size; // requires response? 
constexpr static const auto requires_response = true; @@ -1066,7 +1066,7 @@ struct update_metadentry_size { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = hg_tag::update_metadentry_size; + constexpr static const auto name = gkfs::hg_tag::update_metadentry_size; // requires response? constexpr static const auto requires_response = true; @@ -1206,7 +1206,7 @@ struct mk_symlink { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = hg_tag::mk_symlink; + constexpr static const auto name = gkfs::hg_tag::mk_symlink; // requires response? constexpr static const auto requires_response = true; @@ -1319,7 +1319,7 @@ struct write_data { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = hg_tag::write_data; + constexpr static const auto name = gkfs::hg_tag::write_data; // requires response? constexpr static const auto requires_response = true; @@ -1512,7 +1512,7 @@ struct read_data { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = hg_tag::read_data; + constexpr static const auto name = gkfs::hg_tag::read_data; // requires response? constexpr static const auto requires_response = true; @@ -1705,7 +1705,7 @@ struct trunc_data { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = hg_tag::trunc_data; + constexpr static const auto name = gkfs::hg_tag::trunc_data; // requires response? constexpr static const auto requires_response = true; @@ -1819,7 +1819,7 @@ struct get_dirents { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = hg_tag::get_dirents; + constexpr static const auto name = gkfs::hg_tag::get_dirents; // requires response? constexpr static const auto requires_response = true; @@ -1942,7 +1942,7 @@ struct chunk_stat { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = hg_tag::chunk_stat; + constexpr static const auto name = gkfs::hg_tag::chunk_stat; // requires response? 
constexpr static const auto requires_response = true; diff --git a/include/client/rpc/ld_rpc_data_ws.hpp b/include/client/rpc/ld_rpc_data_ws.hpp index 317124691..74da98849 100644 --- a/include/client/rpc/ld_rpc_data_ws.hpp +++ b/include/client/rpc/ld_rpc_data_ws.hpp @@ -12,18 +12,19 @@ */ -#ifndef IFS_PRELOAD_C_DATA_WS_HPP -#define IFS_PRELOAD_C_DATA_WS_HPP +#ifndef GEKKOFS_PRELOAD_C_DATA_WS_HPP +#define GEKKOFS_PRELOAD_C_DATA_WS_HPP namespace rpc_send { -ssize_t write(const std::string& path, const void* buf, const bool append_flag, const off64_t in_offset, - const size_t write_size, const int64_t updated_metadentry_size); -struct ChunkStat { - unsigned long chunk_size; - unsigned long chunk_total; - unsigned long chunk_free; + ssize_t write(const std::string& path, const void* buf, const bool append_flag, const off64_t in_offset, + const size_t write_size, const int64_t updated_metadentry_size); + + struct ChunkStat { + unsigned long chunk_size; + unsigned long chunk_total; + unsigned long chunk_free; }; ssize_t read(const std::string& path, void* buf, const off64_t offset, const size_t read_size); @@ -35,4 +36,4 @@ ChunkStat chunk_stat(); } -#endif //IFS_PRELOAD_C_DATA_WS_HPP +#endif //GEKKOFS_PRELOAD_C_DATA_WS_HPP diff --git a/include/client/rpc/ld_rpc_management.hpp b/include/client/rpc/ld_rpc_management.hpp index cd0e20606..f70063de6 100644 --- a/include/client/rpc/ld_rpc_management.hpp +++ b/include/client/rpc/ld_rpc_management.hpp @@ -12,15 +12,15 @@ */ -#ifndef IFS_MARGO_RPC_MANAGMENT_HPP -#define IFS_MARGO_RPC_MANAGMENT_HPP +#ifndef GEKKOFS_MARGO_RPC_MANAGMENT_HPP +#define GEKKOFS_MARGO_RPC_MANAGMENT_HPP namespace rpc_send { -bool get_fs_config(); + bool get_fs_config(); } // end namespace rpc_send -#endif //IFS_MARGO_RPC_NANAGMENT_HPP +#endif //GEKKOFS_MARGO_RPC_NANAGMENT_HPP diff --git a/include/client/rpc/ld_rpc_metadentry.hpp b/include/client/rpc/ld_rpc_metadentry.hpp index fe260a5ea..4c52de261 100644 --- a/include/client/rpc/ld_rpc_metadentry.hpp +++ b/include/client/rpc/ld_rpc_metadentry.hpp @@ -12,14 +12,16 @@ */ -#ifndef IFS_PRELOAD_C_METADENTRY_HPP -#define IFS_PRELOAD_C_METADENTRY_HPP +#ifndef GEKKOFS_PRELOAD_C_METADENTRY_HPP +#define GEKKOFS_PRELOAD_C_METADENTRY_HPP #include /* Forward declaration */ struct MetadentryUpdateFlags; + class OpenDir; + class Metadata; namespace rpc_send { @@ -49,4 +51,4 @@ int mk_symlink(const std::string& path, const std::string& target_path); } // end namespace rpc_send -#endif //IFS_PRELOAD_C_METADENTRY_HPP +#endif //GEKKOFS_PRELOAD_C_METADENTRY_HPP diff --git a/include/config.hpp b/include/config.hpp new file mode 100644 index 000000000..cee9d19a6 --- /dev/null +++ b/include/config.hpp @@ -0,0 +1,72 @@ +/* + Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + + This software was partially supported by the + EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). + + This software was partially supported by the + ADA-FS project under the SPPEXA project funded by the DFG. + + SPDX-License-Identifier: MIT +*/ + +#ifndef GEKKOFS_CONFIG_HPP +#define GEKKOFS_CONFIG_HPP + +#include + +// environment prefixes (are concatenated in env module at compile time) +#define CLIENT_ENV_PREFIX "LIBGKFS_" +#define DAEMON_ENV_PREFIX "GKFS_" + +namespace gkfs_config { + + constexpr auto hostfile_path = "./gkfs_hosts.txt"; + + namespace io { + /* + * Zero buffer before read. This is relevant if sparse files are used. 
+ * If buffer is not zeroed, sparse regions contain invalid data. + */ + constexpr auto zero_buffer_before_read = false; + } + + namespace logging { + constexpr auto client_log_path = "/tmp/gkfs_client.log"; + constexpr auto daemon_log_path = "/tmp/gkfs_daemon.log"; + + constexpr auto client_log_level = "info,errors,critical,mercury"; + constexpr auto daemon_log_level = 4; //info + } + + namespace metadata { + + // which metadata should be considered apart from size and mode + constexpr auto use_atime = false; + constexpr auto use_ctime = false; + constexpr auto use_mtime = false; + constexpr auto use_link_cnt = false; + constexpr auto use_blocks = false; + } + + namespace rpc { + constexpr auto chunksize = 524288; // in bytes (e.g., 524288 == 512KB) + //size of preallocated buffer to hold directory entries in rpc call + constexpr auto dirents_buff_size = (8 * 1024 * 1024); // 8 mega + /* + * Indicates the number of concurrent progress to drive I/O operations of chunk files to and from local file systems + * The value is directly mapped to created Argobots xstreams, controlled in a single pool with ABT_snoozer scheduler + */ + constexpr auto daemon_io_xstreams = 8; + // Number of threads used for RPC handlers at the daemon + constexpr auto daemon_handler_xstreams = 8; + } + + namespace rocksdb { + // Write-ahead logging of rocksdb + constexpr auto use_write_ahead_log = false; + } +} + +#endif //GEKKOFS_CONFIG_HPP diff --git a/include/daemon/backend/data/chunk_storage.hpp b/include/daemon/backend/data/chunk_storage.hpp index 3533aaf0d..9008090c1 100644 --- a/include/daemon/backend/data/chunk_storage.hpp +++ b/include/daemon/backend/data/chunk_storage.hpp @@ -11,8 +11,8 @@ SPDX-License-Identifier: MIT */ -#ifndef IFS_CHUNK_STORAGE_HPP -#define IFS_CHUNK_STORAGE_HPP +#ifndef GEKKOFS_CHUNK_STORAGE_HPP +#define GEKKOFS_CHUNK_STORAGE_HPP #include #include @@ -60,4 +60,4 @@ class ChunkStorage { ChunkStat chunk_stat() const; }; -#endif //IFS_CHUNK_STORAGE_HPP +#endif //GEKKOFS_CHUNK_STORAGE_HPP diff --git a/include/daemon/backend/exceptions.hpp b/include/daemon/backend/exceptions.hpp index 01151dda5..f0319dcce 100644 --- a/include/daemon/backend/exceptions.hpp +++ b/include/daemon/backend/exceptions.hpp @@ -11,20 +11,20 @@ SPDX-License-Identifier: MIT */ -#ifndef IFS_DB_EXCEPTIONS_HPP -#define IFS_DB_EXCEPTIONS_HPP +#ifndef GEKKOFS_DB_EXCEPTIONS_HPP +#define GEKKOFS_DB_EXCEPTIONS_HPP #include #include -class DBException: public std::runtime_error { - public: - DBException(const std::string & s) : std::runtime_error(s) {}; +class DBException : public std::runtime_error { +public: + DBException(const std::string& s) : std::runtime_error(s) {}; }; -class NotFoundException: public DBException { +class NotFoundException : public DBException { public: NotFoundException(const std::string & s) : DBException(s) {}; }; -#endif //IFS_DB_EXCEPTIONS_HPP +#endif //GEKKOFS_DB_EXCEPTIONS_HPP diff --git a/include/daemon/backend/metadata/db.hpp b/include/daemon/backend/metadata/db.hpp index 6d66a3b5d..8690ec56d 100644 --- a/include/daemon/backend/metadata/db.hpp +++ b/include/daemon/backend/metadata/db.hpp @@ -11,8 +11,8 @@ SPDX-License-Identifier: MIT */ -#ifndef IFS_METADATA_DB_HPP -#define IFS_METADATA_DB_HPP +#ifndef GEKKOFS_METADATA_DB_HPP +#define GEKKOFS_METADATA_DB_HPP #include #include "rocksdb/db.h" @@ -21,8 +21,8 @@ namespace rdb = rocksdb; class MetadataDB { - private: - std::unique_ptr db; +private: + std::unique_ptr db; rdb::Options options; rdb::WriteOptions write_opts; std::string path; @@ -44,4 
+44,4 @@ class MetadataDB { void iterate_all(); }; -#endif //IFS_METADATA_DB_HPP +#endif //GEKKOFS_METADATA_DB_HPP diff --git a/include/daemon/env.hpp b/include/daemon/env.hpp index cb75bf547..e1f486203 100644 --- a/include/daemon/env.hpp +++ b/include/daemon/env.hpp @@ -14,7 +14,7 @@ #ifndef GKFS_DAEMON_ENV #define GKFS_DAEMON_ENV -#include +#include #define ADD_PREFIX(str) DAEMON_ENV_PREFIX str diff --git a/include/daemon/main.hpp b/include/daemon/main.hpp index f284a5f28..19f8ca206 100644 --- a/include/daemon/main.hpp +++ b/include/daemon/main.hpp @@ -18,7 +18,7 @@ #include #include -#include +#include #include // margo extern "C" { @@ -29,7 +29,7 @@ extern "C" { #include #include -#define ADAFS_DATA (static_cast(FsData::getInstance())) +#define GKFS_DATA (static_cast(FsData::getInstance())) #define RPC_DATA (static_cast(RPCData::getInstance())) void init_environment(); diff --git a/include/daemon/ops/metadentry.hpp b/include/daemon/ops/metadentry.hpp index 39acc8f58..350713d52 100644 --- a/include/daemon/ops/metadentry.hpp +++ b/include/daemon/ops/metadentry.hpp @@ -12,8 +12,8 @@ */ -#ifndef IFS_METADENTRY_HPP -#define IFS_METADENTRY_HPP +#ifndef GEKKOFS_METADENTRY_HPP +#define GEKKOFS_METADENTRY_HPP #include #include @@ -36,4 +36,4 @@ void update_metadentry(const std::string& path, Metadata& md); std::vector> get_dirents(const std::string& dir); -#endif //IFS_METADENTRY_HPP +#endif //GEKKOFS_METADENTRY_HPP diff --git a/include/daemon/util.hpp b/include/daemon/util.hpp new file mode 100644 index 000000000..b12243da7 --- /dev/null +++ b/include/daemon/util.hpp @@ -0,0 +1,28 @@ +/* + Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + + This software was partially supported by the + EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). + + This software was partially supported by the + ADA-FS project under the SPPEXA project funded by the DFG. + + SPDX-License-Identifier: MIT +*/ + +#ifndef GEKKOFS_DAEMON_UTIL_HPP +#define GEKKOFS_DAEMON_UTIL_HPP + +#include +#include + +namespace gkfs { + namespace util { + void populate_hosts_file(); + + void destroy_hosts_file(); + } +} + +#endif //GEKKOFS_DAEMON_UTIL_HPP diff --git a/include/global/chunk_calc_util.hpp b/include/global/chunk_calc_util.hpp index bade836fb..00b9df0f3 100644 --- a/include/global/chunk_calc_util.hpp +++ b/include/global/chunk_calc_util.hpp @@ -11,15 +11,15 @@ SPDX-License-Identifier: MIT */ -#ifndef IFS_CHNK_CALC_UTIL_HPP -#define IFS_CHNK_CALC_UTIL_HPP +#ifndef GEKKOFS_CHNK_CALC_UTIL_HPP +#define GEKKOFS_CHNK_CALC_UTIL_HPP #include /** * Compute the base2 logarithm for 64 bit integers */ -inline int log2(uint64_t n){ +inline int log2(uint64_t n) { /* see http://stackoverflow.com/questions/11376288/fast-computing-of-log2-for-64-bit-integers */ static const int table[64] = { diff --git a/include/global/cmake_configure.hpp.in b/include/global/cmake_configure.hpp.in new file mode 100644 index 000000000..480c90eef --- /dev/null +++ b/include/global/cmake_configure.hpp.in @@ -0,0 +1,22 @@ +/* + Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + + This software was partially supported by the + EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). + + This software was partially supported by the + ADA-FS project under the SPPEXA project funded by the DFG. 
+ + SPDX-License-Identifier: MIT +*/ + +#ifndef GKFS_CMAKE_CONFIGURE_HPP +#define GKFS_CMAKE_CONFIGURE_HPP + +#define RPC_PROTOCOL "@RPC_PROTOCOL@" +#cmakedefine01 USE_SHM +#cmakedefine01 CREATE_CHECK_PARENTS +#cmakedefine01 LOG_SYSCALLS + +#endif //FS_CMAKE_CONFIGURE_H diff --git a/include/global/configure.hpp.in b/include/global/configure.hpp.in deleted file mode 100644 index d3e1f32fe..000000000 --- a/include/global/configure.hpp.in +++ /dev/null @@ -1,76 +0,0 @@ -/* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany - - This software was partially supported by the - EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). - - This software was partially supported by the - ADA-FS project under the SPPEXA project funded by the DFG. - - SPDX-License-Identifier: MIT -*/ - -#ifndef GKFS_CONFIGURE_HPP -#define GKFS_CONFIGURE_HPP - - -#define RPC_PROTOCOL "@RPC_PROTOCOL@" -#cmakedefine01 USE_SHM -#cmakedefine01 CREATE_CHECK_PARENTS - -#define CHUNKSIZE 524288 // in bytes 512KB - -// What metadata is used -#define MDATA_USE_ATIME false -#define MDATA_USE_MTIME false -#define MDATA_USE_CTIME false -#define MDATA_USE_LINK_CNT false -#define MDATA_USE_BLOCKS false - -/* - * Zero buffer before read. This is relevant if sparse files are used. - * If buffer is not zeroed, sparse regions contain invalid data. - */ -//#define ZERO_BUFFER_BEFORE_READ - -// Write-ahead logging of rocksdb -#define KV_WOL false - -// Buffer size for Rocksdb. A high number means that all entries are held in memory. -// However, when full the application blocks until **all** entries are flushed to disk. -//#define KV_WRITE_BUFFER 16384 - -// Margo and Argobots configuration - -/* - * Indicates the number of concurrent progress to drive I/O operations of chunk files to and from local file systems - * The value is directly mapped to created Argobots xstreams, controlled in a single pool with ABT_snoozer scheduler - */ -#define DAEMON_IO_XSTREAMS 8 -// Number of threads used for RPC handlers at the daemon -#define DAEMON_RPC_HANDLER_XSTREAMS 8 -#define DEFAULT_RPC_PORT 4433 -#define RPC_TRIES 3 -// rpc timeout to try again in milliseconds -#define RPC_TIMEOUT 3000 - -#define DEFAULT_HOSTS_FILE "./gkfs_hosts.txt" - -//size of preallocated buffer to hold directory entries in rpc call -#define RPC_DIRENTS_BUFF_SIZE (8 * 1024 * 1024) // 8 mega - -// environment prefixes -#define CLIENT_ENV_PREFIX "LIBGKFS_" -#define DAEMON_ENV_PREFIX "GKFS_" - -// Log -#define DEFAULT_CLIENT_LOG_PATH "/tmp/gkfs_client.log" -#define DEFAULT_DAEMON_LOG_PATH "/tmp/gkfs_daemon.log" - -#define DEFAULT_CLIENT_LOG_LEVEL "info,errors,critical,mercury" -#define DEFAULT_DAEMON_LOG_LEVEL 4 // info - -#cmakedefine01 LOG_SYSCALLS - -#endif //FS_CONFIGURE_H diff --git a/include/global/global_defs.hpp b/include/global/global_defs.hpp index 8b8b1c4b4..771733c9a 100644 --- a/include/global/global_defs.hpp +++ b/include/global/global_defs.hpp @@ -11,37 +11,43 @@ SPDX-License-Identifier: MIT */ -#ifndef IFS_GLOBAL_DEFS_HPP -#define IFS_GLOBAL_DEFS_HPP +#ifndef GEKKOFS_GLOBAL_DEFS_HPP +#define GEKKOFS_GLOBAL_DEFS_HPP -#include //underlying_type +namespace gkfs { // These constexpr set the RPC's identity and which handler the receiver end should use -namespace hg_tag { - constexpr auto fs_config = "rpc_srv_fs_config"; - constexpr auto create = "rpc_srv_mk_node"; - constexpr auto stat = "rpc_srv_stat"; - constexpr auto remove = "rpc_srv_rm_node"; - constexpr auto 
decr_size = "rpc_srv_decr_size"; - constexpr auto update_metadentry = "rpc_srv_update_metadentry"; - constexpr auto get_metadentry_size = "rpc_srv_get_metadentry_size"; - constexpr auto update_metadentry_size = "rpc_srv_update_metadentry_size"; - constexpr auto get_dirents = "rpc_srv_get_dirents"; + namespace hg_tag { + constexpr auto fs_config = "rpc_srv_fs_config"; + constexpr auto create = "rpc_srv_mk_node"; + constexpr auto stat = "rpc_srv_stat"; + constexpr auto remove = "rpc_srv_rm_node"; + constexpr auto decr_size = "rpc_srv_decr_size"; + constexpr auto update_metadentry = "rpc_srv_update_metadentry"; + constexpr auto get_metadentry_size = "rpc_srv_get_metadentry_size"; + constexpr auto update_metadentry_size = "rpc_srv_update_metadentry_size"; + constexpr auto get_dirents = "rpc_srv_get_dirents"; #ifdef HAS_SYMLINKS - constexpr auto mk_symlink = "rpc_srv_mk_symlink"; + constexpr auto mk_symlink = "rpc_srv_mk_symlink"; #endif - constexpr auto write_data = "rpc_srv_write_data"; - constexpr auto read_data = "rpc_srv_read_data"; - constexpr auto trunc_data = "rpc_srv_trunc_data"; - constexpr auto chunk_stat = "rpc_srv_chunk_stat"; + constexpr auto write_data = "rpc_srv_write_data"; + constexpr auto read_data = "rpc_srv_read_data"; + constexpr auto trunc_data = "rpc_srv_trunc_data"; + constexpr auto chunk_stat = "rpc_srv_chunk_stat"; + } + + namespace rpc { + namespace protocol { + constexpr auto ofi_psm2 = "ofi+psm2"; + constexpr auto ofi_sockets = "ofi+sockets"; + constexpr auto ofi_tcp = "ofi+tcp"; + } + } + + namespace types { + // typedefs + typedef unsigned long rpc_chnk_id_t; + } } -// typedefs -typedef unsigned long rpc_chnk_id_t; - -template -constexpr typename std::underlying_type::type to_underlying(E e) { - return static_cast::type>(e); -} - -#endif //IFS_GLOBAL_DEFS_HPP +#endif //GEKKOFS_GLOBAL_DEFS_HPP diff --git a/include/global/log_util.hpp b/include/global/log_util.hpp index 898b3e430..bd0d8e572 100644 --- a/include/global/log_util.hpp +++ b/include/global/log_util.hpp @@ -11,16 +11,18 @@ SPDX-License-Identifier: MIT */ -#ifndef IFS_LOG_UITIL_HPP -#define IFS_LOG_UITIL_HPP +#ifndef GEKKOFS_LOG_UITIL_HPP +#define GEKKOFS_LOG_UITIL_HPP #include spdlog::level::level_enum get_spdlog_level(std::string level_str); + spdlog::level::level_enum get_spdlog_level(unsigned long level); + void setup_loggers(const std::vector& loggers, - spdlog::level::level_enum level, const std::string& path); + spdlog::level::level_enum level, const std::string& path); #endif diff --git a/include/global/metadata.hpp b/include/global/metadata.hpp index e24c60783..573d7ae0d 100644 --- a/include/global/metadata.hpp +++ b/include/global/metadata.hpp @@ -16,7 +16,7 @@ #pragma once -#include "global/configure.hpp" +#include "config.hpp" #include #include #include diff --git a/include/global/path_util.hpp b/include/global/path_util.hpp index d93fc1aff..b12b01a40 100644 --- a/include/global/path_util.hpp +++ b/include/global/path_util.hpp @@ -11,8 +11,8 @@ SPDX-License-Identifier: MIT */ -#ifndef IFS_PATH_UTIL_HPP -#define IFS_PATH_UTIL_HPP +#ifndef GEKKOFS_PATH_UTIL_HPP +#define GEKKOFS_PATH_UTIL_HPP #include #include @@ -30,4 +30,4 @@ std::string prepend_path(const std::string& path, const char * raw_path); std::string dirname(const std::string& path); std::vector split_path(const std::string& path); -#endif //IFS_PATH_UTIL_HPP +#endif //GEKKOFS_PATH_UTIL_HPP diff --git a/include/global/rpc/distributor.hpp b/include/global/rpc/distributor.hpp index 6ca59f651..d09fbc164 100644 --- 
a/include/global/rpc/distributor.hpp +++ b/include/global/rpc/distributor.hpp @@ -11,8 +11,8 @@ SPDX-License-Identifier: MIT */ -#ifndef IFS_RPC_DISTRIBUTOR_HPP -#define IFS_RPC_DISTRIBUTOR_HPP +#ifndef GEKKOFS_RPC_DISTRIBUTOR_HPP +#define GEKKOFS_RPC_DISTRIBUTOR_HPP #include #include @@ -56,4 +56,4 @@ class LocalOnlyDistributor : public Distributor { std::vector locate_directory_metadata(const std::string& path) const override; }; -#endif //IFS_RPC_LOCATOR_HPP +#endif //GEKKOFS_RPC_LOCATOR_HPP diff --git a/include/global/rpc/rpc_utils.hpp b/include/global/rpc/rpc_utils.hpp index ee3b6b7b4..2a55721eb 100644 --- a/include/global/rpc/rpc_utils.hpp +++ b/include/global/rpc/rpc_utils.hpp @@ -12,14 +12,15 @@ */ -#ifndef IFS_RPC_UTILS_HPP -#define IFS_RPC_UTILS_HPP +#ifndef GEKKOFS_RPC_UTILS_HPP +#define GEKKOFS_RPC_UTILS_HPP extern "C" { #include #include #include } + #include template @@ -68,4 +69,4 @@ std::string get_host_by_name(const std::string & hostname); bool is_handle_sm(margo_instance_id mid, const hg_addr_t& addr); -#endif //IFS_RPC_UTILS_HPP +#endif //GEKKOFS_RPC_UTILS_HPP diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt index 753faa255..9bafe5327 100644 --- a/src/client/CMakeLists.txt +++ b/src/client/CMakeLists.txt @@ -1,51 +1,52 @@ set(PRELOAD_SRC - preload_context.cpp - adafs_functions.cpp - intercept.cpp + gkfs_functions.cpp hooks.cpp + intercept.cpp + logging.cpp open_file_map.cpp open_dir.cpp preload.cpp - resolve.cpp + preload_context.cpp preload_util.cpp + resolve.cpp + ../global/path_util.cpp + ../global/rpc/rpc_utils.cpp rpc/hg_rpcs.cpp - rpc/ld_rpc_management.cpp rpc/ld_rpc_data_ws.cpp + rpc/ld_rpc_management.cpp rpc/ld_rpc_metadentry.cpp - ../global/rpc/rpc_utils.cpp - ../global/path_util.cpp - logging.cpp syscalls/detail/syscall_info.c ) set(PRELOAD_HEADERS - ../../include/global/configure.hpp - ../../include/global/global_defs.hpp - ../../include/global/rpc/rpc_types.hpp - ../../include/global/rpc/rpc_utils.hpp - ../../include/global/path_util.hpp - ../../include/global/chunk_calc_util.hpp - ../../include/client/preload_context.hpp - ../../include/client/adafs_functions.hpp - ../../include/client/intercept.hpp + ../../include/client/gkfs_functions.hpp + ../../include/config.hpp + ../../include/client/env.hpp ../../include/client/hooks.hpp + ../../include/client/intercept.hpp + ../../include/client/logging.hpp + ../../include/client/make_array.hpp ../../include/client/open_file_map.hpp ../../include/client/open_dir.hpp ../../include/client/preload.hpp - ../../include/client/resolve.hpp + ../../include/client/preload_context.hpp ../../include/client/preload_util.hpp + ../../include/client/resolve.hpp ../../include/client/rpc/hg_rpcs.hpp ../../include/client/rpc/ld_rpc_management.hpp - ../../include/client/rpc/ld_rpc_data_ws.hpp ../../include/client/rpc/ld_rpc_metadentry.hpp - ../../include/client/logging.hpp - ../../include/client/env.hpp - ../../include/client/make_array.hpp + ../../include/client/rpc/ld_rpc_data_ws.hpp ../../include/client/syscalls/args.hpp ../../include/client/syscalls/decoder.hpp ../../include/client/syscalls/errno.hpp ../../include/client/syscalls/rets.hpp ../../include/client/syscalls/syscall.hpp ../../include/client/syscalls/detail/syscall_info.h + ../../include/global/cmake_configure.hpp + ../../include/global/chunk_calc_util.hpp + ../../include/global/global_defs.hpp + ../../include/global/path_util.hpp + ../../include/global/rpc/rpc_types.hpp + ../../include/global/rpc/rpc_utils.hpp ) add_library(gkfs_intercept SHARED 
${PRELOAD_SRC} ${PRELOAD_HEADERS}) diff --git a/src/client/adafs_functions.cpp b/src/client/gkfs_functions.cpp similarity index 78% rename from src/client/adafs_functions.cpp rename to src/client/gkfs_functions.cpp index 9d85a0d42..ed11b7484 100644 --- a/src/client/adafs_functions.cpp +++ b/src/client/gkfs_functions.cpp @@ -14,12 +14,12 @@ #include #include -#include +#include #include #include #include "client/preload_util.hpp" #include -#include +#include #include #include #include @@ -46,22 +46,22 @@ struct linux_dirent64 { using namespace std; -int adafs_open(const std::string& path, mode_t mode, int flags) { +int gkfs_open(const std::string& path, mode_t mode, int flags) { - if(flags & O_PATH){ + if (flags & O_PATH) { LOG(ERROR, "`O_PATH` flag is not supported"); errno = ENOTSUP; return -1; } - if(flags & O_APPEND){ + if (flags & O_APPEND) { LOG(ERROR, "`O_APPEND` flag is not supported"); errno = ENOTSUP; return -1; } bool exists = true; - auto md = adafs_metadata(path); + auto md = gkfs_metadata(path); if (!md) { if(errno == ENOENT) { exists = false; @@ -88,8 +88,8 @@ int adafs_open(const std::string& path, mode_t mode, int flags) { } // no access check required here. If one is using our FS they have the permissions. - if(adafs_mk_node(path, mode | S_IFREG)) { - LOG(ERROR, "Error creating non-existent file: {}", strerror(errno)); + if (gkfs_mk_node(path, mode | S_IFREG)) { + LOG(ERROR, "Error creating non-existent file: '{}'", strerror(errno)); return -1; } } else { @@ -108,12 +108,12 @@ int adafs_open(const std::string& path, mode_t mode, int flags) { errno = ELOOP; return -1; } - return adafs_open(md->target_path(), mode, flags); + return gkfs_open(md->target_path(), mode, flags); } #endif if(S_ISDIR(md->mode())) { - return adafs_opendir(path); + return gkfs_opendir(path); } @@ -121,7 +121,7 @@ int adafs_open(const std::string& path, mode_t mode, int flags) { assert(S_ISREG(md->mode())); if( (flags & O_TRUNC) && ((flags & O_RDWR) || (flags & O_WRONLY)) ) { - if(adafs_truncate(path, md->size(), 0)) { + if (gkfs_truncate(path, md->size(), 0)) { LOG(ERROR, "Error truncating file"); return -1; } @@ -134,7 +134,7 @@ int adafs_open(const std::string& path, mode_t mode, int flags) { int check_parent_dir(const std::string& path) { #if CREATE_CHECK_PARENTS auto p_comp = dirname(path); - auto md = adafs_metadata(p_comp); + auto md = gkfs_metadata(p_comp); if (!md) { if (errno == ENOENT) { LOG(DEBUG, "Parent component does not exist: '{}'", p_comp); @@ -152,7 +152,7 @@ int check_parent_dir(const std::string& path) { return 0; } -int adafs_mk_node(const std::string& path, mode_t mode) { +int gkfs_mk_node(const std::string& path, mode_t mode) { //file type must be set switch (mode & S_IFMT) { @@ -178,7 +178,6 @@ int adafs_mk_node(const std::string& path, mode_t mode) { if (check_parent_dir(path)) { return -1; } - return rpc_send::mk_node(path, mode); } @@ -187,8 +186,8 @@ int adafs_mk_node(const std::string& path, mode_t mode) { * @param path * @return */ -int adafs_rm_node(const std::string& path) { - auto md = adafs_metadata(path); +int gkfs_rm_node(const std::string& path) { + auto md = gkfs_metadata(path); if (!md) { return -1; } @@ -196,8 +195,8 @@ int adafs_rm_node(const std::string& path) { return rpc_send::rm_node(path, !has_data, md->size()); } -int adafs_access(const std::string& path, const int mask, bool follow_links) { - auto md = adafs_metadata(path, follow_links); +int gkfs_access(const std::string& path, const int mask, bool follow_links) { + auto md = gkfs_metadata(path, 
follow_links); if (!md) { errno = ENOENT; return -1; @@ -205,16 +204,16 @@ int adafs_access(const std::string& path, const int mask, bool follow_links) { return 0; } -int adafs_stat(const string& path, struct stat* buf, bool follow_links) { - auto md = adafs_metadata(path, follow_links); +int gkfs_stat(const string& path, struct stat* buf, bool follow_links) { + auto md = gkfs_metadata(path, follow_links); if (!md) { return -1; } - metadata_to_stat(path, *md, *buf); + gkfs::client::metadata_to_stat(path, *md, *buf); return 0; } -std::shared_ptr adafs_metadata(const string& path, bool follow_links) { +std::shared_ptr gkfs_metadata(const string& path, bool follow_links) { std::string attr; auto err = rpc_send::stat(path, attr); if (err) { @@ -235,7 +234,7 @@ std::shared_ptr adafs_metadata(const string& path, bool follow_links) return make_shared(attr); } -int adafs_statfs(struct statfs* buf) { +int gkfs_statfs(struct statfs* buf) { auto blk_stat = rpc_send::chunk_stat(); buf->f_type = 0; buf->f_bsize = blk_stat.chunk_size; @@ -252,7 +251,7 @@ int adafs_statfs(struct statfs* buf) { return 0; } -int adafs_statvfs(struct statvfs* buf) { +int gkfs_statvfs(struct statvfs* buf) { init_ld_env_if_needed(); auto blk_stat = rpc_send::chunk_stat(); buf->f_bsize = blk_stat.chunk_size; @@ -270,26 +269,26 @@ int adafs_statvfs(struct statvfs* buf) { return 0; } -off_t adafs_lseek(unsigned int fd, off_t offset, unsigned int whence) { - return adafs_lseek(CTX->file_map()->get(fd), offset, whence); +off_t gkfs_lseek(unsigned int fd, off_t offset, unsigned int whence) { + return gkfs_lseek(CTX->file_map()->get(fd), offset, whence); } -off_t adafs_lseek(shared_ptr adafs_fd, off_t offset, unsigned int whence) { +off_t gkfs_lseek(shared_ptr gkfs_fd, off_t offset, unsigned int whence) { switch (whence) { case SEEK_SET: - adafs_fd->pos(offset); + gkfs_fd->pos(offset); break; case SEEK_CUR: - adafs_fd->pos(adafs_fd->pos() + offset); + gkfs_fd->pos(gkfs_fd->pos() + offset); break; case SEEK_END: { off64_t file_size; - auto err = rpc_send::get_metadentry_size(adafs_fd->path(), file_size); + auto err = rpc_send::get_metadentry_size(gkfs_fd->path(), file_size); if (err < 0) { errno = err; // Negative numbers are explicitly for error codes return -1; } - adafs_fd->pos(file_size + offset); + gkfs_fd->pos(file_size + offset); break; } case SEEK_DATA: @@ -307,14 +306,14 @@ off_t adafs_lseek(shared_ptr adafs_fd, off_t offset, unsigned int when errno = EINVAL; return -1; } - return adafs_fd->pos(); + return gkfs_fd->pos(); } -int adafs_truncate(const std::string& path, off_t old_size, off_t new_size) { +int gkfs_truncate(const std::string& path, off_t old_size, off_t new_size) { assert(new_size >= 0); assert(new_size <= old_size); - if(new_size == old_size) { + if (new_size == old_size) { return 0; } @@ -330,22 +329,22 @@ int adafs_truncate(const std::string& path, off_t old_size, off_t new_size) { return 0; } -int adafs_truncate(const std::string& path, off_t length) { +int gkfs_truncate(const std::string& path, off_t length) { /* TODO CONCURRENCY: * At the moment we first ask the length to the metadata-server in order to * know which data-server have data to be deleted. * - * From the moment we issue the adafs_stat and the moment we issue the - * adafs_trunc_data, some more data could have been added to the file and the + * From the moment we issue the gkfs_stat and the moment we issue the + * gkfs_trunc_data, some more data could have been added to the file and the * length increased. 
*/ - if(length < 0) { + if (length < 0) { LOG(DEBUG, "Length is negative: {}", length); errno = EINVAL; return -1; } - auto md = adafs_metadata(path, true); + auto md = gkfs_metadata(path, true); if (!md) { return -1; } @@ -355,18 +354,18 @@ int adafs_truncate(const std::string& path, off_t length) { errno = EINVAL; return -1; } - return adafs_truncate(path, size, length); + return gkfs_truncate(path, size, length); } -int adafs_dup(const int oldfd) { +int gkfs_dup(const int oldfd) { return CTX->file_map()->dup(oldfd); } -int adafs_dup2(const int oldfd, const int newfd) { +int gkfs_dup2(const int oldfd, const int newfd) { return CTX->file_map()->dup2(oldfd, newfd); } -ssize_t adafs_pwrite(std::shared_ptr file, const char * buf, size_t count, off64_t offset) { +ssize_t gkfs_pwrite(std::shared_ptr file, const char* buf, size_t count, off64_t offset) { if (file->type() != FileType::regular) { assert(file->type() == FileType::directory); LOG(WARNING, "Cannot read from directory"); @@ -390,9 +389,9 @@ ssize_t adafs_pwrite(std::shared_ptr file, const char * buf, size_t co return ret; // return written size or -1 as error } -ssize_t adafs_pwrite_ws(int fd, const void* buf, size_t count, off64_t offset) { +ssize_t gkfs_pwrite_ws(int fd, const void* buf, size_t count, off64_t offset) { auto file = CTX->file_map()->get(fd); - return adafs_pwrite(file, reinterpret_cast(buf), count, offset); + return gkfs_pwrite(file, reinterpret_cast(buf), count, offset); } /* Write counts bytes starting from current file position @@ -400,32 +399,32 @@ ssize_t adafs_pwrite_ws(int fd, const void* buf, size_t count, off64_t offset) { * * Same as write syscall. */ -ssize_t adafs_write(int fd, const void * buf, size_t count) { - auto adafs_fd = CTX->file_map()->get(fd); - auto pos = adafs_fd->pos(); //retrieve the current offset - if (adafs_fd->get_flag(OpenFile_flags::append)) - adafs_lseek(adafs_fd, 0, SEEK_END); - auto ret = adafs_pwrite(adafs_fd, reinterpret_cast(buf), count, pos); +ssize_t gkfs_write(int fd, const void* buf, size_t count) { + auto gkfs_fd = CTX->file_map()->get(fd); + auto pos = gkfs_fd->pos(); //retrieve the current offset + if (gkfs_fd->get_flag(OpenFile_flags::append)) + gkfs_lseek(gkfs_fd, 0, SEEK_END); + auto ret = gkfs_pwrite(gkfs_fd, reinterpret_cast(buf), count, pos); // Update offset in file descriptor in the file map if (ret > 0) { - adafs_fd->pos(pos + count); + gkfs_fd->pos(pos + count); } return ret; } -ssize_t adafs_pwritev(int fd, const struct iovec *iov, int iovcnt, off_t offset) { +ssize_t gkfs_pwritev(int fd, const struct iovec* iov, int iovcnt, off_t offset) { auto file = CTX->file_map()->get(fd); auto pos = offset; // keep truck of current position ssize_t written = 0; ssize_t ret; for (int i = 0; i < iovcnt; ++i) { - auto count = (iov+i)->iov_len; + auto count = (iov + i)->iov_len; if (count == 0) { continue; } auto buf = (iov+i)->iov_base; - ret = adafs_pwrite(file, reinterpret_cast(buf), count, pos); + ret = gkfs_pwrite(file, reinterpret_cast(buf), count, pos); if (ret == -1) { break; } @@ -443,20 +442,20 @@ ssize_t adafs_pwritev(int fd, const struct iovec *iov, int iovcnt, off_t offset) return written; } -ssize_t adafs_writev(int fd, const struct iovec * iov, int iovcnt) { +ssize_t gkfs_writev(int fd, const struct iovec* iov, int iovcnt) { - auto adafs_fd = CTX->file_map()->get(fd); - auto pos = adafs_fd->pos(); // retrieve the current offset - auto ret = adafs_pwritev(fd, iov, iovcnt, pos); + auto gkfs_fd = CTX->file_map()->get(fd); + auto pos = gkfs_fd->pos(); // retrieve 
the current offset + auto ret = gkfs_pwritev(fd, iov, iovcnt, pos); assert(ret != 0); if (ret < 0) { return -1; } - adafs_fd->pos(pos + ret); + gkfs_fd->pos(pos + ret); return ret; } -ssize_t adafs_pread(std::shared_ptr file, char * buf, size_t count, off64_t offset) { +ssize_t gkfs_pread(std::shared_ptr file, char* buf, size_t count, off64_t offset) { if (file->type() != FileType::regular) { assert(file->type() == FileType::directory); LOG(WARNING, "Cannot read from directory"); @@ -465,9 +464,9 @@ ssize_t adafs_pread(std::shared_ptr file, char * buf, size_t count, of } // Zeroing buffer before read is only relevant for sparse files. Otherwise sparse regions contain invalid data. -#if defined(ZERO_BUFFER_BEFORE_READ) - memset(buf, 0, sizeof(char)*count); -#endif + if (gkfs_config::io::zero_buffer_before_read) { + memset(buf, 0, sizeof(char) * count); + } auto ret = rpc_send::read(file->path(), buf, offset, count); if (ret < 0) { LOG(WARNING, "rpc_send::read() failed with ret {}", ret); @@ -476,25 +475,25 @@ ssize_t adafs_pread(std::shared_ptr file, char * buf, size_t count, of return ret; // return read size or -1 as error } -ssize_t adafs_read(int fd, void* buf, size_t count) { - auto adafs_fd = CTX->file_map()->get(fd); - auto pos = adafs_fd->pos(); //retrieve the current offset - auto ret = adafs_pread(adafs_fd, reinterpret_cast(buf), count, pos); +ssize_t gkfs_read(int fd, void* buf, size_t count) { + auto gkfs_fd = CTX->file_map()->get(fd); + auto pos = gkfs_fd->pos(); //retrieve the current offset + auto ret = gkfs_pread(gkfs_fd, reinterpret_cast(buf), count, pos); // Update offset in file descriptor in the file map if (ret > 0) { - adafs_fd->pos(pos + ret); + gkfs_fd->pos(pos + ret); } return ret; } -ssize_t adafs_pread_ws(int fd, void* buf, size_t count, off64_t offset) { - auto adafs_fd = CTX->file_map()->get(fd); - return adafs_pread(adafs_fd, reinterpret_cast(buf), count, offset); +ssize_t gkfs_pread_ws(int fd, void* buf, size_t count, off64_t offset) { + auto gkfs_fd = CTX->file_map()->get(fd); + return gkfs_pread(gkfs_fd, reinterpret_cast(buf), count, offset); } -int adafs_opendir(const std::string& path) { +int gkfs_opendir(const std::string& path) { - auto md = adafs_metadata(path); + auto md = gkfs_metadata(path); if (!md) { return -1; } @@ -509,8 +508,8 @@ int adafs_opendir(const std::string& path) { return CTX->file_map()->add(open_dir); } -int adafs_rmdir(const std::string& path) { - auto md = adafs_metadata(path); +int gkfs_rmdir(const std::string& path) { + auto md = gkfs_metadata(path); if (!md) { LOG(DEBUG, "Path '{}' does not exist: ", path); errno = ENOENT; @@ -636,14 +635,14 @@ int getdents64(unsigned int fd, #ifdef HAS_SYMLINKS -int adafs_mk_symlink(const std::string& path, const std::string& target_path) { +int gkfs_mk_symlink(const std::string& path, const std::string& target_path) { init_ld_env_if_needed(); /* The following check is not POSIX compliant. * In POSIX the target is not checked at all. * Here if the target is a directory we raise a NOTSUP error. * So that application know we don't support link to directory. 
*/ - auto target_md = adafs_metadata(target_path, false); + auto target_md = gkfs_metadata(target_path, false); if (target_md != nullptr) { auto trg_mode = target_md->mode(); if (!(S_ISREG(trg_mode) || S_ISLNK(trg_mode))) { @@ -658,7 +657,7 @@ int adafs_mk_symlink(const std::string& path, const std::string& target_path) { return -1; } - auto link_md = adafs_metadata(path, false); + auto link_md = gkfs_metadata(path, false); if (link_md != nullptr) { LOG(DEBUG, "Link exists: '{}'", path); errno = EEXIST; @@ -668,9 +667,9 @@ int adafs_mk_symlink(const std::string& path, const std::string& target_path) { return rpc_send::mk_symlink(path, target_path); } -int adafs_readlink(const std::string& path, char *buf, int bufsize) { +int gkfs_readlink(const std::string& path, char* buf, int bufsize) { init_ld_env_if_needed(); - auto md = adafs_metadata(path, false); + auto md = gkfs_metadata(path, false); if (md == nullptr) { LOG(DEBUG, "Named link doesn't exist"); return -1; diff --git a/src/client/hooks.cpp b/src/client/hooks.cpp index 2f30cb70f..04c14f2fa 100644 --- a/src/client/hooks.cpp +++ b/src/client/hooks.cpp @@ -15,7 +15,7 @@ #include "client/preload.hpp" #include "client/logging.hpp" -#include "client/adafs_functions.hpp" +#include "client/gkfs_functions.hpp" #include "client/resolve.hpp" #include "client/open_dir.hpp" #include "global/path_util.hpp" @@ -48,7 +48,7 @@ int hook_openat(int dirfd, const char *cpath, int flags, mode_t mode) { return -ENOTDIR; case RelativizeStatus::internal: - return with_errno(adafs_open(resolved, mode, flags)); + return with_errno(gkfs_open(resolved, mode, flags)); default: LOG(ERROR, "{}() relativize status unknown: {}", __func__); @@ -82,7 +82,7 @@ int hook_stat(const char* path, struct stat* buf) { std::string rel_path; if (CTX->relativize_path(path, rel_path, false)) { - return with_errno(adafs_stat(rel_path, buf)); + return with_errno(gkfs_stat(rel_path, buf)); } return syscall_no_intercept(SYS_stat, rel_path.c_str(), buf); } @@ -94,7 +94,7 @@ int hook_lstat(const char* path, struct stat* buf) { std::string rel_path; if (CTX->relativize_path(path, rel_path)) { - return with_errno(adafs_stat(rel_path, buf)); + return with_errno(gkfs_stat(rel_path, buf)); } return syscall_no_intercept(SYS_lstat, rel_path.c_str(), buf); } @@ -106,7 +106,7 @@ int hook_fstat(unsigned int fd, struct stat* buf) { if (CTX->file_map()->exist(fd)) { auto path = CTX->file_map()->get(fd)->path(); - return with_errno(adafs_stat(path, buf)); + return with_errno(gkfs_stat(path, buf)); } return syscall_no_intercept(SYS_fstat, fd, buf); } @@ -134,7 +134,7 @@ int hook_fstatat(int dirfd, const char * cpath, struct stat * buf, int flags) { return -ENOTDIR; case RelativizeStatus::internal: - return with_errno(adafs_stat(resolved, buf)); + return with_errno(gkfs_stat(resolved, buf)); default: LOG(ERROR, "{}() relativize status unknown: {}", __func__); @@ -148,7 +148,7 @@ int hook_read(unsigned int fd, void* buf, size_t count) { __func__, fd, fmt::ptr(buf), count); if (CTX->file_map()->exist(fd)) { - return with_errno(adafs_read(fd, buf, count)); + return with_errno(gkfs_read(fd, buf, count)); } return syscall_no_intercept(SYS_read, fd, buf, count); } @@ -159,7 +159,7 @@ int hook_pread(unsigned int fd, char * buf, size_t count, loff_t pos) { __func__, fd, fmt::ptr(buf), count, pos); if (CTX->file_map()->exist(fd)) { - return with_errno(adafs_pread_ws(fd, buf, count, pos)); + return with_errno(gkfs_pread_ws(fd, buf, count, pos)); } /* Since kernel 2.6: pread() became pread64(), and pwrite() 
became pwrite64(). */ return syscall_no_intercept(SYS_pread64, fd, buf, count, pos); @@ -171,7 +171,7 @@ int hook_write(unsigned int fd, const char * buf, size_t count) { __func__, fd, fmt::ptr(buf), count); if (CTX->file_map()->exist(fd)) { - return with_errno(adafs_write(fd, buf, count)); + return with_errno(gkfs_write(fd, buf, count)); } return syscall_no_intercept(SYS_write, fd, buf, count); } @@ -182,7 +182,7 @@ int hook_pwrite(unsigned int fd, const char * buf, size_t count, loff_t pos) { __func__, fd, fmt::ptr(buf), count, pos); if (CTX->file_map()->exist(fd)) { - return with_errno(adafs_pwrite_ws(fd, buf, count, pos)); + return with_errno(gkfs_pwrite_ws(fd, buf, count, pos)); } /* Since kernel 2.6: pread() became pread64(), and pwrite() became pwrite64(). */ return syscall_no_intercept(SYS_pwrite64, fd, buf, count, pos); @@ -194,7 +194,7 @@ int hook_writev(unsigned long fd, const struct iovec * iov, unsigned long iovcnt __func__, fd, fmt::ptr(iov), iovcnt); if (CTX->file_map()->exist(fd)) { - return with_errno(adafs_writev(fd, iov, iovcnt)); + return with_errno(gkfs_writev(fd, iov, iovcnt)); } return syscall_no_intercept(SYS_writev, fd, iov, iovcnt); } @@ -237,9 +237,9 @@ int hook_unlinkat(int dirfd, const char * cpath, int flags) { case RelativizeStatus::internal: if(flags & AT_REMOVEDIR) { - return with_errno(adafs_rmdir(resolved)); + return with_errno(gkfs_rmdir(resolved)); } else { - return with_errno(adafs_rm_node(resolved)); + return with_errno(gkfs_rm_node(resolved)); } default: @@ -289,7 +289,7 @@ int hook_access(const char* path, int mask) { std::string rel_path; if (CTX->relativize_path(path, rel_path)) { - auto ret = adafs_access(rel_path, mask); + auto ret = gkfs_access(rel_path, mask); if(ret < 0) { return -errno; } @@ -316,7 +316,7 @@ int hook_faccessat(int dirfd, const char * cpath, int mode) { return -ENOTDIR; case RelativizeStatus::internal: - return with_errno(adafs_access(resolved, mode)); + return with_errno(gkfs_access(resolved, mode)); default: LOG(ERROR, "{}() relativize status unknown: {}", __func__); @@ -330,7 +330,7 @@ off_t hook_lseek(unsigned int fd, off_t offset, unsigned int whence) { __func__, fd, offset, whence); if (CTX->file_map()->exist(fd)) { - auto off_ret = adafs_lseek(fd, static_cast(offset), whence); + auto off_ret = gkfs_lseek(fd, static_cast(offset), whence); if (off_ret > std::numeric_limits::max()) { return -EOVERFLOW; } else if(off_ret < 0) { @@ -349,7 +349,7 @@ int hook_truncate(const char* path, long length) { std::string rel_path; if (CTX->relativize_path(path, rel_path)) { - return with_errno(adafs_truncate(rel_path, length)); + return with_errno(gkfs_truncate(rel_path, length)); } return syscall_no_intercept(SYS_truncate, rel_path.c_str(), length); } @@ -361,7 +361,7 @@ int hook_ftruncate(unsigned int fd, unsigned long length) { if (CTX->file_map()->exist(fd)) { auto path = CTX->file_map()->get(fd)->path(); - return with_errno(adafs_truncate(path, length)); + return with_errno(gkfs_truncate(path, length)); } return syscall_no_intercept(SYS_ftruncate, fd, length); } @@ -372,7 +372,7 @@ int hook_dup(unsigned int fd) { __func__, fd); if (CTX->file_map()->exist(fd)) { - return with_errno(adafs_dup(fd)); + return with_errno(gkfs_dup(fd)); } return syscall_no_intercept(SYS_dup, fd); } @@ -383,7 +383,7 @@ int hook_dup2(unsigned int oldfd, unsigned int newfd) { __func__, oldfd, newfd); if (CTX->file_map()->exist(oldfd)) { - return with_errno(adafs_dup2(oldfd, newfd)); + return with_errno(gkfs_dup2(oldfd, newfd)); } return 
syscall_no_intercept(SYS_dup2, oldfd, newfd); } @@ -444,7 +444,7 @@ int hook_mkdirat(int dirfd, const char * cpath, mode_t mode) { return -ENOTDIR; case RelativizeStatus::internal: - return with_errno(adafs_mk_node(resolved, mode | S_IFDIR)); + return with_errno(gkfs_mk_node(resolved, mode | S_IFDIR)); default: LOG(ERROR, "{}() relativize status unknown: {}", __func__); @@ -500,7 +500,7 @@ int hook_chdir(const char * path) { bool internal = CTX->relativize_path(path, rel_path); if (internal) { //path falls in our namespace - auto md = adafs_metadata(rel_path); + auto md = gkfs_metadata(rel_path); if (md == nullptr) { LOG(ERROR, "{}() path does not exists", __func__); return -ENOENT; @@ -616,11 +616,11 @@ int hook_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg) { case F_DUPFD: LOG(DEBUG, "{}() F_DUPFD on fd {}", __func__, fd); - return with_errno(adafs_dup(fd)); + return with_errno(gkfs_dup(fd)); case F_DUPFD_CLOEXEC: LOG(DEBUG, "{}() F_DUPFD_CLOEXEC on fd {}", __func__, fd); - ret = adafs_dup(fd); + ret = gkfs_dup(fd); if(ret == -1) { return -errno; } @@ -733,7 +733,7 @@ int hook_statfs(const char * path, struct statfs * buf) { std::string rel_path; if (CTX->relativize_path(path, rel_path)) { - return with_errno(adafs_statfs(buf)); + return with_errno(gkfs_statfs(buf)); } return syscall_no_intercept(SYS_statfs, rel_path.c_str(), buf); } @@ -744,7 +744,7 @@ int hook_fstatfs(unsigned int fd, struct statfs * buf) { __func__, fd, fmt::ptr(buf)); if (CTX->file_map()->exist(fd)) { - return with_errno(adafs_statfs(buf)); + return with_errno(gkfs_statfs(buf)); } return syscall_no_intercept(SYS_fstatfs, fd, buf); } diff --git a/src/client/open_file_map.cpp b/src/client/open_file_map.cpp index 3f2678a18..f47853385 100644 --- a/src/client/open_file_map.cpp +++ b/src/client/open_file_map.cpp @@ -13,10 +13,10 @@ #include -#include #include #include #include +#include #include using namespace std; @@ -27,17 +27,17 @@ OpenFile::OpenFile(const string& path, const int flags, FileType type) : { // set flags to OpenFile if (flags & O_CREAT) - flags_[to_underlying(OpenFile_flags::creat)] = true; + flags_[gkfs::client::to_underlying(OpenFile_flags::creat)] = true; if (flags & O_APPEND) - flags_[to_underlying(OpenFile_flags::append)] = true; + flags_[gkfs::client::to_underlying(OpenFile_flags::append)] = true; if (flags & O_TRUNC) - flags_[to_underlying(OpenFile_flags::trunc)] = true; + flags_[gkfs::client::to_underlying(OpenFile_flags::trunc)] = true; if (flags & O_RDONLY) - flags_[to_underlying(OpenFile_flags::rdonly)] = true; + flags_[gkfs::client::to_underlying(OpenFile_flags::rdonly)] = true; if (flags & O_WRONLY) - flags_[to_underlying(OpenFile_flags::wronly)] = true; + flags_[gkfs::client::to_underlying(OpenFile_flags::wronly)] = true; if (flags & O_RDWR) - flags_[to_underlying(OpenFile_flags::rdwr)] = true; + flags_[gkfs::client::to_underlying(OpenFile_flags::rdwr)] = true; pos_ = 0; // If O_APPEND flag is used, it will be used before each write. 
} @@ -55,8 +55,8 @@ string OpenFile::path() const { return path_; } -void OpenFile::path(const string& path_) { - OpenFile::path_ = path_; +void OpenFile::path(const string& path) { + OpenFile::path_ = path; } unsigned long OpenFile::pos() { @@ -64,19 +64,19 @@ unsigned long OpenFile::pos() { return pos_; } -void OpenFile::pos(unsigned long pos_) { +void OpenFile::pos(unsigned long pos) { lock_guard lock(pos_mutex_); - OpenFile::pos_ = pos_; + OpenFile::pos_ = pos; } bool OpenFile::get_flag(OpenFile_flags flag) { lock_guard lock(pos_mutex_); - return flags_[to_underlying(flag)]; + return flags_[gkfs::client::to_underlying(flag)]; } void OpenFile::set_flag(OpenFile_flags flag, bool value) { lock_guard lock(flag_mutex_); - flags_[to_underlying(flag)] = value; + flags_[gkfs::client::to_underlying(flag)] = value; } FileType OpenFile::type() const { diff --git a/src/client/preload.cpp b/src/client/preload.cpp index 952edf4c1..7812a61cb 100644 --- a/src/client/preload.cpp +++ b/src/client/preload.cpp @@ -12,9 +12,6 @@ */ #include -#include -#include -#include #include #include #include @@ -131,7 +128,7 @@ void init_ld_environment_() { try { LOG(INFO, "Loading peer addresses..."); - load_hosts(); + gkfs::client::load_hosts(); } catch (const std::exception& e) { exit_error_msg(EXIT_FAILURE, "Failed to load hosts addresses: "s + e.what()); } diff --git a/src/client/preload_context.cpp b/src/client/preload_context.cpp index 816c7870e..0728e18ae 100644 --- a/src/client/preload_context.cpp +++ b/src/client/preload_context.cpp @@ -25,15 +25,17 @@ #include #include +#include + #include #include -decltype(PreloadContext::MIN_INTERNAL_FD) constexpr -PreloadContext::MIN_INTERNAL_FD; -decltype(PreloadContext::MAX_USER_FDS) constexpr -PreloadContext::MAX_USER_FDS; +decltype(PreloadContext::MIN_INTERNAL_FD) constexpr + PreloadContext::MIN_INTERNAL_FD; +decltype(PreloadContext::MAX_USER_FDS) constexpr + PreloadContext::MAX_USER_FDS; -PreloadContext::PreloadContext(): +PreloadContext::PreloadContext() : ofm_(std::make_shared()), fs_conf_(std::make_shared()) { @@ -44,11 +46,11 @@ PreloadContext::PreloadContext(): void PreloadContext::init_logging() { - const std::string log_opts = - gkfs::env::get_var(gkfs::env::LOG, DEFAULT_CLIENT_LOG_LEVEL); + const std::string log_opts = + gkfs::env::get_var(gkfs::env::LOG, gkfs_config::logging::client_log_level); - const std::string log_output = - gkfs::env::get_var(gkfs::env::LOG_OUTPUT, DEFAULT_CLIENT_LOG_PATH); + const std::string log_output = + gkfs::env::get_var(gkfs::env::LOG_OUTPUT, gkfs_config::logging::client_log_path); #ifdef GKFS_DEBUG_BUILD // atoi returns 0 if no int conversion can be performed, which works diff --git a/src/client/preload_util.cpp b/src/client/preload_util.cpp index 32dd3ede3..233123576 100644 --- a/src/client/preload_util.cpp +++ b/src/client/preload_util.cpp @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -35,7 +36,7 @@ using namespace std; * @param attr * @return */ -int metadata_to_stat(const std::string& path, const Metadata& md, struct stat& attr) { +int gkfs::client::metadata_to_stat(const std::string& path, const Metadata& md, struct stat& attr) { /* Populate default values */ attr.st_dev = makedev(0, 0); @@ -44,7 +45,7 @@ int metadata_to_stat(const std::string& path, const Metadata& md, struct stat& a attr.st_uid = CTX->fs_conf()->uid; attr.st_gid = CTX->fs_conf()->gid; attr.st_rdev = 0; - attr.st_blksize = CHUNKSIZE; + attr.st_blksize = gkfs_config::rpc::chunksize; attr.st_blocks = 0; memset(&attr.st_atim, 
0, sizeof(timespec)); @@ -78,14 +79,14 @@ int metadata_to_stat(const std::string& path, const Metadata& md, struct stat& a return 0; } -vector> load_hosts_file(const std::string& lfpath) { +vector> gkfs::client::load_hostfile(const std::string& lfpath) { LOG(DEBUG, "Loading hosts file: \"{}\"", lfpath); ifstream lf(lfpath); if (!lf) { throw runtime_error(fmt::format("Failed to open hosts file '{}': {}", - lfpath, strerror(errno))); + lfpath, strerror(errno))); } vector> hosts; const regex line_re("^(\\S+)\\s+(\\S+)$", @@ -110,7 +111,7 @@ vector> load_hosts_file(const std::string& lfpath) { return hosts; } -hermes::endpoint lookup_endpoint(const std::string& uri, +hermes::endpoint lookup_endpoint(const std::string& uri, std::size_t max_retries = 3) { LOG(DEBUG, "Looking up address \"{}\"", uri); @@ -141,21 +142,21 @@ hermes::endpoint lookup_endpoint(const std::string& uri, uri, error_msg)); } -void load_hosts() { - string hosts_file; +void gkfs::client::load_hosts() { + string hostfile; - hosts_file = gkfs::env::get_var(gkfs::env::HOSTS_FILE, DEFAULT_HOSTS_FILE); + hostfile = gkfs::env::get_var(gkfs::env::HOSTS_FILE, gkfs_config::hostfile_path); vector> hosts; try { - hosts = load_hosts_file(hosts_file); + hosts = gkfs::client::load_hostfile(hostfile); } catch (const exception& e) { auto emsg = fmt::format("Failed to load hosts file: {}", e.what()); throw runtime_error(emsg); } if (hosts.size() == 0) { - throw runtime_error(fmt::format("Host file empty: '{}'", hosts_file)); + throw runtime_error(fmt::format("Hostfile empty: '{}'", hostfile)); } LOG(INFO, "Hosts pool size: {}", hosts.size()); diff --git a/src/client/resolve.cpp b/src/client/resolve.cpp index 0c5825c81..da950d902 100644 --- a/src/client/resolve.cpp +++ b/src/client/resolve.cpp @@ -19,7 +19,6 @@ #include #include "global/path_util.hpp" -#include "global/configure.hpp" #include "client/preload.hpp" #include "client/logging.hpp" #include "client/env.hpp" diff --git a/src/client/rpc/ld_rpc_data_ws.cpp b/src/client/rpc/ld_rpc_data_ws.cpp index c080150d5..74171fcd3 100644 --- a/src/client/rpc/ld_rpc_data_ws.cpp +++ b/src/client/rpc/ld_rpc_data_ws.cpp @@ -11,10 +11,8 @@ SPDX-License-Identifier: MIT */ -#include #include #include -#include "global/rpc/rpc_types.hpp" #include #include #include @@ -37,26 +35,26 @@ ssize_t write(const string& path, const void* buf, const bool append_flag, const off64_t in_offset, const size_t write_size, const int64_t updated_metadentry_size) { - assert(write_size > 0); + assert(write_size > 0); - // Calculate chunkid boundaries and numbers so that daemons know in - // which interval to look for chunks - off64_t offset = append_flag ? - in_offset : - (updated_metadentry_size - write_size); + // Calculate chunkid boundaries and numbers so that daemons know in + // which interval to look for chunks + off64_t offset = append_flag ? + in_offset : + (updated_metadentry_size - write_size); - auto chnk_start = chnk_id_for_offset(offset, CHUNKSIZE); - auto chnk_end = chnk_id_for_offset((offset + write_size) - 1, CHUNKSIZE); + auto chnk_start = chnk_id_for_offset(offset, gkfs_config::rpc::chunksize); + auto chnk_end = chnk_id_for_offset((offset + write_size) - 1, gkfs_config::rpc::chunksize); - // Collect all chunk ids within count that have the same destination so - // that those are send in one rpc bulk transfer - std::map> target_chnks{}; - // contains the target ids, used to access the target_chnks map. 
- // First idx is chunk with potential offset - std::vector targets{}; + // Collect all chunk ids within count that have the same destination so + // that those are send in one rpc bulk transfer + std::map> target_chnks{}; + // contains the target ids, used to access the target_chnks map. + // First idx is chunk with potential offset + std::vector targets{}; - // targets for the first and last chunk as they need special treatment - uint64_t chnk_start_target = 0; + // targets for the first and last chunk as they need special treatment + uint64_t chnk_start_target = 0; uint64_t chnk_end_target = 0; for (uint64_t chnk_id = chnk_start; chnk_id <= chnk_end; chnk_id++) { @@ -109,16 +107,16 @@ ssize_t write(const string& path, const void* buf, const bool append_flag, for(const auto& target : targets) { // total chunk_size for target - auto total_chunk_size = target_chnks[target].size() * CHUNKSIZE; + auto total_chunk_size = target_chnks[target].size() * gkfs_config::rpc::chunksize; // receiver of first chunk must subtract the offset from first chunk if (target == chnk_start_target) { - total_chunk_size -= chnk_lpad(offset, CHUNKSIZE); + total_chunk_size -= chnk_lpad(offset, gkfs_config::rpc::chunksize); } // receiver of last chunk must subtract if (target == chnk_end_target) { - total_chunk_size -= chnk_rpad(offset + write_size, CHUNKSIZE); + total_chunk_size -= chnk_rpad(offset + write_size, gkfs_config::rpc::chunksize); } auto endp = CTX->hosts().at(target); @@ -128,20 +126,20 @@ ssize_t write(const string& path, const void* buf, const bool append_flag, LOG(DEBUG, "Sending RPC ..."); gkfs::rpc::write_data::input in( - path, - // first offset in targets is the chunk with - // a potential offset - chnk_lpad(offset, CHUNKSIZE), - target, - CTX->hosts().size(), - // number of chunks handled by that destination - target_chnks[target].size(), - // chunk start id of this write - chnk_start, - // chunk end id of this write - chnk_end, - // total size to write - total_chunk_size, + path, + // first offset in targets is the chunk with + // a potential offset + chnk_lpad(offset, gkfs_config::rpc::chunksize), + target, + CTX->hosts().size(), + // number of chunks handled by that destination + target_chnks[target].size(), + // chunk start id of this write + chnk_start, + // chunk end id of this write + chnk_end, + // total size to write + total_chunk_size, local_buffers); // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that @@ -202,20 +200,20 @@ ssize_t write(const string& path, const void* buf, const bool append_flag, */ ssize_t read(const string& path, void* buf, const off64_t offset, const size_t read_size) { - // Calculate chunkid boundaries and numbers so that daemons know in which - // interval to look for chunks - auto chnk_start = chnk_id_for_offset(offset, CHUNKSIZE); - auto chnk_end = chnk_id_for_offset((offset + read_size - 1), CHUNKSIZE); + // Calculate chunkid boundaries and numbers so that daemons know in which + // interval to look for chunks + auto chnk_start = chnk_id_for_offset(offset, gkfs_config::rpc::chunksize); + auto chnk_end = chnk_id_for_offset((offset + read_size - 1), gkfs_config::rpc::chunksize); - // Collect all chunk ids within count that have the same destination so - // that those are send in one rpc bulk transfer - std::map> target_chnks{}; - // contains the recipient ids, used to access the target_chnks map. 
- // First idx is chunk with potential offset - std::vector targets{}; + // Collect all chunk ids within count that have the same destination so + // that those are send in one rpc bulk transfer + std::map> target_chnks{}; + // contains the recipient ids, used to access the target_chnks map. + // First idx is chunk with potential offset + std::vector targets{}; - // targets for the first and last chunk as they need special treatment - uint64_t chnk_start_target = 0; + // targets for the first and last chunk as they need special treatment + uint64_t chnk_start_target = 0; uint64_t chnk_end_target = 0; for (uint64_t chnk_id = chnk_start; chnk_id <= chnk_end; chnk_id++) { @@ -268,16 +266,16 @@ ssize_t read(const string& path, void* buf, const off64_t offset, const size_t r for(const auto& target : targets) { // total chunk_size for target - auto total_chunk_size = target_chnks[target].size() * CHUNKSIZE; + auto total_chunk_size = target_chnks[target].size() * gkfs_config::rpc::chunksize; // receiver of first chunk must subtract the offset from first chunk if (target == chnk_start_target) { - total_chunk_size -= chnk_lpad(offset, CHUNKSIZE); + total_chunk_size -= chnk_lpad(offset, gkfs_config::rpc::chunksize); } // receiver of last chunk must subtract if (target == chnk_end_target) { - total_chunk_size -= chnk_rpad(offset + read_size, CHUNKSIZE); + total_chunk_size -= chnk_rpad(offset + read_size, gkfs_config::rpc::chunksize); } auto endp = CTX->hosts().at(target); @@ -287,20 +285,20 @@ ssize_t read(const string& path, void* buf, const off64_t offset, const size_t r LOG(DEBUG, "Sending RPC ..."); gkfs::rpc::read_data::input in( - path, - // first offset in targets is the chunk with - // a potential offset - chnk_lpad(offset, CHUNKSIZE), - target, - CTX->hosts().size(), - // number of chunks handled by that destination - target_chnks[target].size(), - // chunk start id of this write - chnk_start, - // chunk end id of this write - chnk_end, - // total size to write - total_chunk_size, + path, + // first offset in targets is the chunk with + // a potential offset + chnk_lpad(offset, gkfs_config::rpc::chunksize), + target, + CTX->hosts().size(), + // number of chunks handled by that destination + target_chnks[target].size(), + // chunk start id of this write + chnk_start, + // chunk end id of this write + chnk_end, + // total size to write + total_chunk_size, local_buffers); // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that @@ -363,9 +361,9 @@ int trunc_data(const std::string& path, size_t current_size, size_t new_size) { // Find out which data servers need to delete data chunks in order to // contact only them - const unsigned int chunk_start = chnk_id_for_offset(new_size, CHUNKSIZE); - const unsigned int chunk_end = - chnk_id_for_offset(current_size - new_size - 1, CHUNKSIZE); + const unsigned int chunk_start = chnk_id_for_offset(new_size, gkfs_config::rpc::chunksize); + const unsigned int chunk_end = + chnk_id_for_offset(current_size - new_size - 1, gkfs_config::rpc::chunksize); std::unordered_set hosts; for(unsigned int chunk_id = chunk_start; chunk_id <= chunk_end; ++chunk_id) { @@ -450,7 +448,7 @@ ChunkStat chunk_stat() { } } - unsigned long chunk_size = CHUNKSIZE; + unsigned long chunk_size = gkfs_config::rpc::chunksize; unsigned long chunk_total = 0; unsigned long chunk_free = 0; diff --git a/src/client/rpc/ld_rpc_metadentry.cpp b/src/client/rpc/ld_rpc_metadentry.cpp index 1a2b940bc..3a11766f3 100644 --- a/src/client/rpc/ld_rpc_metadentry.cpp +++ 
b/src/client/rpc/ld_rpc_metadentry.cpp @@ -11,7 +11,6 @@ SPDX-License-Identifier: MIT */ -#include #include #include "client/preload.hpp" #include "client/logging.hpp" @@ -166,7 +165,7 @@ int rm_node(const std::string& path, const bool remove_metadentry_only, const ss std::vector> handles; // Small files - if(static_cast(size / CHUNKSIZE) < CTX->hosts().size()) { + if (static_cast(size / gkfs_config::rpc::chunksize) < CTX->hosts().size()) { auto endp = CTX->hosts().at( CTX->distributor()->locate_file_metadata(path)); @@ -175,10 +174,10 @@ int rm_node(const std::string& path, const bool remove_metadentry_only, const ss LOG(DEBUG, "Sending RPC to host: {}", endp.to_string()); gkfs::rpc::remove::input in(path); handles.emplace_back( - ld_network_service->post(endp,in)); + ld_network_service->post(endp, in)); uint64_t chnk_start = 0; - uint64_t chnk_end = size/CHUNKSIZE; + uint64_t chnk_end = size / gkfs_config::rpc::chunksize; for (uint64_t chnk_id = chnk_start; chnk_id <= chnk_end; chnk_id++) { const auto target = CTX->hosts().at( @@ -378,28 +377,28 @@ void get_dirents(OpenDir& open_dir){ auto const targets = CTX->distributor()->locate_directory_metadata(root_dir); - /* preallocate receiving buffer. The actual size is not known yet. - * - * On C++14 make_unique function also zeroes the newly allocated buffer. - * It turns out that this operation is increadibly slow for such a big - * buffer. Moreover we don't need a zeroed buffer here. - */ - auto large_buffer = - std::unique_ptr(new char[RPC_DIRENTS_BUFF_SIZE]); + /* preallocate receiving buffer. The actual size is not known yet. + * + * On C++14 make_unique function also zeroes the newly allocated buffer. + * It turns out that this operation is increadibly slow for such a big + * buffer. Moreover we don't need a zeroed buffer here. + */ + auto large_buffer = + std::unique_ptr(new char[gkfs_config::rpc::dirents_buff_size]); - //XXX there is a rounding error here depending on the number of targets... - const std::size_t per_host_buff_size = - RPC_DIRENTS_BUFF_SIZE / targets.size(); + //XXX there is a rounding error here depending on the number of targets... 
+ const std::size_t per_host_buff_size = + gkfs_config::rpc::dirents_buff_size / targets.size(); - // expose local buffers for RMA from servers - std::vector exposed_buffers; - exposed_buffers.reserve(targets.size()); + // expose local buffers for RMA from servers + std::vector exposed_buffers; + exposed_buffers.reserve(targets.size()); - for(std::size_t i = 0; i < targets.size(); ++i) { - try { - exposed_buffers.emplace_back( - ld_network_service->expose( - std::vector{ + for (std::size_t i = 0; i < targets.size(); ++i) { + try { + exposed_buffers.emplace_back( + ld_network_service->expose( + std::vector{ hermes::mutable_buffer{ large_buffer.get() + (i * per_host_buff_size), per_host_buff_size diff --git a/src/daemon/CMakeLists.txt b/src/daemon/CMakeLists.txt index 6e5503ee2..da70e0ec2 100644 --- a/src/daemon/CMakeLists.txt +++ b/src/daemon/CMakeLists.txt @@ -5,6 +5,7 @@ set(DAEMON_SRC ../global/rpc/rpc_utils.cpp ../global/path_util.cpp main.cpp + util.cpp ops/metadentry.cpp classes/fs_data.cpp classes/rpc_data.cpp @@ -13,13 +14,15 @@ set(DAEMON_SRC handler/h_preload.cpp ) set(DAEMON_HEADERS + ../../include/config.hpp ../../include/version.hpp - ../../include/global/configure.hpp + ../../include/global/cmake_configure.hpp ../../include/global/global_defs.hpp ../../include/global/rpc/rpc_types.hpp ../../include/global/rpc/rpc_utils.hpp ../../include/global/path_util.hpp ../../include/daemon/main.hpp + ../../include/daemon/util.hpp ../../include/daemon/ops/metadentry.hpp ../../include/daemon/classes/fs_data.hpp ../../include/daemon/classes/rpc_data.hpp diff --git a/src/daemon/backend/metadata/db.cpp b/src/daemon/backend/metadata/db.cpp index f41a69a7b..1ed8d59cd 100644 --- a/src/daemon/backend/metadata/db.cpp +++ b/src/daemon/backend/metadata/db.cpp @@ -29,7 +29,7 @@ MetadataDB::MetadataDB(const std::string& path): path(path) { options.create_if_missing = true; options.merge_operator.reset(new MetadataMergeOperator); MetadataDB::optimize_rocksdb_options(options); - write_opts.disableWAL = !(KV_WOL); + write_opts.disableWAL = !(gkfs_config::rocksdb::use_write_ahead_log); rdb::DB * rdb_ptr; auto s = rocksdb::DB::Open(options, path, &rdb_ptr); if (!s.ok()) { @@ -187,13 +187,4 @@ void MetadataDB::iterate_all() { void MetadataDB::optimize_rocksdb_options(rdb::Options& options) { options.max_successive_merges = 128; - -#if defined(KV_WRITE_BUFFER) - // write_buffer_size is multiplied by the write_buffer_number to get the amount of data hold in memory. - // at min_write_buffer_number_to_merge rocksdb starts to flush entries out to disk - options.write_buffer_size = KV_WRITE_BUFFER << 20; - // XXX experimental values. We only want one buffer, which is held in memory - options.max_write_buffer_number = 1; - options.min_write_buffer_number_to_merge = 1; -#endif } diff --git a/src/daemon/handler/h_data.cpp b/src/daemon/handler/h_data.cpp index 7167a9d4f..7abea310d 100644 --- a/src/daemon/handler/h_data.cpp +++ b/src/daemon/handler/h_data.cpp @@ -26,7 +26,7 @@ using namespace std; struct write_chunk_args { const std::string* path; const char* buf; - rpc_chnk_id_t chnk_id; + gkfs::types::rpc_chnk_id_t chnk_id; size_t size; off64_t off; ABT_eventual eventual; @@ -36,7 +36,7 @@ struct write_chunk_args { * Used by an argobots threads. 
Argument args has the following fields: * const std::string* path; const char* buf; - const rpc_chnk_id_t* chnk_id; + const gkfs::types::rpc_chnk_id_t* chnk_id; size_t size; off64_t off; ABT_eventual* eventual; @@ -50,10 +50,10 @@ void write_file_abt(void* _arg) { const std::string& path = *(arg->path); try { - ADAFS_DATA->storage()->write_chunk(path, arg->chnk_id, - arg->buf, arg->size, arg->off, arg->eventual); - } catch (const std::system_error& serr){ - ADAFS_DATA->spdlogger()->error("{}() Error writing chunk {} of file {}", __func__, arg->chnk_id, path); + GKFS_DATA->storage()->write_chunk(path, arg->chnk_id, + arg->buf, arg->size, arg->off, arg->eventual); + } catch (const std::system_error& serr) { + GKFS_DATA->spdlogger()->error("{}() Error writing chunk {} of file {}", __func__, arg->chnk_id, path); ssize_t wrote = -(serr.code().value()); ABT_eventual_set(arg->eventual, &wrote, sizeof(ssize_t)); } @@ -63,7 +63,7 @@ void write_file_abt(void* _arg) { struct read_chunk_args { const std::string* path; char* buf; - rpc_chnk_id_t chnk_id; + gkfs::types::rpc_chnk_id_t chnk_id; size_t size; off64_t off; ABT_eventual eventual; @@ -73,7 +73,7 @@ struct read_chunk_args { * Used by an argobots threads. Argument args has the following fields: * const std::string* path; char* buf; - const rpc_chnk_id_t* chnk_id; + const gkfs::types::rpc_chnk_id_t* chnk_id; size_t size; off64_t off; ABT_eventual* eventual; @@ -87,10 +87,10 @@ void read_file_abt(void* _arg) { const std::string& path = *(arg->path); try { - ADAFS_DATA->storage()->read_chunk(path, arg->chnk_id, - arg->buf, arg->size, arg->off, arg->eventual); - } catch (const std::system_error& serr){ - ADAFS_DATA->spdlogger()->error("{}() Error reading chunk {} of file {}", __func__, arg->chnk_id, path); + GKFS_DATA->storage()->read_chunk(path, arg->chnk_id, + arg->buf, arg->size, arg->off, arg->eventual); + } catch (const std::system_error& serr) { + GKFS_DATA->spdlogger()->error("{}() Error reading chunk {} of file {}", __func__, arg->chnk_id, path); ssize_t read = -(serr.code().value()); ABT_eventual_set(arg->eventual, &read, sizeof(ssize_t)); } @@ -133,14 +133,14 @@ static hg_return_t rpc_srv_write_data(hg_handle_t handle) { // Getting some information from margo auto ret = margo_get_input(handle, &in); if (ret != HG_SUCCESS) { - ADAFS_DATA->spdlogger()->error("{}() Could not get RPC input data with err {}", __func__, ret); + GKFS_DATA->spdlogger()->error("{}() Could not get RPC input data with err {}", __func__, ret); return rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); } auto hgi = margo_get_info(handle); auto mid = margo_hg_info_get_instance(hgi); auto bulk_size = margo_bulk_get_size(in.bulk_handle); - ADAFS_DATA->spdlogger()->debug("{}() path: {}, size: {}, offset: {}", __func__, - in.path, bulk_size, in.offset); + GKFS_DATA->spdlogger()->debug("{}() path: {}, size: {}, offset: {}", __func__, + in.path, bulk_size, in.offset); /* * 2. 
Set up buffers for pull bulk transfers */ @@ -149,7 +149,7 @@ static hg_return_t rpc_srv_write_data(hg_handle_t handle) { // create bulk handle and allocated memory for buffer with buf_sizes information ret = margo_bulk_create(mid, 1, nullptr, &in.total_chunk_size, HG_BULK_READWRITE, &bulk_handle); if (ret != HG_SUCCESS) { - ADAFS_DATA->spdlogger()->error("{}() Failed to create bulk handle", __func__); + GKFS_DATA->spdlogger()->error("{}() Failed to create bulk handle", __func__); return rpc_cleanup_respond(&handle, &in, &out, static_cast(nullptr)); } // access the internally allocated memory buffer and put it into buf_ptrs @@ -157,7 +157,7 @@ static hg_return_t rpc_srv_write_data(hg_handle_t handle) { ret = margo_bulk_access(bulk_handle, 0, in.total_chunk_size, HG_BULK_READWRITE, 1, &bulk_buf, &in.total_chunk_size, &actual_count); if (ret != HG_SUCCESS || actual_count != 1) { - ADAFS_DATA->spdlogger()->error("{}() Failed to access allocated buffer from bulk handle", __func__); + GKFS_DATA->spdlogger()->error("{}() Failed to access allocated buffer from bulk handle", __func__); return rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); } auto const host_id = in.host_id; @@ -185,7 +185,7 @@ static hg_return_t rpc_srv_write_data(hg_handle_t handle) { * Last chunk can also happen if only one chunk is written. This is covered by 2 and 3. */ // temporary variables - auto transfer_size = (bulk_size <= CHUNKSIZE) ? bulk_size : CHUNKSIZE; + auto transfer_size = (bulk_size <= gkfs_config::rpc::chunksize) ? bulk_size : gkfs_config::rpc::chunksize; uint64_t origin_offset; uint64_t local_offset; // task structures for async writing @@ -204,12 +204,13 @@ static hg_return_t rpc_srv_write_data(hg_handle_t handle) { // offset case. Only relevant in the first iteration of the loop and if the chunk hashes to this host if (chnk_id_file == in.chunk_start && in.offset > 0) { // if only 1 destination and 1 chunk (small write) the transfer_size == bulk_size - auto offset_transfer_size = (in.offset + bulk_size <= CHUNKSIZE) ? bulk_size : static_cast( - CHUNKSIZE - in.offset); + auto offset_transfer_size = (in.offset + bulk_size <= gkfs_config::rpc::chunksize) ? 
bulk_size + : static_cast( + gkfs_config::rpc::chunksize - in.offset); ret = margo_bulk_transfer(mid, HG_BULK_PULL, hgi->addr, in.bulk_handle, 0, bulk_handle, 0, offset_transfer_size); if (ret != HG_SUCCESS) { - ADAFS_DATA->spdlogger()->error( + GKFS_DATA->spdlogger()->error( "{}() Failed to pull data from client for chunk {} (startchunk {}; endchunk {}", __func__, chnk_id_file, in.chunk_start, in.chunk_end - 1); cancel_abt_io(&abt_tasks, &task_eventuals, chnk_id_curr); @@ -223,13 +224,14 @@ static hg_return_t rpc_srv_write_data(hg_handle_t handle) { local_offset = in.total_chunk_size - chnk_size_left_host; // origin offset of a chunk is dependent on a given offset in a write operation if (in.offset > 0) - origin_offset = (CHUNKSIZE - in.offset) + ((chnk_id_file - in.chunk_start) - 1) * CHUNKSIZE; + origin_offset = (gkfs_config::rpc::chunksize - in.offset) + + ((chnk_id_file - in.chunk_start) - 1) * gkfs_config::rpc::chunksize; else - origin_offset = (chnk_id_file - in.chunk_start) * CHUNKSIZE; + origin_offset = (chnk_id_file - in.chunk_start) * gkfs_config::rpc::chunksize; // last chunk might have different transfer_size if (chnk_id_curr == in.chunk_n - 1) transfer_size = chnk_size_left_host; - ADAFS_DATA->spdlogger()->trace( + GKFS_DATA->spdlogger()->trace( "{}() BULK_TRANSFER hostid {} file {} chnkid {} total_Csize {} Csize_left {} origin offset {} local offset {} transfersize {}", __func__, host_id, in.path, chnk_id_file, in.total_chunk_size, chnk_size_left_host, origin_offset, local_offset, transfer_size); @@ -237,7 +239,7 @@ static hg_return_t rpc_srv_write_data(hg_handle_t handle) { ret = margo_bulk_transfer(mid, HG_BULK_PULL, hgi->addr, in.bulk_handle, origin_offset, bulk_handle, local_offset, transfer_size); if (ret != HG_SUCCESS) { - ADAFS_DATA->spdlogger()->error( + GKFS_DATA->spdlogger()->error( "{}() Failed to pull data from client. file {} chunk {} (startchunk {}; endchunk {})", __func__, *path, chnk_id_file, in.chunk_start, (in.chunk_end - 1)); cancel_abt_io(&abt_tasks, &task_eventuals, chnk_id_curr); @@ -262,7 +264,7 @@ static hg_return_t rpc_srv_write_data(hg_handle_t handle) { auto abt_ret = ABT_task_create(RPC_DATA->io_pool(), write_file_abt, &task_args[chnk_id_curr], &abt_tasks[chnk_id_curr]); if (abt_ret != ABT_SUCCESS) { - ADAFS_DATA->spdlogger()->error("{}() task create failed", __func__); + GKFS_DATA->spdlogger()->error("{}() task create failed", __func__); cancel_abt_io(&abt_tasks, &task_eventuals, chnk_id_curr + 1); return rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); } @@ -272,8 +274,8 @@ static hg_return_t rpc_srv_write_data(hg_handle_t handle) { } // Sanity check that all chunks where detected in previous loop if (chnk_size_left_host != 0) - ADAFS_DATA->spdlogger()->warn("{}() Not all chunks were detected!!! Size left {}", __func__, - chnk_size_left_host); + GKFS_DATA->spdlogger()->warn("{}() Not all chunks were detected!!! Size left {}", __func__, + chnk_size_left_host); /* * 4. 
Read task results and accumulate in out.io_size */ @@ -284,7 +286,7 @@ static hg_return_t rpc_srv_write_data(hg_handle_t handle) { // wait causes the calling ult to go into BLOCKED state, implicitly yielding to the pool scheduler auto abt_ret = ABT_eventual_wait(task_eventuals[chnk_id_curr], (void**) &task_written_size); if (abt_ret != ABT_SUCCESS) { - ADAFS_DATA->spdlogger()->error( + GKFS_DATA->spdlogger()->error( "{}() Failed to wait for write task for chunk {}", __func__, chnk_id_curr); out.err = EIO; @@ -292,8 +294,8 @@ static hg_return_t rpc_srv_write_data(hg_handle_t handle) { } assert(task_written_size != nullptr); if (*task_written_size < 0) { - ADAFS_DATA->spdlogger()->error("{}() Write task failed for chunk {}", - __func__, chnk_id_curr); + GKFS_DATA->spdlogger()->error("{}() Write task failed for chunk {}", + __func__, chnk_id_curr); out.err = -(*task_written_size); break; } @@ -304,14 +306,14 @@ static hg_return_t rpc_srv_write_data(hg_handle_t handle) { // Sanity check to see if all data has been written if (in.total_chunk_size != out.io_size) { - ADAFS_DATA->spdlogger()->warn("{}() total chunk size {} and out.io_size {} mismatch!", __func__, - in.total_chunk_size, out.io_size); + GKFS_DATA->spdlogger()->warn("{}() total chunk size {} and out.io_size {} mismatch!", __func__, + in.total_chunk_size, out.io_size); } /* * 5. Respond and cleanup */ - ADAFS_DATA->spdlogger()->debug("{}() Sending output response {}", __func__, out.err); + GKFS_DATA->spdlogger()->debug("{}() Sending output response {}", __func__, out.err); ret = rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); // free tasks after responding for (auto&& task : abt_tasks) { @@ -336,14 +338,14 @@ static hg_return_t rpc_srv_read_data(hg_handle_t handle) { // Getting some information from margo auto ret = margo_get_input(handle, &in); if (ret != HG_SUCCESS) { - ADAFS_DATA->spdlogger()->error("{}() Could not get RPC input data with err {}", __func__, ret); + GKFS_DATA->spdlogger()->error("{}() Could not get RPC input data with err {}", __func__, ret); return rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); } auto hgi = margo_get_info(handle); auto mid = margo_hg_info_get_instance(hgi); auto bulk_size = margo_bulk_get_size(in.bulk_handle); - ADAFS_DATA->spdlogger()->debug("{}() path: {}, size: {}, offset: {}", __func__, - in.path, bulk_size, in.offset); + GKFS_DATA->spdlogger()->debug("{}() path: {}, size: {}, offset: {}", __func__, + in.path, bulk_size, in.offset); /* * 2. 
Set up buffers for pull bulk transfers @@ -353,7 +355,7 @@ static hg_return_t rpc_srv_read_data(hg_handle_t handle) { // create bulk handle and allocated memory for buffer with buf_sizes information ret = margo_bulk_create(mid, 1, nullptr, &in.total_chunk_size, HG_BULK_READWRITE, &bulk_handle); if (ret != HG_SUCCESS) { - ADAFS_DATA->spdlogger()->error("{}() Failed to create bulk handle", __func__); + GKFS_DATA->spdlogger()->error("{}() Failed to create bulk handle", __func__); return rpc_cleanup_respond(&handle, &in, &out, static_cast(nullptr)); } // access the internally allocated memory buffer and put it into buf_ptrs @@ -361,7 +363,7 @@ static hg_return_t rpc_srv_read_data(hg_handle_t handle) { ret = margo_bulk_access(bulk_handle, 0, in.total_chunk_size, HG_BULK_READWRITE, 1, &bulk_buf, &in.total_chunk_size, &actual_count); if (ret != HG_SUCCESS || actual_count != 1) { - ADAFS_DATA->spdlogger()->error("{}() Failed to access allocated buffer from bulk handle", __func__); + GKFS_DATA->spdlogger()->error("{}() Failed to access allocated buffer from bulk handle", __func__); return rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); } auto const host_id = in.host_id; @@ -383,7 +385,7 @@ static hg_return_t rpc_srv_read_data(hg_handle_t handle) { // temporary traveling pointer auto chnk_ptr = static_cast(bulk_buf); // temporary variables - auto transfer_size = (bulk_size <= CHUNKSIZE) ? bulk_size : CHUNKSIZE; + auto transfer_size = (bulk_size <= gkfs_config::rpc::chunksize) ? bulk_size : gkfs_config::rpc::chunksize; // tasks structures vector abt_tasks(in.chunk_n); vector task_eventuals(in.chunk_n); @@ -400,8 +402,9 @@ static hg_return_t rpc_srv_read_data(hg_handle_t handle) { // Only relevant in the first iteration of the loop and if the chunk hashes to this host if (chnk_id_file == in.chunk_start && in.offset > 0) { // if only 1 destination and 1 chunk (small read) the transfer_size == bulk_size - auto offset_transfer_size = (in.offset + bulk_size <= CHUNKSIZE) ? bulk_size : static_cast( - CHUNKSIZE - in.offset); + auto offset_transfer_size = (in.offset + bulk_size <= gkfs_config::rpc::chunksize) ? 
bulk_size + : static_cast( + gkfs_config::rpc::chunksize - in.offset); // Setting later transfer offsets local_offsets[chnk_id_curr] = 0; origin_offsets[chnk_id_curr] = 0; @@ -415,9 +418,10 @@ static hg_return_t rpc_srv_read_data(hg_handle_t handle) { // origin offset of a chunk is dependent on a given offset in a write operation if (in.offset > 0) origin_offsets[chnk_id_curr] = - (CHUNKSIZE - in.offset) + ((chnk_id_file - in.chunk_start) - 1) * CHUNKSIZE; + (gkfs_config::rpc::chunksize - in.offset) + + ((chnk_id_file - in.chunk_start) - 1) * gkfs_config::rpc::chunksize; else - origin_offsets[chnk_id_curr] = (chnk_id_file - in.chunk_start) * CHUNKSIZE; + origin_offsets[chnk_id_curr] = (chnk_id_file - in.chunk_start) * gkfs_config::rpc::chunksize; // last chunk might have different transfer_size if (chnk_id_curr == in.chunk_n - 1) transfer_size = chnk_size_left_host; @@ -441,7 +445,7 @@ static hg_return_t rpc_srv_read_data(hg_handle_t handle) { auto abt_ret = ABT_task_create(RPC_DATA->io_pool(), read_file_abt, &task_args[chnk_id_curr], &abt_tasks[chnk_id_curr]); if (abt_ret != ABT_SUCCESS) { - ADAFS_DATA->spdlogger()->error("{}() task create failed", __func__); + GKFS_DATA->spdlogger()->error("{}() task create failed", __func__); cancel_abt_io(&abt_tasks, &task_eventuals, chnk_id_curr + 1); return rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); } @@ -449,8 +453,8 @@ static hg_return_t rpc_srv_read_data(hg_handle_t handle) { } // Sanity check that all chunks where detected in previous loop if (chnk_size_left_host != 0) - ADAFS_DATA->spdlogger()->warn("{}() Not all chunks were detected!!! Size left {}", __func__, - chnk_size_left_host); + GKFS_DATA->spdlogger()->warn("{}() Not all chunks were detected!!! Size left {}", __func__, + chnk_size_left_host); /* * 4. Read task results and accumulate in out.io_size */ @@ -461,18 +465,18 @@ static hg_return_t rpc_srv_read_data(hg_handle_t handle) { // wait causes the calling ult to go into BLOCKED state, implicitly yielding to the pool scheduler auto abt_ret = ABT_eventual_wait(task_eventuals[chnk_id_curr], (void**) &task_read_size); if (abt_ret != ABT_SUCCESS) { - ADAFS_DATA->spdlogger()->error( + GKFS_DATA->spdlogger()->error( "{}() Failed to wait for read task for chunk {}", __func__, chnk_id_curr); out.err = EIO; break; } assert(task_read_size != nullptr); - if(*task_read_size < 0){ - if(-(*task_read_size) == ENOENT) { + if(*task_read_size < 0) { + if (-(*task_read_size) == ENOENT) { continue; } - ADAFS_DATA->spdlogger()->warn( + GKFS_DATA->spdlogger()->warn( "{}() Read task failed for chunk {}", __func__, chnk_id_curr); out.err = -(*task_read_size); @@ -486,7 +490,7 @@ static hg_return_t rpc_srv_read_data(hg_handle_t handle) { ret = margo_bulk_transfer(mid, HG_BULK_PUSH, hgi->addr, in.bulk_handle, origin_offsets[chnk_id_curr], bulk_handle, local_offsets[chnk_id_curr], *task_read_size); if (ret != HG_SUCCESS) { - ADAFS_DATA->spdlogger()->error( + GKFS_DATA->spdlogger()->error( "{}() Failed push chnkid {} on path {} to client. origin offset {} local offset {} chunk size {}", __func__, chnk_id_curr, in.path, origin_offsets[chnk_id_curr], local_offsets[chnk_id_curr], chnk_sizes[chnk_id_curr]); @@ -499,7 +503,7 @@ static hg_return_t rpc_srv_read_data(hg_handle_t handle) { /* * 5. 
Respond and cleanup */ - ADAFS_DATA->spdlogger()->debug("{}() Sending output response, err: {}", __func__, out.err); + GKFS_DATA->spdlogger()->debug("{}() Sending output response, err: {}", __func__, out.err); ret = rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); // free tasks after responding cancel_abt_io(&abt_tasks, &task_eventuals, in.chunk_n); @@ -514,26 +518,26 @@ static hg_return_t rpc_srv_trunc_data(hg_handle_t handle) { auto ret = margo_get_input(handle, &in); if (ret != HG_SUCCESS) { - ADAFS_DATA->spdlogger()->error("{}() Could not get RPC input data with err {}", __func__, ret); + GKFS_DATA->spdlogger()->error("{}() Could not get RPC input data with err {}", __func__, ret); throw runtime_error("Failed to get RPC input data"); } - ADAFS_DATA->spdlogger()->debug("{}() path: '{}', length: {}", __func__, in.path, in.length); + GKFS_DATA->spdlogger()->debug("{}() path: '{}', length: {}", __func__, in.path, in.length); - unsigned int chunk_start = chnk_id_for_offset(in.length, CHUNKSIZE); + unsigned int chunk_start = chnk_id_for_offset(in.length, gkfs_config::rpc::chunksize); // If we trunc in the the middle of a chunk, do not delete that chunk - auto left_pad = chnk_lpad(in.length, CHUNKSIZE); - if(left_pad != 0) { - ADAFS_DATA->storage()->truncate_chunk(in.path, chunk_start, left_pad); + auto left_pad = chnk_lpad(in.length, gkfs_config::rpc::chunksize); + if (left_pad != 0) { + GKFS_DATA->storage()->truncate_chunk(in.path, chunk_start, left_pad); ++chunk_start; } - ADAFS_DATA->storage()->trim_chunk_space(in.path, chunk_start); + GKFS_DATA->storage()->trim_chunk_space(in.path, chunk_start); - ADAFS_DATA->spdlogger()->debug("{}() Sending output {}", __func__, out.err); + GKFS_DATA->spdlogger()->debug("{}() Sending output {}", __func__, out.err); auto hret = margo_respond(handle, &out); if (hret != HG_SUCCESS) { - ADAFS_DATA->spdlogger()->error("{}() Failed to respond"); + GKFS_DATA->spdlogger()->error("{}() Failed to respond"); } // Destroy handle when finished margo_free_input(handle, &in); @@ -544,18 +548,18 @@ static hg_return_t rpc_srv_trunc_data(hg_handle_t handle) { DEFINE_MARGO_RPC_HANDLER(rpc_srv_trunc_data) static hg_return_t rpc_srv_chunk_stat(hg_handle_t handle) { - ADAFS_DATA->spdlogger()->trace("{}() called", __func__); + GKFS_DATA->spdlogger()->trace("{}() called", __func__); rpc_chunk_stat_out_t out{}; // Get input - auto chk_stat = ADAFS_DATA->storage()->chunk_stat(); + auto chk_stat = GKFS_DATA->storage()->chunk_stat(); // Create output and send it out.chunk_size = chk_stat.chunk_size; out.chunk_total = chk_stat.chunk_total; out.chunk_free = chk_stat.chunk_free; auto hret = margo_respond(handle, &out); if (hret != HG_SUCCESS) { - ADAFS_DATA->spdlogger()->error("{}() Failed to respond", __func__); + GKFS_DATA->spdlogger()->error("{}() Failed to respond", __func__); } // Destroy handle when finished diff --git a/src/daemon/handler/h_metadentry.cpp b/src/daemon/handler/h_metadentry.cpp index 09390f954..a643b932c 100644 --- a/src/daemon/handler/h_metadentry.cpp +++ b/src/daemon/handler/h_metadentry.cpp @@ -27,22 +27,22 @@ static hg_return_t rpc_srv_mk_node(hg_handle_t handle) { auto ret = margo_get_input(handle, &in); if (ret != HG_SUCCESS) - ADAFS_DATA->spdlogger()->error("{}() Failed to retrieve input from handle", __func__); + GKFS_DATA->spdlogger()->error("{}() Failed to retrieve input from handle", __func__); assert(ret == HG_SUCCESS); - ADAFS_DATA->spdlogger()->debug("{}() Got RPC with path '{}'", __func__, in.path); + GKFS_DATA->spdlogger()->debug("{}() Got 
RPC with path '{}'", __func__, in.path); Metadata md(in.mode); try { // create metadentry create_metadentry(in.path, md); out.err = 0; } catch (const std::exception& e) { - ADAFS_DATA->spdlogger()->error("{}() Failed to create metadentry: {}", __func__, e.what()); + GKFS_DATA->spdlogger()->error("{}() Failed to create metadentry: '{}'", __func__, e.what()); out.err = -1; } - ADAFS_DATA->spdlogger()->debug("{}() Sending output err {}", __func__, out.err); + GKFS_DATA->spdlogger()->debug("{}() Sending output err '{}'", __func__, out.err); auto hret = margo_respond(handle, &out); if (hret != HG_SUCCESS) { - ADAFS_DATA->spdlogger()->error("{}() Failed to respond", __func__); + GKFS_DATA->spdlogger()->error("{}() Failed to respond", __func__); } // Destroy handle when finished @@ -58,9 +58,9 @@ static hg_return_t rpc_srv_stat(hg_handle_t handle) { rpc_stat_out_t out{}; auto ret = margo_get_input(handle, &in); if (ret != HG_SUCCESS) - ADAFS_DATA->spdlogger()->error("{}() Failed to retrieve input from handle", __func__); + GKFS_DATA->spdlogger()->error("{}() Failed to retrieve input from handle", __func__); assert(ret == HG_SUCCESS); - ADAFS_DATA->spdlogger()->debug("{}() path: '{}'", __func__, in.path); + GKFS_DATA->spdlogger()->debug("{}() path: '{}'", __func__, in.path); std::string val; try { @@ -68,18 +68,18 @@ static hg_return_t rpc_srv_stat(hg_handle_t handle) { val = get_metadentry_str(in.path); out.db_val = val.c_str(); out.err = 0; - ADAFS_DATA->spdlogger()->debug("{}() Sending output mode '{}'", __func__, out.db_val); + GKFS_DATA->spdlogger()->debug("{}() Sending output mode '{}'", __func__, out.db_val); } catch (const NotFoundException& e) { - ADAFS_DATA->spdlogger()->debug("{}() Entry not found: '{}'", __func__, in.path); + GKFS_DATA->spdlogger()->debug("{}() Entry not found: '{}'", __func__, in.path); out.err = ENOENT; } catch (const std::exception& e) { - ADAFS_DATA->spdlogger()->error("{}() Failed to get metadentry from DB: '{}'", __func__, e.what()); + GKFS_DATA->spdlogger()->error("{}() Failed to get metadentry from DB: '{}'", __func__, e.what()); out.err = EBUSY; } auto hret = margo_respond(handle, &out); if (hret != HG_SUCCESS) { - ADAFS_DATA->spdlogger()->error("{}() Failed to respond", __func__); + GKFS_DATA->spdlogger()->error("{}() Failed to respond", __func__); } // Destroy handle when finished @@ -96,24 +96,24 @@ static hg_return_t rpc_srv_decr_size(hg_handle_t handle) { auto ret = margo_get_input(handle, &in); if (ret != HG_SUCCESS) { - ADAFS_DATA->spdlogger()->error("{}() Failed to retrieve input from handle", __func__); + GKFS_DATA->spdlogger()->error("{}() Failed to retrieve input from handle", __func__); throw runtime_error("Failed to retrieve input from handle"); } - ADAFS_DATA->spdlogger()->debug("{}() path: '{}', length: {}", __func__, in.path, in.length); + GKFS_DATA->spdlogger()->debug("{}() path: '{}', length: {}", __func__, in.path, in.length); try { - ADAFS_DATA->mdb()->decrease_size(in.path, in.length); + GKFS_DATA->mdb()->decrease_size(in.path, in.length); out.err = 0; } catch (const std::exception& e) { - ADAFS_DATA->spdlogger()->error("{}() Failed to decrease size: {}", __func__, e.what()); + GKFS_DATA->spdlogger()->error("{}() Failed to decrease size: {}", __func__, e.what()); out.err = EIO; } - ADAFS_DATA->spdlogger()->debug("{}() Sending output {}", __func__, out.err); + GKFS_DATA->spdlogger()->debug("{}() Sending output {}", __func__, out.err); auto hret = margo_respond(handle, &out); if (hret != HG_SUCCESS) { - ADAFS_DATA->spdlogger()->error("{}() 
Failed to respond", __func__); + GKFS_DATA->spdlogger()->error("{}() Failed to respond", __func__); throw runtime_error("Failed to respond"); } // Destroy handle when finished @@ -130,9 +130,9 @@ static hg_return_t rpc_srv_rm_node(hg_handle_t handle) { auto ret = margo_get_input(handle, &in); if (ret != HG_SUCCESS) - ADAFS_DATA->spdlogger()->error("{}() Failed to retrieve input from handle", __func__); + GKFS_DATA->spdlogger()->error("{}() Failed to retrieve input from handle", __func__); assert(ret == HG_SUCCESS); - ADAFS_DATA->spdlogger()->debug("{}() Got remove node RPC with path '{}'", __func__, in.path); + GKFS_DATA->spdlogger()->debug("{}() Got remove node RPC with path '{}'", __func__, in.path); try { // Remove metadentry if exists on the node @@ -149,14 +149,14 @@ static hg_return_t rpc_srv_rm_node(hg_handle_t handle) { */ out.err = 0; } catch (const std::exception& e) { - ADAFS_DATA->spdlogger()->error("{}() Failed to remove node: {}", __func__, e.what()); + GKFS_DATA->spdlogger()->error("{}() Failed to remove node: {}", __func__, e.what()); out.err = EBUSY; } - ADAFS_DATA->spdlogger()->debug("{}() Sending output {}", __func__, out.err); + GKFS_DATA->spdlogger()->debug("{}() Sending output {}", __func__, out.err); auto hret = margo_respond(handle, &out); if (hret != HG_SUCCESS) { - ADAFS_DATA->spdlogger()->error("{}() Failed to respond", __func__); + GKFS_DATA->spdlogger()->error("{}() Failed to respond", __func__); } // Destroy handle when finished margo_free_input(handle, &in); @@ -174,9 +174,9 @@ static hg_return_t rpc_srv_update_metadentry(hg_handle_t handle) { auto ret = margo_get_input(handle, &in); if (ret != HG_SUCCESS) - ADAFS_DATA->spdlogger()->error("{}() Failed to retrieve input from handle", __func__); + GKFS_DATA->spdlogger()->error("{}() Failed to retrieve input from handle", __func__); assert(ret == HG_SUCCESS); - ADAFS_DATA->spdlogger()->debug("{}() Got update metadentry RPC with path '{}'", __func__, in.path); + GKFS_DATA->spdlogger()->debug("{}() Got update metadentry RPC with path '{}'", __func__, in.path); // do update try { @@ -195,16 +195,16 @@ static hg_return_t rpc_srv_update_metadentry(hg_handle_t handle) { md.ctime(in.ctime); update_metadentry(in.path, md); out.err = 0; - } catch (const std::exception& e){ + } catch (const std::exception& e) { //TODO handle NotFoundException - ADAFS_DATA->spdlogger()->error("{}() Failed to update entry", __func__); + GKFS_DATA->spdlogger()->error("{}() Failed to update entry", __func__); out.err = 1; } - ADAFS_DATA->spdlogger()->debug("{}() Sending output {}", __func__, out.err); + GKFS_DATA->spdlogger()->debug("{}() Sending output {}", __func__, out.err); auto hret = margo_respond(handle, &out); if (hret != HG_SUCCESS) { - ADAFS_DATA->spdlogger()->error("{}() Failed to respond", __func__); + GKFS_DATA->spdlogger()->error("{}() Failed to respond", __func__); } // Destroy handle when finished @@ -222,9 +222,10 @@ static hg_return_t rpc_srv_update_metadentry_size(hg_handle_t handle) { auto ret = margo_get_input(handle, &in); if (ret != HG_SUCCESS) - ADAFS_DATA->spdlogger()->error("{}() Failed to retrieve input from handle", __func__); + GKFS_DATA->spdlogger()->error("{}() Failed to retrieve input from handle", __func__); assert(ret == HG_SUCCESS); - ADAFS_DATA->spdlogger()->debug("{}() path: {}, size: {}, offset: {}, append: {}", __func__, in.path, in.size, in.offset, in.append); + GKFS_DATA->spdlogger()->debug("{}() path: {}, size: {}, offset: {}, append: {}", __func__, in.path, in.size, + in.offset, in.append); try { 
update_metadentry_size(in.path, in.size, in.offset, (in.append == HG_TRUE)); @@ -233,17 +234,17 @@ static hg_return_t rpc_srv_update_metadentry_size(hg_handle_t handle) { // do to concurrency on size out.ret_size = in.size + in.offset; } catch (const NotFoundException& e) { - ADAFS_DATA->spdlogger()->debug("{}() Entry not found: '{}'", __func__, in.path); + GKFS_DATA->spdlogger()->debug("{}() Entry not found: '{}'", __func__, in.path); out.err = ENOENT; } catch (const std::exception& e) { - ADAFS_DATA->spdlogger()->error("{}() Failed to update metadentry size on DB: {}", __func__, e.what()); + GKFS_DATA->spdlogger()->error("{}() Failed to update metadentry size on DB: '{}'", __func__, e.what()); out.err = EBUSY; } - ADAFS_DATA->spdlogger()->debug("{}() Sending output {}", __func__, out.err); + GKFS_DATA->spdlogger()->debug("{}() Sending output {}", __func__, out.err); auto hret = margo_respond(handle, &out); if (hret != HG_SUCCESS) { - ADAFS_DATA->spdlogger()->error("{}() Failed to respond", __func__); + GKFS_DATA->spdlogger()->error("{}() Failed to respond", __func__); } // Destroy handle when finished @@ -261,26 +262,26 @@ static hg_return_t rpc_srv_get_metadentry_size(hg_handle_t handle) { auto ret = margo_get_input(handle, &in); if (ret != HG_SUCCESS) - ADAFS_DATA->spdlogger()->error("{}() Failed to retrieve input from handle", __func__); + GKFS_DATA->spdlogger()->error("{}() Failed to retrieve input from handle", __func__); assert(ret == HG_SUCCESS); - ADAFS_DATA->spdlogger()->debug("{}() Got update metadentry size RPC with path {}", __func__, in.path); + GKFS_DATA->spdlogger()->debug("{}() Got update metadentry size RPC with path '{}'", __func__, in.path); // do update try { out.ret_size = get_metadentry_size(in.path); out.err = 0; } catch (const NotFoundException& e) { - ADAFS_DATA->spdlogger()->debug("{}() Entry not found: {}", __func__, in.path); + GKFS_DATA->spdlogger()->debug("{}() Entry not found: '{}'", __func__, in.path); out.err = ENOENT; } catch (const std::exception& e) { - ADAFS_DATA->spdlogger()->error("{}() Failed to get metadentry size from DB: {}", __func__, e.what()); + GKFS_DATA->spdlogger()->error("{}() Failed to get metadentry size from DB: '{}'", __func__, e.what()); out.err = EBUSY; } - ADAFS_DATA->spdlogger()->debug("{}() Sending output {}", __func__, out.err); + GKFS_DATA->spdlogger()->debug("{}() Sending output '{}'", __func__, out.err); auto hret = margo_respond(handle, &out); if (hret != HG_SUCCESS) { - ADAFS_DATA->spdlogger()->error("{}() Failed to respond", __func__); + GKFS_DATA->spdlogger()->error("{}() Failed to respond", __func__); } // Destroy handle when finished @@ -299,7 +300,7 @@ static hg_return_t rpc_srv_get_dirents(hg_handle_t handle) { // Get input parmeters auto ret = margo_get_input(handle, &in); if (ret != HG_SUCCESS) { - ADAFS_DATA->spdlogger()->error( + GKFS_DATA->spdlogger()->error( "{}() Could not get RPC input data with err {}", __func__, ret); return ret; } @@ -307,7 +308,7 @@ static hg_return_t rpc_srv_get_dirents(hg_handle_t handle) { // Retrieve size of source buffer auto hgi = margo_get_info(handle); auto mid = margo_hg_info_get_instance(hgi); - ADAFS_DATA->spdlogger()->debug( + GKFS_DATA->spdlogger()->debug( "{}() Got dirents RPC with path {}", __func__, in.path); auto bulk_size = margo_bulk_get_size(in.bulk_handle); @@ -316,7 +317,7 @@ static hg_return_t rpc_srv_get_dirents(hg_handle_t handle) { out.dirents_size = entries.size(); - if(entries.size() == 0){ + if (entries.size() == 0) { out.err = 0; return 
rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); } @@ -329,9 +330,9 @@ static hg_return_t rpc_srv_get_dirents(hg_handle_t handle) { } size_t out_size = tot_names_size + entries.size() * ( sizeof(bool) + sizeof(char) ); - if(bulk_size < out_size){ + if(bulk_size < out_size) { //Source buffer is smaller than total output size - ADAFS_DATA->spdlogger()->error("{}() Entries do not fit source buffer", __func__); + GKFS_DATA->spdlogger()->error("{}() Entries do not fit source buffer", __func__); out.err = ENOBUFS; return rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); } @@ -353,7 +354,7 @@ static hg_return_t rpc_srv_get_dirents(hg_handle_t handle) { ret = margo_bulk_create(mid, 1, reinterpret_cast(&out_buff_ptr), &out_size, HG_BULK_READ_ONLY, &bulk_handle); if (ret != HG_SUCCESS) { - ADAFS_DATA->spdlogger()->error("{}() Failed to create bulk handle", __func__); + GKFS_DATA->spdlogger()->error("{}() Failed to create bulk handle", __func__); out.err = EBUSY; return rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); } @@ -363,16 +364,16 @@ static hg_return_t rpc_srv_get_dirents(hg_handle_t handle) { bulk_handle, 0, out_size); if (ret != HG_SUCCESS) { - ADAFS_DATA->spdlogger()->error( + GKFS_DATA->spdlogger()->error( "{}() Failed push dirents on path {} to client", __func__, in.path - ); + ); return rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); } out.dirents_size = entries.size(); out.err = 0; - ADAFS_DATA->spdlogger()->debug( + GKFS_DATA->spdlogger()->debug( "{}() Sending output response", __func__); return rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); } @@ -387,9 +388,9 @@ static hg_return_t rpc_srv_mk_symlink(hg_handle_t handle) { auto ret = margo_get_input(handle, &in); if (ret != HG_SUCCESS) { - ADAFS_DATA->spdlogger()->error("{}() Failed to retrieve input from handle", __func__); + GKFS_DATA->spdlogger()->error("{}() Failed to retrieve input from handle", __func__); } - ADAFS_DATA->spdlogger()->debug("{}() Got RPC with path '{}'", __func__, in.path); + GKFS_DATA->spdlogger()->debug("{}() Got RPC with path '{}'", __func__, in.path); try { Metadata md = {LINK_MODE, in.target_path}; @@ -397,13 +398,13 @@ static hg_return_t rpc_srv_mk_symlink(hg_handle_t handle) { create_metadentry(in.path, md); out.err = 0; } catch (const std::exception& e) { - ADAFS_DATA->spdlogger()->error("{}() Failed to create metadentry: {}", __func__, e.what()); + GKFS_DATA->spdlogger()->error("{}() Failed to create metadentry: {}", __func__, e.what()); out.err = -1; } - ADAFS_DATA->spdlogger()->debug("{}() Sending output err {}", __func__, out.err); + GKFS_DATA->spdlogger()->debug("{}() Sending output err {}", __func__, out.err); auto hret = margo_respond(handle, &out); if (hret != HG_SUCCESS) { - ADAFS_DATA->spdlogger()->error("{}() Failed to respond", __func__); + GKFS_DATA->spdlogger()->error("{}() Failed to respond", __func__); } // Destroy handle when finished diff --git a/src/daemon/handler/h_preload.cpp b/src/daemon/handler/h_preload.cpp index 63d94a428..e0b1f0a9e 100644 --- a/src/daemon/handler/h_preload.cpp +++ b/src/daemon/handler/h_preload.cpp @@ -22,23 +22,23 @@ using namespace std; static hg_return_t rpc_srv_fs_config(hg_handle_t handle) { rpc_config_out_t out{}; - ADAFS_DATA->spdlogger()->debug("{}() Got config RPC", __func__); + GKFS_DATA->spdlogger()->debug("{}() Got config RPC", __func__); // get fs config - out.mountdir = ADAFS_DATA->mountdir().c_str(); - out.rootdir = ADAFS_DATA->rootdir().c_str(); - out.atime_state = static_cast(ADAFS_DATA->atime_state()); - 
out.mtime_state = static_cast(ADAFS_DATA->mtime_state()); - out.ctime_state = static_cast(ADAFS_DATA->ctime_state()); - out.link_cnt_state = static_cast(ADAFS_DATA->link_cnt_state()); - out.blocks_state = static_cast(ADAFS_DATA->blocks_state()); + out.mountdir = GKFS_DATA->mountdir().c_str(); + out.rootdir = GKFS_DATA->rootdir().c_str(); + out.atime_state = static_cast(GKFS_DATA->atime_state()); + out.mtime_state = static_cast(GKFS_DATA->mtime_state()); + out.ctime_state = static_cast(GKFS_DATA->ctime_state()); + out.link_cnt_state = static_cast(GKFS_DATA->link_cnt_state()); + out.blocks_state = static_cast(GKFS_DATA->blocks_state()); out.uid = getuid(); out.gid = getgid(); - ADAFS_DATA->spdlogger()->debug("{}() Sending output configs back to library", __func__); + GKFS_DATA->spdlogger()->debug("{}() Sending output configs back to library", __func__); auto hret = margo_respond(handle, &out); if (hret != HG_SUCCESS) { - ADAFS_DATA->spdlogger()->error("{}() Failed to respond to client to serve file system configurations", - __func__); + GKFS_DATA->spdlogger()->error("{}() Failed to respond to client to serve file system configurations", + __func__); } // Destroy handle when finished diff --git a/src/daemon/main.cpp b/src/daemon/main.cpp index a1bbd6890..67d47bf4b 100644 --- a/src/daemon/main.cpp +++ b/src/daemon/main.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -43,100 +44,99 @@ static mutex mtx; void init_environment() { // Initialize metadata db - std::string metadata_path = ADAFS_DATA->metadir() + "/rocksdb"s; - ADAFS_DATA->spdlogger()->debug("{}() Initializing metadata DB: '{}'", __func__, metadata_path); + std::string metadata_path = GKFS_DATA->metadir() + "/rocksdb"s; + GKFS_DATA->spdlogger()->debug("{}() Initializing metadata DB: '{}'", __func__, metadata_path); try { - ADAFS_DATA->mdb(std::make_shared(metadata_path)); - } catch (const std::exception & e) { - ADAFS_DATA->spdlogger()->error("{}() Failed to initialize metadata DB: {}", __func__, e.what()); + GKFS_DATA->mdb(std::make_shared(metadata_path)); + } catch (const std::exception& e) { + GKFS_DATA->spdlogger()->error("{}() Failed to initialize metadata DB: {}", __func__, e.what()); throw; } // Initialize data backend - std::string chunk_storage_path = ADAFS_DATA->rootdir() + "/data/chunks"s; - ADAFS_DATA->spdlogger()->debug("{}() Initializing storage backend: '{}'", __func__, chunk_storage_path); + std::string chunk_storage_path = GKFS_DATA->rootdir() + "/data/chunks"s; + GKFS_DATA->spdlogger()->debug("{}() Initializing storage backend: '{}'", __func__, chunk_storage_path); bfs::create_directories(chunk_storage_path); try { - ADAFS_DATA->storage(std::make_shared(chunk_storage_path, CHUNKSIZE)); - } catch (const std::exception & e) { - ADAFS_DATA->spdlogger()->error("{}() Failed to initialize storage backend: {}", __func__, e.what()); + GKFS_DATA->storage(std::make_shared(chunk_storage_path, gkfs_config::rpc::chunksize)); + } catch (const std::exception& e) { + GKFS_DATA->spdlogger()->error("{}() Failed to initialize storage backend: {}", __func__, e.what()); throw; } // Init margo for RPC - ADAFS_DATA->spdlogger()->debug("{}() Initializing RPC server: '{}'", - __func__, ADAFS_DATA->bind_addr()); + GKFS_DATA->spdlogger()->debug("{}() Initializing RPC server: '{}'", + __func__, GKFS_DATA->bind_addr()); try { - init_rpc_server(ADAFS_DATA->bind_addr()); - } catch (const std::exception & e) { - ADAFS_DATA->spdlogger()->error("{}() Failed to initialize RPC server: {}", __func__, e.what()); + 
init_rpc_server(GKFS_DATA->bind_addr()); + } catch (const std::exception& e) { + GKFS_DATA->spdlogger()->error("{}() Failed to initialize RPC server: {}", __func__, e.what()); throw; } // Init Argobots ESs to drive IO try { - ADAFS_DATA->spdlogger()->debug("{}() Initializing I/O pool", __func__); + GKFS_DATA->spdlogger()->debug("{}() Initializing I/O pool", __func__); init_io_tasklet_pool(); - } catch (const std::exception & e) { - ADAFS_DATA->spdlogger()->error("{}() Failed to initialize Argobots pool for I/O: {}", __func__, e.what()); + } catch (const std::exception& e) { + GKFS_DATA->spdlogger()->error("{}() Failed to initialize Argobots pool for I/O: {}", __func__, e.what()); throw; } // TODO set metadata configurations. these have to go into a user configurable file that is parsed here - ADAFS_DATA->atime_state(MDATA_USE_ATIME); - ADAFS_DATA->mtime_state(MDATA_USE_MTIME); - ADAFS_DATA->ctime_state(MDATA_USE_CTIME); - ADAFS_DATA->link_cnt_state(MDATA_USE_LINK_CNT); - ADAFS_DATA->blocks_state(MDATA_USE_BLOCKS); + GKFS_DATA->atime_state(gkfs_config::metadata::use_atime); + GKFS_DATA->mtime_state(gkfs_config::metadata::use_mtime); + GKFS_DATA->ctime_state(gkfs_config::metadata::use_ctime); + GKFS_DATA->link_cnt_state(gkfs_config::metadata::use_link_cnt); + GKFS_DATA->blocks_state(gkfs_config::metadata::use_blocks); // Create metadentry for root directory Metadata root_md{S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO}; try { create_metadentry("/", root_md); - } catch (const std::exception& e ) { + } catch (const std::exception& e) { throw runtime_error("Failed to write root metadentry to KV store: "s + e.what()); } - - if (!ADAFS_DATA->hosts_file().empty()) { - populate_hosts_file(); + // setup hostfile to let clients know that a daemon is running on this host + if (!GKFS_DATA->hosts_file().empty()) { + gkfs::util::populate_hosts_file(); } - - ADAFS_DATA->spdlogger()->info("Startup successful. Daemon is ready."); + GKFS_DATA->spdlogger()->info("Startup successful. 
Daemon is ready."); } /** * Destroys the margo, argobots, and mercury environments */ void destroy_enviroment() { - ADAFS_DATA->spdlogger()->debug("{}() Removing mount directory", __func__); + GKFS_DATA->spdlogger()->debug("{}() Removing mount directory", __func__); boost::system::error_code ecode; - bfs::remove_all(ADAFS_DATA->mountdir(), ecode); - ADAFS_DATA->spdlogger()->debug("{}() Freeing I/O executions streams", __func__); + bfs::remove_all(GKFS_DATA->mountdir(), ecode); + GKFS_DATA->spdlogger()->debug("{}() Freeing I/O executions streams", __func__); for (unsigned int i = 0; i < RPC_DATA->io_streams().size(); i++) { ABT_xstream_join(RPC_DATA->io_streams().at(i)); ABT_xstream_free(&RPC_DATA->io_streams().at(i)); } - if (!ADAFS_DATA->hosts_file().empty()) { - ADAFS_DATA->spdlogger()->debug("{}() Removing hosts file", __func__); + if (!GKFS_DATA->hosts_file().empty()) { + GKFS_DATA->spdlogger()->debug("{}() Removing hosts file", __func__); try { destroy_hosts_file(); } catch (const bfs::filesystem_error& e) { - ADAFS_DATA->spdlogger()->debug("{}() hosts file not found", __func__); + GKFS_DATA->spdlogger()->debug("{}() hosts file not found", __func__); } } if (RPC_DATA->server_rpc_mid() != nullptr) { - ADAFS_DATA->spdlogger()->debug("{}() Finalizing margo RPC server", __func__); + GKFS_DATA->spdlogger()->debug("{}() Finalizing margo RPC server", __func__); margo_finalize(RPC_DATA->server_rpc_mid()); } - ADAFS_DATA->spdlogger()->info("{}() Closing metadata DB", __func__); - ADAFS_DATA->close_mdb(); + GKFS_DATA->spdlogger()->info("{}() Closing metadata DB", __func__); + GKFS_DATA->close_mdb(); } void init_io_tasklet_pool() { - assert(DAEMON_IO_XSTREAMS >= 0); - unsigned int xstreams_num = DAEMON_IO_XSTREAMS; + assert(gkfs_config::rpc::daemon_io_xstreams >= 0); + unsigned int xstreams_num = gkfs_config::rpc::daemon_io_xstreams; //retrieve the pool of the just created scheduler ABT_pool pool; @@ -177,7 +177,7 @@ void init_rpc_server(const string & protocol_port) { MARGO_SERVER_MODE, &hg_options, HG_TRUE, - DAEMON_RPC_HANDLER_XSTREAMS); + gkfs_config::rpc::daemon_handler_xstreams); if (mid == MARGO_INSTANCE_NULL) { throw runtime_error("Failed to initialize the Margo RPC server"); } @@ -199,7 +199,7 @@ void init_rpc_server(const string & protocol_port) { std::string addr_self_str(addr_self_cstring); RPC_DATA->self_addr_str(addr_self_str); - ADAFS_DATA->spdlogger()->info("{}() Accepting RPCs on address {}", __func__, addr_self_cstring); + GKFS_DATA->spdlogger()->info("{}() Accepting RPCs on address {}", __func__, addr_self_cstring); // Put context and class into RPC_data object RPC_DATA->server_rpc_mid(mid); @@ -213,31 +213,31 @@ void init_rpc_server(const string & protocol_port) { * @param hg_class */ void register_server_rpcs(margo_instance_id mid) { - MARGO_REGISTER(mid, hg_tag::fs_config, void, rpc_config_out_t, rpc_srv_fs_config); - MARGO_REGISTER(mid, hg_tag::create, rpc_mk_node_in_t, rpc_err_out_t, rpc_srv_mk_node); - MARGO_REGISTER(mid, hg_tag::stat, rpc_path_only_in_t, rpc_stat_out_t, rpc_srv_stat); - MARGO_REGISTER(mid, hg_tag::decr_size, rpc_trunc_in_t, rpc_err_out_t, rpc_srv_decr_size); - MARGO_REGISTER(mid, hg_tag::remove, rpc_rm_node_in_t, rpc_err_out_t, rpc_srv_rm_node); - MARGO_REGISTER(mid, hg_tag::update_metadentry, rpc_update_metadentry_in_t, rpc_err_out_t, + MARGO_REGISTER(mid, gkfs::hg_tag::fs_config, void, rpc_config_out_t, rpc_srv_fs_config); + MARGO_REGISTER(mid, gkfs::hg_tag::create, rpc_mk_node_in_t, rpc_err_out_t, rpc_srv_mk_node); + MARGO_REGISTER(mid, 
gkfs::hg_tag::stat, rpc_path_only_in_t, rpc_stat_out_t, rpc_srv_stat); + MARGO_REGISTER(mid, gkfs::hg_tag::decr_size, rpc_trunc_in_t, rpc_err_out_t, rpc_srv_decr_size); + MARGO_REGISTER(mid, gkfs::hg_tag::remove, rpc_rm_node_in_t, rpc_err_out_t, rpc_srv_rm_node); + MARGO_REGISTER(mid, gkfs::hg_tag::update_metadentry, rpc_update_metadentry_in_t, rpc_err_out_t, rpc_srv_update_metadentry); - MARGO_REGISTER(mid, hg_tag::get_metadentry_size, rpc_path_only_in_t, rpc_get_metadentry_size_out_t, + MARGO_REGISTER(mid, gkfs::hg_tag::get_metadentry_size, rpc_path_only_in_t, rpc_get_metadentry_size_out_t, rpc_srv_get_metadentry_size); - MARGO_REGISTER(mid, hg_tag::update_metadentry_size, rpc_update_metadentry_size_in_t, + MARGO_REGISTER(mid, gkfs::hg_tag::update_metadentry_size, rpc_update_metadentry_size_in_t, rpc_update_metadentry_size_out_t, rpc_srv_update_metadentry_size); - MARGO_REGISTER(mid, hg_tag::get_dirents, rpc_get_dirents_in_t, rpc_get_dirents_out_t, + MARGO_REGISTER(mid, gkfs::hg_tag::get_dirents, rpc_get_dirents_in_t, rpc_get_dirents_out_t, rpc_srv_get_dirents); #ifdef HAS_SYMLINKS - MARGO_REGISTER(mid, hg_tag::mk_symlink, rpc_mk_symlink_in_t, rpc_err_out_t, rpc_srv_mk_symlink); + MARGO_REGISTER(mid, gkfs::hg_tag::mk_symlink, rpc_mk_symlink_in_t, rpc_err_out_t, rpc_srv_mk_symlink); #endif - MARGO_REGISTER(mid, hg_tag::write_data, rpc_write_data_in_t, rpc_data_out_t, rpc_srv_write_data); - MARGO_REGISTER(mid, hg_tag::read_data, rpc_read_data_in_t, rpc_data_out_t, rpc_srv_read_data); - MARGO_REGISTER(mid, hg_tag::trunc_data, rpc_trunc_in_t, rpc_err_out_t, rpc_srv_trunc_data); - MARGO_REGISTER(mid, hg_tag::chunk_stat, rpc_chunk_stat_in_t, rpc_chunk_stat_out_t, rpc_srv_chunk_stat); + MARGO_REGISTER(mid, gkfs::hg_tag::write_data, rpc_write_data_in_t, rpc_data_out_t, rpc_srv_write_data); + MARGO_REGISTER(mid, gkfs::hg_tag::read_data, rpc_read_data_in_t, rpc_data_out_t, rpc_srv_read_data); + MARGO_REGISTER(mid, gkfs::hg_tag::trunc_data, rpc_trunc_in_t, rpc_err_out_t, rpc_srv_trunc_data); + MARGO_REGISTER(mid, gkfs::hg_tag::chunk_stat, rpc_chunk_stat_in_t, rpc_chunk_stat_out_t, rpc_srv_chunk_stat); } void populate_hosts_file() { - const auto& hosts_file = ADAFS_DATA->hosts_file(); - ADAFS_DATA->spdlogger()->debug("{}() Populating hosts file: '{}'", __func__, hosts_file); + const auto& hosts_file = GKFS_DATA->hosts_file(); + GKFS_DATA->spdlogger()->debug("{}() Populating hosts file: '{}'", __func__, hosts_file); ofstream lfstream(hosts_file, ios::out | ios::app); if (!lfstream) { throw runtime_error( @@ -252,16 +252,16 @@ void populate_hosts_file() { } void destroy_hosts_file() { - std::remove(ADAFS_DATA->hosts_file().c_str()); + std::remove(GKFS_DATA->hosts_file().c_str()); } void shutdown_handler(int dummy) { - ADAFS_DATA->spdlogger()->info("{}() Received signal: '{}'", __func__, strsignal(dummy)); + GKFS_DATA->spdlogger()->info("{}() Received signal: '{}'", __func__, strsignal(dummy)); shutdown_please.notify_all(); } void initialize_loggers() { - std::string path = DEFAULT_DAEMON_LOG_PATH; + std::string path = gkfs_config::logging::daemon_log_path; // Try to get log path from env variable std::string env_path_key = DAEMON_ENV_PREFIX; env_path_key += "DAEMON_LOG_PATH"; @@ -270,7 +270,7 @@ void initialize_loggers() { path = env_path; } - spdlog::level::level_enum level = get_spdlog_level(DEFAULT_DAEMON_LOG_LEVEL); + spdlog::level::level_enum level = get_spdlog_level(gkfs_config::logging::daemon_log_level); // Try to get log path from env variable std::string env_level_key = DAEMON_ENV_PREFIX; 
env_level_key += "LOG_LEVEL"; @@ -328,7 +328,7 @@ int main(int argc, const char* argv[]) { #else cout << "Create check parents: OFF" << endl; #endif - cout << "Chunk size: " << CHUNKSIZE << " bytes" << endl; + cout << "Chunk size: " << gkfs_config::rpc::chunksize << " bytes" << endl; return 0; } @@ -338,9 +338,9 @@ int main(int argc, const char* argv[]) { std::cerr << "Error: " << e.what() << "\n"; return 1; } - + initialize_loggers(); - ADAFS_DATA->spdlogger(spdlog::get("main")); + GKFS_DATA->spdlogger(spdlog::get("main")); string addr; @@ -350,47 +350,47 @@ int main(int argc, const char* argv[]) { addr = get_my_hostname(true); } - ADAFS_DATA->bind_addr(fmt::format("{}://{}", RPC_PROTOCOL, addr)); + GKFS_DATA->bind_addr(fmt::format("{}://{}", RPC_PROTOCOL, addr)); string hosts_file; if (vm.count("hosts-file")) { hosts_file = vm["hosts-file"].as(); } else { - hosts_file = - gkfs::env::get_var(gkfs::env::HOSTS_FILE, DEFAULT_HOSTS_FILE); + hosts_file = + gkfs::env::get_var(gkfs::env::HOSTS_FILE, gkfs_config::hostfile_path); } - ADAFS_DATA->hosts_file(hosts_file); + GKFS_DATA->hosts_file(hosts_file); - ADAFS_DATA->spdlogger()->info("{}() Initializing environment", __func__); + GKFS_DATA->spdlogger()->info("{}() Initializing environment", __func__); assert(vm.count("mountdir")); auto mountdir = vm["mountdir"].as(); - // Create mountdir. We use this dir to get some information on the underlying fs with statfs in adafs_statfs + // Create mountdir. We use this dir to get some information on the underlying fs with statfs in gkfs_statfs bfs::create_directories(mountdir); - ADAFS_DATA->mountdir(bfs::canonical(mountdir).native()); + GKFS_DATA->mountdir(bfs::canonical(mountdir).native()); assert(vm.count("rootdir")); auto rootdir = vm["rootdir"].as(); auto rootdir_path = bfs::path(rootdir) / fmt::format_int(getpid()).str(); - ADAFS_DATA->spdlogger()->debug("{}() Root directory: '{}'", + GKFS_DATA->spdlogger()->debug("{}() Root directory: '{}'", __func__, rootdir_path.native()); bfs::create_directories(rootdir_path); - ADAFS_DATA->rootdir(rootdir_path.native()); + GKFS_DATA->rootdir(rootdir_path.native()); if (vm.count("metadir")) { auto metadir = vm["metadir"].as(); bfs::create_directories(metadir); - ADAFS_DATA->metadir(bfs::canonical(metadir).native()); + GKFS_DATA->metadir(bfs::canonical(metadir).native()); } else { // use rootdir as metadata dir - ADAFS_DATA->metadir(ADAFS_DATA->rootdir()); + GKFS_DATA->metadir(GKFS_DATA->rootdir()); } try { init_environment(); } catch (const std::exception& e) { auto emsg = fmt::format("Failed to initialize environment: {}", e.what()); - ADAFS_DATA->spdlogger()->error(emsg); + GKFS_DATA->spdlogger()->error(emsg); cerr << emsg << endl; destroy_enviroment(); exit(EXIT_FAILURE); @@ -403,8 +403,8 @@ int main(int argc, const char* argv[]) { unique_lock lk(mtx); // Wait for shutdown signal to initiate shutdown protocols shutdown_please.wait(lk); - ADAFS_DATA->spdlogger()->info("{}() Shutting down", __func__); + GKFS_DATA->spdlogger()->info("{}() Shutting down...", __func__); destroy_enviroment(); - ADAFS_DATA->spdlogger()->info("{}() Exiting", __func__); + GKFS_DATA->spdlogger()->info("{}() Complete. 
Exiting...", __func__); return 0; } diff --git a/src/daemon/ops/metadentry.cpp b/src/daemon/ops/metadentry.cpp index 73d1d43ce..968b47aa5 100644 --- a/src/daemon/ops/metadentry.cpp +++ b/src/daemon/ops/metadentry.cpp @@ -26,22 +26,22 @@ using namespace std; void create_metadentry(const std::string& path, Metadata& md) { // update metadata object based on what metadata is needed - if (ADAFS_DATA->atime_state() || ADAFS_DATA->mtime_state() || ADAFS_DATA->ctime_state()) { + if (GKFS_DATA->atime_state() || GKFS_DATA->mtime_state() || GKFS_DATA->ctime_state()) { std::time_t time; std::time(&time); auto time_s = fmt::format_int(time).str(); - if (ADAFS_DATA->atime_state()) + if (GKFS_DATA->atime_state()) md.atime(time); - if (ADAFS_DATA->mtime_state()) + if (GKFS_DATA->mtime_state()) md.mtime(time); - if (ADAFS_DATA->ctime_state()) + if (GKFS_DATA->ctime_state()) md.ctime(time); } - ADAFS_DATA->mdb()->put(path, md.serialize()); + GKFS_DATA->mdb()->put(path, md.serialize()); } std::string get_metadentry_str(const std::string& path) { - return ADAFS_DATA->mdb()->get(path); + return GKFS_DATA->mdb()->get(path); } /** @@ -60,8 +60,8 @@ Metadata get_metadentry(const std::string& path) { * @return */ void remove_node(const string& path) { - ADAFS_DATA->mdb()->remove(path); // remove metadentry - ADAFS_DATA->storage()->destroy_chunk_space(path); // destroys all chunks for the path on this node + GKFS_DATA->mdb()->remove(path); // remove metadentry + GKFS_DATA->storage()->destroy_chunk_space(path); // destroys all chunks for the path on this node } /** @@ -81,13 +81,13 @@ size_t get_metadentry_size(const string& path) { * @return the updated size */ void update_metadentry_size(const string& path, size_t io_size, off64_t offset, bool append) { - ADAFS_DATA->mdb()->increase_size(path, io_size + offset, append); + GKFS_DATA->mdb()->increase_size(path, io_size + offset, append); } void update_metadentry(const string& path, Metadata& md) { - ADAFS_DATA->mdb()->update(path, path, md.serialize()); + GKFS_DATA->mdb()->update(path, path, md.serialize()); } std::vector> get_dirents(const std::string& dir){ - return ADAFS_DATA->mdb()->get_dirents(dir); + return GKFS_DATA->mdb()->get_dirents(dir); } \ No newline at end of file diff --git a/src/daemon/util.cpp b/src/daemon/util.cpp new file mode 100644 index 000000000..c0e520493 --- /dev/null +++ b/src/daemon/util.cpp @@ -0,0 +1,40 @@ +/* + Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + + This software was partially supported by the + EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). + + This software was partially supported by the + ADA-FS project under the SPPEXA project funded by the DFG. 
+ + SPDX-License-Identifier: MIT +*/ +#include +#include +#include + +#include +#include + +using namespace std; + +void gkfs::util::populate_hosts_file() { + const auto& hosts_file = GKFS_DATA->hosts_file(); + GKFS_DATA->spdlogger()->debug("{}() Populating hosts file: '{}'", __func__, hosts_file); + ofstream lfstream(hosts_file, ios::out | ios::app); + if (!lfstream) { + throw runtime_error( + fmt::format("Failed to open hosts file '{}': {}", hosts_file, strerror(errno))); + } + lfstream << fmt::format("{} {}", get_my_hostname(true), RPC_DATA->self_addr_str()) << std::endl; + if (!lfstream) { + throw runtime_error( + fmt::format("Failed to write on hosts file '{}': {}", hosts_file, strerror(errno))); + } + lfstream.close(); +} + +void gkfs::util::destroy_hosts_file() { + std::remove(GKFS_DATA->hosts_file().c_str()); +} diff --git a/src/global/CMakeLists.txt b/src/global/CMakeLists.txt index 25dca79e2..61455f69a 100644 --- a/src/global/CMakeLists.txt +++ b/src/global/CMakeLists.txt @@ -25,6 +25,7 @@ target_sources(env_util PUBLIC ${INCLUDE_DIR}/global/env_util.hpp PRIVATE + ${INCLUDE_DIR}/config.hpp ${CMAKE_CURRENT_LIST_DIR}/env_util.cpp ) diff --git a/src/global/metadata.cpp b/src/global/metadata.cpp index 5457ab679..45669cf36 100644 --- a/src/global/metadata.cpp +++ b/src/global/metadata.cpp @@ -11,8 +11,8 @@ SPDX-License-Identifier: MIT */ -#include "global/metadata.hpp" -#include "global/configure.hpp" +#include +#include #include #include @@ -75,31 +75,31 @@ Metadata::Metadata(const std::string& binary_str) { ptr += read; // The order is important. don't change. - if (MDATA_USE_ATIME) { + if (gkfs_config::metadata::use_atime) { assert(*ptr == MSP); atime_ = static_cast(std::stol(++ptr, &read)); assert(read > 0); ptr += read; } - if (MDATA_USE_MTIME) { + if (gkfs_config::metadata::use_mtime) { assert(*ptr == MSP); mtime_ = static_cast(std::stol(++ptr, &read)); assert(read > 0); ptr += read; } - if (MDATA_USE_CTIME) { + if (gkfs_config::metadata::use_ctime) { assert(*ptr == MSP); ctime_ = static_cast(std::stol(++ptr, &read)); assert(read > 0); ptr += read; } - if (MDATA_USE_LINK_CNT) { + if (gkfs_config::metadata::use_link_cnt) { assert(*ptr == MSP); link_count_ = static_cast(std::stoul(++ptr, &read)); assert(read > 0); ptr += read; } - if (MDATA_USE_BLOCKS) { // last one will not encounter a delimiter anymore + if (gkfs_config::metadata::use_blocks) { // last one will not encounter a delimiter anymore assert(*ptr == MSP); blocks_ = static_cast(std::stoul(++ptr, &read)); assert(read > 0); @@ -111,7 +111,7 @@ Metadata::Metadata(const std::string& binary_str) { assert(*ptr == MSP); target_path_ = ++ptr; // target_path should be there only if this is a link - assert(target_path_.size() == 0 || S_ISLNK(mode_)); + assert(target_path_.empty() || S_ISLNK(mode_)); ptr += target_path_.size(); #endif @@ -126,23 +126,23 @@ std::string Metadata::serialize() const s += fmt::format_int(mode_).c_str(); // add mandatory mode s += MSP; s += fmt::format_int(size_).c_str(); // add mandatory size - if (MDATA_USE_ATIME) { + if (gkfs_config::metadata::use_atime) { s += MSP; s += fmt::format_int(atime_).c_str(); } - if (MDATA_USE_MTIME) { + if (gkfs_config::metadata::use_mtime) { s += MSP; s += fmt::format_int(mtime_).c_str(); } - if (MDATA_USE_CTIME) { + if (gkfs_config::metadata::use_ctime) { s += MSP; s += fmt::format_int(ctime_).c_str(); } - if (MDATA_USE_LINK_CNT) { + if (gkfs_config::metadata::use_link_cnt) { s += MSP; s += fmt::format_int(link_count_).c_str(); } - if (MDATA_USE_BLOCKS) { + if 
(gkfs_config::metadata::use_blocks) { s += MSP; s += fmt::format_int(blocks_).c_str(); } @@ -180,56 +180,56 @@ time_t Metadata::atime() const { return atime_; } -void Metadata::atime(time_t atime_) { - Metadata::atime_ = atime_; +void Metadata::atime(time_t atime) { + Metadata::atime_ = atime; } time_t Metadata::mtime() const { return mtime_; } -void Metadata::mtime(time_t mtime_) { - Metadata::mtime_ = mtime_; +void Metadata::mtime(time_t mtime) { + Metadata::mtime_ = mtime; } time_t Metadata::ctime() const { return ctime_; } -void Metadata::ctime(time_t ctime_) { - Metadata::ctime_ = ctime_; +void Metadata::ctime(time_t ctime) { + Metadata::ctime_ = ctime; } mode_t Metadata::mode() const { return mode_; } -void Metadata::mode(mode_t mode_) { - Metadata::mode_ = mode_; +void Metadata::mode(mode_t mode) { + Metadata::mode_ = mode; } nlink_t Metadata::link_count() const { return link_count_; } -void Metadata::link_count(nlink_t link_count_) { - Metadata::link_count_ = link_count_; +void Metadata::link_count(nlink_t link_count) { + Metadata::link_count_ = link_count; } size_t Metadata::size() const { return size_; } -void Metadata::size(size_t size_) { - Metadata::size_ = size_; +void Metadata::size(size_t size) { + Metadata::size_ = size; } blkcnt_t Metadata::blocks() const { return blocks_; } -void Metadata::blocks(blkcnt_t blocks_) { - Metadata::blocks_ = blocks_; +void Metadata::blocks(blkcnt_t blocks) { + Metadata::blocks_ = blocks; } #ifdef HAS_SYMLINKS -- GitLab From 59b5ea2a9100ed28ad2bb13753fb789bb3b9adaf Mon Sep 17 00:00:00 2001 From: Marc Vef Date: Sun, 9 Feb 2020 11:54:03 +0100 Subject: [PATCH 02/25] Code maintenance: Reformatting code, removed dead code, rearranged imports Reformatted code w.r.t. unifying code styles as of the official CPPCoreGuidelines by Stroustrup and Sutter --- include/client/hooks.hpp | 66 +- include/client/intcp_functions.hpp | 80 -- include/client/intercept.hpp | 4 +- include/client/open_dir.hpp | 35 +- include/client/open_file_map.hpp | 3 +- include/client/preload.hpp | 1 - include/client/preload_context.hpp | 36 +- include/client/preload_util.hpp | 20 - include/client/resolve.hpp | 5 +- include/client/rpc/hg_rpcs.hpp | 786 ++++++++++-------- include/client/rpc/ld_rpc_data_ws.hpp | 12 +- include/client/rpc/ld_rpc_management.hpp | 2 - include/client/rpc/ld_rpc_metadentry.hpp | 23 +- include/daemon/backend/data/chunk_storage.hpp | 69 +- include/daemon/backend/exceptions.hpp | 4 +- include/daemon/backend/metadata/db.hpp | 51 +- include/daemon/backend/metadata/merge.hpp | 107 +-- include/daemon/classes/fs_data.hpp | 21 +- include/daemon/classes/rpc_data.hpp | 1 - include/daemon/handler/rpc_defs.hpp | 2 + include/daemon/main.hpp | 6 +- include/daemon/ops/metadentry.hpp | 2 - include/daemon/util.hpp | 3 - include/global/chunk_calc_util.hpp | 1 + include/global/log_util.hpp | 2 - include/global/metadata.hpp | 28 +- include/global/path_util.hpp | 7 +- include/global/rpc/distributor.hpp | 60 +- include/global/rpc/rpc_utils.hpp | 4 +- src/client/gkfs_functions.cpp | 95 ++- src/client/hooks.cpp | 171 ++-- src/client/intercept.cpp | 439 +++++----- src/client/logging.cpp | 4 + src/client/open_dir.cpp | 8 +- src/client/open_file_map.cpp | 21 +- src/client/preload.cpp | 60 +- src/client/preload_context.cpp | 91 +- src/client/preload_util.cpp | 20 +- src/client/resolve.cpp | 50 +- src/client/rpc/ld_rpc_data_ws.cpp | 655 ++++++++------- src/client/rpc/ld_rpc_management.cpp | 84 +- src/client/rpc/ld_rpc_metadentry.cpp | 705 ++++++++-------- 
src/daemon/backend/data/chunk_storage.cpp | 144 ++-- src/daemon/backend/metadata/db.cpp | 46 +- src/daemon/backend/metadata/merge.cpp | 79 +- src/daemon/classes/fs_data.cpp | 36 +- src/daemon/handler/h_data.cpp | 15 +- src/daemon/handler/h_metadentry.cpp | 21 +- src/daemon/handler/h_preload.cpp | 3 +- src/daemon/main.cpp | 22 +- src/daemon/ops/metadentry.cpp | 2 +- src/daemon/util.cpp | 1 + src/global/env_util.cpp | 5 +- src/global/log_util.cpp | 37 +- src/global/metadata.cpp | 38 +- src/global/path_util.cpp | 27 +- src/global/rpc/distributor.cpp | 34 +- src/global/rpc/rpc_utils.cpp | 46 +- 58 files changed, 2211 insertions(+), 2189 deletions(-) delete mode 100644 include/client/intcp_functions.hpp diff --git a/include/client/hooks.hpp b/include/client/hooks.hpp index 98057e3b5..d1328b5c9 100644 --- a/include/client/hooks.hpp +++ b/include/client/hooks.hpp @@ -18,7 +18,8 @@ #include -int hook_openat(int dirfd, const char *cpath, int flags, mode_t mode); +int hook_openat(int dirfd, const char* cpath, int flags, mode_t mode); + int hook_close(int fd); int hook_stat(const char* path, struct stat* buf); @@ -28,37 +29,66 @@ int hook_lstat(const char* path, struct stat* buf); int hook_fstat(unsigned int fd, struct stat* buf); int hook_fstatat(int dirfd, const char* cpath, struct stat* buf, int flags); + int hook_read(unsigned int fd, void* buf, size_t count); -int hook_pread(unsigned int fd, char * buf, size_t count, loff_t pos); -int hook_write(unsigned int fd, const char * buf, size_t count); -int hook_pwrite(unsigned int fd, const char * buf, size_t count, loff_t pos); -int hook_writev(unsigned long fd, const struct iovec * iov, unsigned long iovcnt); -int hook_pwritev(unsigned long fd, const struct iovec * iov, unsigned long iovcnt, + +int hook_pread(unsigned int fd, char* buf, size_t count, loff_t pos); + +int hook_write(unsigned int fd, const char* buf, size_t count); + +int hook_pwrite(unsigned int fd, const char* buf, size_t count, loff_t pos); + +int hook_writev(unsigned long fd, const struct iovec* iov, unsigned long iovcnt); + +int hook_pwritev(unsigned long fd, const struct iovec* iov, unsigned long iovcnt, unsigned long pos_l, unsigned long pos_h); -int hook_unlinkat(int dirfd, const char * cpath, int flags); -int hook_symlinkat(const char * oldname, int newdfd, const char * newname); + +int hook_unlinkat(int dirfd, const char* cpath, int flags); + +int hook_symlinkat(const char* oldname, int newdfd, const char* newname); + int hook_access(const char* path, int mask); -int hook_faccessat(int dirfd, const char * cpath, int mode); + +int hook_faccessat(int dirfd, const char* cpath, int mode); + off_t hook_lseek(unsigned int fd, off_t offset, unsigned int whence); -int hook_truncate(const char *path, long length); + +int hook_truncate(const char* path, long length); + int hook_ftruncate(unsigned int fd, unsigned long length); + int hook_dup(unsigned int fd); + int hook_dup2(unsigned int oldfd, unsigned int newfd); + int hook_dup3(unsigned int oldfd, unsigned int newfd, int flags); -int hook_getdents(unsigned int fd, struct linux_dirent *dirp, unsigned int count); -int hook_getdents64(unsigned int fd, struct linux_dirent64 *dirp, unsigned int count); -int hook_mkdirat(int dirfd, const char * cpath, mode_t mode); + +int hook_getdents(unsigned int fd, struct linux_dirent* dirp, unsigned int count); + +int hook_getdents64(unsigned int fd, struct linux_dirent64* dirp, unsigned int count); + +int hook_mkdirat(int dirfd, const char* cpath, mode_t mode); + int hook_fchmodat(int dirfd, const 
char* path, mode_t mode); + int hook_fchmod(unsigned int dirfd, mode_t mode); + int hook_chdir(const char* path); + int hook_fchdir(unsigned int fd); -int hook_getcwd(char * buf, unsigned long size); -int hook_readlinkat(int dirfd, const char * cpath, char * buf, int bufsiz); + +int hook_getcwd(char* buf, unsigned long size); + +int hook_readlinkat(int dirfd, const char* cpath, char* buf, int bufsiz); + int hook_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg); -int hook_renameat(int olddfd, const char * oldname, int newdfd, const char * newname, + +int hook_renameat(int olddfd, const char* oldname, int newdfd, const char* newname, unsigned int flags); -int hook_statfs(const char * path, struct statfs * buf); -int hook_fstatfs(unsigned int fd, struct statfs * buf); + +int hook_statfs(const char* path, struct statfs* buf); + +int hook_fstatfs(unsigned int fd, struct statfs* buf); #endif diff --git a/include/client/intcp_functions.hpp b/include/client/intcp_functions.hpp deleted file mode 100644 index 22f850ff0..000000000 --- a/include/client/intcp_functions.hpp +++ /dev/null @@ -1,80 +0,0 @@ -/* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany - - This software was partially supported by the - EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). - - This software was partially supported by the - ADA-FS project under the SPPEXA project funded by the DFG. - - SPDX-License-Identifier: MIT -*/ - -#ifndef GEKKOFS_INTCP_FUNCTIONS_HPP -#define GEKKOFS_INTCP_FUNCTIONS_HPP - -#include - -extern "C" { - -# define weak_alias(name, aliasname) \ - extern __typeof (name) aliasname __attribute__ ((weak, alias (#name))); - -# define strong_alias(name, aliasname) \ - extern __typeof (name) aliasname __attribute__ ((alias (#name))); - -/** - * In the glibc headers the following two functions (readdir & opendir) - * marks the @dirp parameter with a non-null attribute. - * If we try to implement them directly instead of the weak aliased function, - * the compiler will assume that the parameter is actually null and - * will optimized expression like `(dirp == nullptr)`. 
-*/ - -struct dirent* intcp_readdir(DIR* dirp); -weak_alias(intcp_readdir, readdir) - -int intcp_dirfd(DIR* dirp); -weak_alias(intcp_dirfd, dirfd) - -int intcp_closedir(DIR* dirp); -weak_alias(intcp_closedir, closedir) - -size_t intcp_fread(void *ptr, size_t size, size_t nmemb, FILE *stream); -strong_alias(intcp_fread, fread) -strong_alias(intcp_fread, fread_unlocked) -size_t intcp_fwrite(const void *ptr, size_t size, size_t nmemb, FILE *stream); -strong_alias(intcp_fwrite, fwrite) -strong_alias(intcp_fwrite, fwrite_unlocked) - -int intcp_open(const char* path, int flags, ...); -strong_alias(intcp_open, open) -strong_alias(intcp_open, __open_2) -int intcp_open64(const char* path, int flags, ...); -strong_alias(intcp_open64, open64) -strong_alias(intcp_open64, __open64_2) -int intcp_openat(int dirfd, const char *cpath, int flags, ...); -strong_alias(intcp_openat, openat) -strong_alias(intcp_openat, __openat_2) -int intcp_openat64(int dirfd, const char *path, int flags, ...); -strong_alias(intcp_openat64, openat64) -strong_alias(intcp_openat64, __openat64_2) -int intcp_symlink(const char* oldname, const char* newname) noexcept; -strong_alias(intcp_symlink, symlink) -strong_alias(intcp_symlink, __symlink) -int intcp_symlinkat(const char* oldname, int newfd, const char* newname) noexcept; -strong_alias(intcp_symlinkat, symlinkat) -ssize_t intcp_readlink(const char * cpath, char * buf, size_t bufsize) noexcept; -strong_alias(intcp_readlink, readlink) -ssize_t intcp_readlinkat(int dirfd, const char * cpath, char * buf, size_t bufsize) noexcept; -strong_alias(intcp_readlinkat, readlinkat) - -int intcp_statvfs(const char *path, struct statvfs *buf) noexcept; -strong_alias(intcp_statvfs, statvfs) -int intcp_fstatvfs(int fd, struct statvfs *buf) noexcept; -strong_alias(intcp_fstatvfs, fstatvfs) - -#endif // GEKKOFS_INTCP_FUNCTIONS_HPP - -} // extern C diff --git a/include/client/intercept.hpp b/include/client/intercept.hpp index d49d248d0..e392f580b 100644 --- a/include/client/intercept.hpp +++ b/include/client/intercept.hpp @@ -24,10 +24,12 @@ int hook_guard_wrapper(long syscall_number, long arg0, long arg1, long arg2, long arg3, long arg4, long arg5, - long *syscall_return_value); + long* syscall_return_value); void start_self_interception(); + void start_interception(); + void stop_interception(); #endif diff --git a/include/client/open_dir.hpp b/include/client/open_dir.hpp index 583c122b9..fc2fbe81b 100644 --- a/include/client/open_dir.hpp +++ b/include/client/open_dir.hpp @@ -24,25 +24,30 @@ class DirEntry { - private: - std::string name_; - FileType type_; - public: - DirEntry(const std::string& name, const FileType type); - const std::string& name(); - FileType type(); +private: + std::string name_; + FileType type_; +public: + DirEntry(const std::string& name, FileType type); + + const std::string& name(); + + FileType type(); }; -class OpenDir: public OpenFile { - private: - std::vector entries; +class OpenDir : public OpenFile { +private: + std::vector entries; + + +public: + explicit OpenDir(const std::string& path); + + void add(const std::string& name, const FileType& type); + const DirEntry& getdent(unsigned int pos); - public: - OpenDir(const std::string& path); - void add(const std::string& name, const FileType& type); - const DirEntry& getdent(unsigned int pos); - size_t size(); + size_t size(); }; diff --git a/include/client/open_file_map.hpp b/include/client/open_file_map.hpp index 7d76b780f..1e84c49fc 100644 --- a/include/client/open_file_map.hpp +++ 
b/include/client/open_file_map.hpp @@ -54,7 +54,7 @@ public: OpenFile(const std::string& path, int flags, FileType type = FileType::regular); - ~OpenFile(); + ~OpenFile() = default; // getter/setter std::string path() const; @@ -111,6 +111,7 @@ public: int dup2(int oldfd, int newfd); int generate_fd_idx(); + int get_fd_idx(); }; diff --git a/include/client/preload.hpp b/include/client/preload.hpp index e1e12dcf5..a19b03e2e 100644 --- a/include/client/preload.hpp +++ b/include/client/preload.hpp @@ -16,7 +16,6 @@ #include - #define EUNKNOWN (-1) #define CTX PreloadContext::getInstance() diff --git a/include/client/preload_context.hpp b/include/client/preload_context.hpp index cd9dcfeb5..4f0e3bc5f 100644 --- a/include/client/preload_context.hpp +++ b/include/client/preload_context.hpp @@ -25,12 +25,14 @@ /* Forward declarations */ class OpenFileMap; -class Distributor; -namespace gkfs { namespace log { - struct logger; -}} +class Distributor; +namespace gkfs { + namespace log { + struct logger; + } +} struct FsConfig { // configurable metadata @@ -59,7 +61,7 @@ class PreloadContext { static auto constexpr MIN_INTERNAL_FD = MAX_OPEN_FDS - MAX_INTERNAL_FDS; static auto constexpr MAX_USER_FDS = MIN_INTERNAL_FD; - private: +private: PreloadContext(); std::shared_ptr ofm_; @@ -80,53 +82,69 @@ class PreloadContext { bool internal_fds_must_relocate_; std::bitset protected_fds_; - public: +public: static PreloadContext* getInstance() { static PreloadContext instance; return &instance; } PreloadContext(PreloadContext const&) = delete; + void operator=(PreloadContext const&) = delete; void init_logging(); + void mountdir(const std::string& path); + const std::string& mountdir() const; + const std::vector& mountdir_components() const; void cwd(const std::string& path); + const std::string& cwd() const; const std::vector& hosts() const; + void hosts(const std::vector& addrs); + void clear_hosts(); uint64_t local_host_id() const; + void local_host_id(uint64_t id); RelativizeStatus relativize_fd_path(int dirfd, - const char * raw_path, + const char* raw_path, std::string& relative_path, bool resolve_last_link = true) const; - bool relativize_path(const char * raw_path, std::string& relative_path, bool resolve_last_link = true) const; + bool relativize_path(const char* raw_path, std::string& relative_path, bool resolve_last_link = true) const; + const std::shared_ptr& file_map() const; void distributor(std::shared_ptr distributor); + std::shared_ptr distributor() const; + const std::shared_ptr& fs_conf() const; void enable_interception(); + void disable_interception(); + bool interception_enabled() const; int register_internal_fd(int fd); + void unregister_internal_fd(int fd); + bool is_internal_fd(int fd) const; void protect_user_fds(); - void unprotect_user_fds(); + + void unprotect_user_fds(); }; diff --git a/include/client/preload_util.hpp b/include/client/preload_util.hpp index bb7b8b973..a2d0e1553 100644 --- a/include/client/preload_util.hpp +++ b/include/client/preload_util.hpp @@ -40,26 +40,6 @@ struct MetadentryUpdateFlags { namespace hermes { class async_engine; } extern std::unique_ptr ld_network_service; -// RPC IDs -extern hg_id_t rpc_config_id; -extern hg_id_t rpc_mk_node_id; -extern hg_id_t rpc_stat_id; -extern hg_id_t rpc_rm_node_id; -extern hg_id_t rpc_decr_size_id; -extern hg_id_t rpc_update_metadentry_id; -extern hg_id_t rpc_get_metadentry_size_id; -extern hg_id_t rpc_update_metadentry_size_id; -extern hg_id_t rpc_write_data_id; -extern hg_id_t rpc_read_data_id; -extern hg_id_t 
rpc_trunc_data_id; -extern hg_id_t rpc_get_dirents_id; -extern hg_id_t rpc_chunk_stat_id; - -#ifdef HAS_SYMLINKS -extern hg_id_t ipc_mk_symlink_id; -extern hg_id_t rpc_mk_symlink_id; -#endif - // function definitions namespace gkfs { namespace client { diff --git a/include/client/resolve.hpp b/include/client/resolve.hpp index 4e16d281c..35532fe23 100644 --- a/include/client/resolve.hpp +++ b/include/client/resolve.hpp @@ -13,13 +13,16 @@ #include -bool resolve_path (const std::string& path, std::string& resolved, bool resolve_last_link = true); +bool resolve_path(const std::string& path, std::string& resolved, bool resolve_last_link = true); std::string get_sys_cwd(); + void set_sys_cwd(const std::string& path); void set_env_cwd(const std::string& path); + void unset_env_cwd(); void init_cwd(); + void set_cwd(const std::string& path, bool internal); diff --git a/include/client/rpc/hg_rpcs.hpp b/include/client/rpc/hg_rpcs.hpp index 00465cecb..2b7613e28 100644 --- a/include/client/rpc/hg_rpcs.hpp +++ b/include/client/rpc/hg_rpcs.hpp @@ -38,9 +38,8 @@ namespace hermes { namespace detail { struct hg_void_t { }; -static HG_INLINE hg_return_t -hg_proc_void_t(hg_proc_t proc, void *data) -{ + static HG_INLINE hg_return_t + hg_proc_void_t(hg_proc_t proc, void* data) { (void) proc; (void) data; @@ -58,6 +57,7 @@ struct fs_config { // forward declarations of public input/output types for this RPC class input; + class output; // traits used so that the engine knows what to do with the RPC @@ -69,7 +69,7 @@ struct fs_config { using mercury_output_type = rpc_config_out_t; // RPC public identifier - // (N.B: we reuse the same IDs assigned by Margo so that the daemon + // (N.B: we reuse the same IDs assigned by Margo so that the daemon // understands Hermes RPCs) constexpr static const uint64_t public_id = 3033006080; @@ -83,27 +83,31 @@ struct fs_config { constexpr static const auto requires_response = true; // Mercury callback to serialize input arguments - constexpr static const auto mercury_in_proc_cb = - hermes::detail::hg_proc_void_t; + constexpr static const auto mercury_in_proc_cb = + hermes::detail::hg_proc_void_t; // Mercury callback to serialize output arguments - constexpr static const auto mercury_out_proc_cb = - HG_GEN_PROC_NAME(rpc_config_out_t); + constexpr static const auto mercury_out_proc_cb = + HG_GEN_PROC_NAME(rpc_config_out_t); class input { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: - input() { } + input() {} + input(input&& rhs) = default; + input(const input& other) = default; + input& operator=(input&& rhs) = default; + input& operator=(const input& other) = default; explicit - input(const hermes::detail::hg_void_t& other) { } + input(const hermes::detail::hg_void_t& other) {} explicit operator hermes::detail::hg_void_t() { @@ -113,20 +117,20 @@ struct fs_config { class output { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: output() : - m_mountdir(), - m_rootdir(), - m_atime_state(), - m_mtime_state(), - m_ctime_state(), - m_link_cnt_state(), - m_blocks_state(), - m_uid(), - m_gid() {} + m_mountdir(), + m_rootdir(), + m_atime_state(), + m_mtime_state(), + m_ctime_state(), + m_link_cnt_state(), + m_blocks_state(), + m_uid(), + m_gid() {} output(const std::string& mountdir, const std::string& rootdir, @@ -137,29 +141,32 @@ struct fs_config { bool blocks_state, uint32_t uid, uint32_t gid) : - m_mountdir(mountdir), - m_rootdir(rootdir), - m_atime_state(atime_state), - 
m_mtime_state(mtime_state), - m_ctime_state(ctime_state), - m_link_cnt_state(link_cnt_state), - m_blocks_state(blocks_state), - m_uid(uid), - m_gid(gid) {} + m_mountdir(mountdir), + m_rootdir(rootdir), + m_atime_state(atime_state), + m_mtime_state(mtime_state), + m_ctime_state(ctime_state), + m_link_cnt_state(link_cnt_state), + m_blocks_state(blocks_state), + m_uid(uid), + m_gid(gid) {} output(output&& rhs) = default; + output(const output& other) = default; + output& operator=(output&& rhs) = default; + output& operator=(const output& other) = default; - explicit + explicit output(const rpc_config_out_t& out) { - if(out.mountdir != nullptr) { + if (out.mountdir != nullptr) { m_mountdir = out.mountdir; } - if(out.rootdir != nullptr) { + if (out.rootdir != nullptr) { m_rootdir = out.rootdir; } @@ -182,12 +189,12 @@ struct fs_config { return m_rootdir; } - bool + bool atime_state() const { return m_atime_state; } - bool + bool mtime_state() const { return m_mtime_state; } @@ -197,17 +204,17 @@ struct fs_config { return m_ctime_state; } - bool + bool link_cnt_state() const { return m_link_cnt_state; } - bool + bool blocks_state() const { return m_blocks_state; } - uint32_t + uint32_t uid() const { return m_uid; } @@ -237,6 +244,7 @@ struct create { // forward declarations of public input/output types for this RPC class input; + class output; // traits used so that the engine knows what to do with the RPC @@ -248,7 +256,7 @@ struct create { using mercury_output_type = rpc_err_out_t; // RPC public identifier - // (N.B: we reuse the same IDs assigned by Margo so that the daemon + // (N.B: we reuse the same IDs assigned by Margo so that the daemon // understands Hermes RPCs) constexpr static const uint64_t public_id = 796590080; @@ -262,27 +270,30 @@ struct create { constexpr static const auto requires_response = true; // Mercury callback to serialize input arguments - constexpr static const auto mercury_in_proc_cb = - HG_GEN_PROC_NAME(rpc_mk_node_in_t); + constexpr static const auto mercury_in_proc_cb = + HG_GEN_PROC_NAME(rpc_mk_node_in_t); // Mercury callback to serialize output arguments - constexpr static const auto mercury_out_proc_cb = - HG_GEN_PROC_NAME(rpc_err_out_t); + constexpr static const auto mercury_out_proc_cb = + HG_GEN_PROC_NAME(rpc_err_out_t); class input { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: - input(const std::string& path, + input(const std::string& path, uint32_t mode) : - m_path(path), - m_mode(mode) { } + m_path(path), + m_mode(mode) {} input(input&& rhs) = default; + input(const input& other) = default; + input& operator=(input&& rhs) = default; + input& operator=(const input& other) = default; std::string @@ -297,8 +308,8 @@ struct create { explicit input(const rpc_mk_node_in_t& other) : - m_path(other.path), - m_mode(other.mode) { } + m_path(other.path), + m_mode(other.mode) {} explicit operator rpc_mk_node_in_t() { @@ -312,22 +323,25 @@ struct create { class output { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: output() : - m_err() {} + m_err() {} output(int32_t err) : - m_err(err) {} + m_err(err) {} output(output&& rhs) = default; + output(const output& other) = default; + output& operator=(output&& rhs) = default; + output& operator=(const output& other) = default; - explicit + explicit output(const rpc_err_out_t& out) { m_err = out.err; } @@ -348,6 +362,7 @@ struct stat { // forward declarations of public input/output types for this RPC class 
input; + class output; // traits used so that the engine knows what to do with the RPC @@ -359,7 +374,7 @@ struct stat { using mercury_output_type = rpc_stat_out_t; // RPC public identifier - // (N.B: we reuse the same IDs assigned by Margo so that the daemon + // (N.B: we reuse the same IDs assigned by Margo so that the daemon // understands Hermes RPCs) constexpr static const uint64_t public_id = 1396244480; @@ -373,25 +388,28 @@ struct stat { constexpr static const auto requires_response = true; // Mercury callback to serialize input arguments - constexpr static const auto mercury_in_proc_cb = - HG_GEN_PROC_NAME(rpc_path_only_in_t); + constexpr static const auto mercury_in_proc_cb = + HG_GEN_PROC_NAME(rpc_path_only_in_t); // Mercury callback to serialize output arguments - constexpr static const auto mercury_out_proc_cb = - HG_GEN_PROC_NAME(rpc_stat_out_t); + constexpr static const auto mercury_out_proc_cb = + HG_GEN_PROC_NAME(rpc_stat_out_t); class input { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: - input(const std::string& path ) : - m_path(path) { } + input(const std::string& path) : + m_path(path) {} input(input&& rhs) = default; + input(const input& other) = default; + input& operator=(input&& rhs) = default; + input& operator=(const input& other) = default; std::string @@ -401,7 +419,7 @@ struct stat { explicit input(const rpc_path_only_in_t& other) : - m_path(other.path) { } + m_path(other.path) {} explicit operator rpc_path_only_in_t() { @@ -414,28 +432,31 @@ struct stat { class output { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: output() : - m_err(), - m_db_val() {} + m_err(), + m_db_val() {} output(int32_t err, const std::string& db_val) : - m_err(err), - m_db_val(db_val) {} + m_err(err), + m_db_val(db_val) {} output(output&& rhs) = default; + output(const output& other) = default; + output& operator=(output&& rhs) = default; + output& operator=(const output& other) = default; - explicit + explicit output(const rpc_stat_out_t& out) { m_err = out.err; - if(out.db_val != nullptr) { + if (out.db_val != nullptr) { m_db_val = out.db_val; } } @@ -462,6 +483,7 @@ struct remove { // forward declarations of public input/output types for this RPC class input; + class output; // traits used so that the engine knows what to do with the RPC @@ -473,7 +495,7 @@ struct remove { using mercury_output_type = rpc_err_out_t; // RPC public identifier - // (N.B: we reuse the same IDs assigned by Margo so that the daemon + // (N.B: we reuse the same IDs assigned by Margo so that the daemon // understands Hermes RPCs) constexpr static const uint64_t public_id = 2549415936; @@ -487,25 +509,28 @@ struct remove { constexpr static const auto requires_response = true; // Mercury callback to serialize input arguments - constexpr static const auto mercury_in_proc_cb = - HG_GEN_PROC_NAME(rpc_rm_node_in_t); + constexpr static const auto mercury_in_proc_cb = + HG_GEN_PROC_NAME(rpc_rm_node_in_t); // Mercury callback to serialize output arguments - constexpr static const auto mercury_out_proc_cb = - HG_GEN_PROC_NAME(rpc_err_out_t); + constexpr static const auto mercury_out_proc_cb = + HG_GEN_PROC_NAME(rpc_err_out_t); class input { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: input(const std::string& path) : - m_path(path) { } + m_path(path) {} input(input&& rhs) = default; + input(const input& other) = default; + input& operator=(input&& rhs) 
= default; + input& operator=(const input& other) = default; std::string @@ -515,7 +540,7 @@ struct remove { explicit input(const rpc_rm_node_in_t& other) : - m_path(other.path) { } + m_path(other.path) {} explicit operator rpc_rm_node_in_t() { @@ -528,22 +553,25 @@ struct remove { class output { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: output() : - m_err() {} + m_err() {} output(int32_t err) : - m_err(err) {} + m_err(err) {} output(output&& rhs) = default; + output(const output& other) = default; + output& operator=(output&& rhs) = default; + output& operator=(const output& other) = default; - explicit + explicit output(const rpc_err_out_t& out) { m_err = out.err; } @@ -564,6 +592,7 @@ struct decr_size { // forward declarations of public input/output types for this RPC class input; + class output; // traits used so that the engine knows what to do with the RPC @@ -575,7 +604,7 @@ struct decr_size { using mercury_output_type = rpc_err_out_t; // RPC public identifier - // (N.B: we reuse the same IDs assigned by Margo so that the daemon + // (N.B: we reuse the same IDs assigned by Margo so that the daemon // understands Hermes RPCs) constexpr static const uint64_t public_id = 1291649024; @@ -589,26 +618,29 @@ struct decr_size { constexpr static const auto requires_response = true; // Mercury callback to serialize input arguments - constexpr static const auto mercury_in_proc_cb = - HG_GEN_PROC_NAME(rpc_trunc_in_t); + constexpr static const auto mercury_in_proc_cb = + HG_GEN_PROC_NAME(rpc_trunc_in_t); // Mercury callback to serialize output arguments - constexpr static const auto mercury_out_proc_cb = - HG_GEN_PROC_NAME(rpc_err_out_t); + constexpr static const auto mercury_out_proc_cb = + HG_GEN_PROC_NAME(rpc_err_out_t); class input { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: input(const std::string& path, uint64_t length) : - m_path(path), - m_length(length) { } + m_path(path), + m_length(length) {} input(input&& rhs) = default; + input(const input& other) = default; + input& operator=(input&& rhs) = default; + input& operator=(const input& other) = default; std::string @@ -623,8 +655,8 @@ struct decr_size { explicit input(const rpc_trunc_in_t& other) : - m_path(other.path), - m_length(other.length) { } + m_path(other.path), + m_length(other.length) {} explicit operator rpc_trunc_in_t() { @@ -638,22 +670,25 @@ struct decr_size { class output { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: output() : - m_err() {} + m_err() {} output(int32_t err) : - m_err(err) {} + m_err(err) {} output(output&& rhs) = default; + output(const output& other) = default; + output& operator=(output&& rhs) = default; + output& operator=(const output& other) = default; - explicit + explicit output(const rpc_err_out_t& out) { m_err = out.err; } @@ -674,6 +709,7 @@ struct update_metadentry { // forward declarations of public input/output types for this RPC class input; + class output; // traits used so that the engine knows what to do with the RPC @@ -685,7 +721,7 @@ struct update_metadentry { using mercury_output_type = rpc_err_out_t; // RPC public identifier - // (N.B: we reuse the same IDs assigned by Margo so that the daemon + // (N.B: we reuse the same IDs assigned by Margo so that the daemon // understands Hermes RPCs) constexpr static const uint64_t public_id = 99483648; @@ -699,20 +735,20 @@ struct update_metadentry { constexpr static const 
auto requires_response = true; // Mercury callback to serialize input arguments - constexpr static const auto mercury_in_proc_cb = - HG_GEN_PROC_NAME(rpc_update_metadentry_in_t); + constexpr static const auto mercury_in_proc_cb = + HG_GEN_PROC_NAME(rpc_update_metadentry_in_t); // Mercury callback to serialize output arguments - constexpr static const auto mercury_out_proc_cb = - HG_GEN_PROC_NAME(rpc_err_out_t); + constexpr static const auto mercury_out_proc_cb = + HG_GEN_PROC_NAME(rpc_err_out_t); class input { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: - input(const std::string& path, + input(const std::string& path, uint64_t nlink, uint32_t mode, uint32_t uid, @@ -729,27 +765,30 @@ struct update_metadentry { bool atime_flag, bool mtime_flag, bool ctime_flag) : - m_path(path), - m_nlink(nlink), - m_mode(mode), - m_uid(uid), - m_gid(gid), - m_size(size), - m_blocks(blocks), - m_atime(atime), - m_mtime(mtime), - m_ctime(ctime), - m_nlink_flag(nlink_flag), - m_mode_flag(mode_flag), - m_size_flag(size_flag), - m_block_flag(block_flag), - m_atime_flag(atime_flag), - m_mtime_flag(mtime_flag), - m_ctime_flag(ctime_flag) { } + m_path(path), + m_nlink(nlink), + m_mode(mode), + m_uid(uid), + m_gid(gid), + m_size(size), + m_blocks(blocks), + m_atime(atime), + m_mtime(mtime), + m_ctime(ctime), + m_nlink_flag(nlink_flag), + m_mode_flag(mode_flag), + m_size_flag(size_flag), + m_block_flag(block_flag), + m_atime_flag(atime_flag), + m_mtime_flag(mtime_flag), + m_ctime_flag(ctime_flag) {} input(input&& rhs) = default; + input(const input& other) = default; + input& operator=(input&& rhs) = default; + input& operator=(const input& other) = default; std::string @@ -762,104 +801,104 @@ struct update_metadentry { return m_nlink; } - uint32_t + uint32_t mode() const { return m_mode; } - uint32_t + uint32_t uid() const { return m_uid; } - uint32_t + uint32_t gid() const { return m_gid; } - int64_t + int64_t size() const { return m_size; } - int64_t + int64_t blocks() const { return m_blocks; } - int64_t + int64_t atime() const { return m_atime; } - int64_t + int64_t mtime() const { return m_mtime; } - int64_t + int64_t ctime() const { return m_ctime; } - bool + bool nlink_flag() const { return m_nlink_flag; } - bool + bool mode_flag() const { return m_mode_flag; } - bool + bool size_flag() const { return m_size_flag; } - bool + bool block_flag() const { return m_block_flag; } - bool + bool atime_flag() const { return m_atime_flag; } - bool + bool mtime_flag() const { return m_mtime_flag; } - bool + bool ctime_flag() const { return m_ctime_flag; } explicit input(const rpc_update_metadentry_in_t& other) : - m_path(other.path), - m_nlink(other.nlink), - m_mode(other.mode), - m_uid(other.uid), - m_gid(other.gid), - m_size(other.size), - m_blocks(other.blocks), - m_atime(other.atime), - m_mtime(other.mtime), - m_ctime(other.ctime), - m_nlink_flag(other.nlink_flag), - m_mode_flag(other.mode_flag), - m_size_flag(other.size_flag), - m_block_flag(other.block_flag), - m_atime_flag(other.atime_flag), - m_mtime_flag(other.mtime_flag), - m_ctime_flag(other.ctime_flag) { } + m_path(other.path), + m_nlink(other.nlink), + m_mode(other.mode), + m_uid(other.uid), + m_gid(other.gid), + m_size(other.size), + m_blocks(other.blocks), + m_atime(other.atime), + m_mtime(other.mtime), + m_ctime(other.ctime), + m_nlink_flag(other.nlink_flag), + m_mode_flag(other.mode_flag), + m_size_flag(other.size_flag), + m_block_flag(other.block_flag), + m_atime_flag(other.atime_flag), + 
m_mtime_flag(other.mtime_flag), + m_ctime_flag(other.ctime_flag) {} explicit operator rpc_update_metadentry_in_t() { - return {m_path.c_str(), + return {m_path.c_str(), m_nlink, m_mode, m_uid, @@ -900,22 +939,25 @@ struct update_metadentry { class output { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: output() : - m_err() {} + m_err() {} output(int32_t err) : - m_err(err) {} + m_err(err) {} output(output&& rhs) = default; + output(const output& other) = default; + output& operator=(output&& rhs) = default; + output& operator=(const output& other) = default; - explicit + explicit output(const rpc_err_out_t& out) { m_err = out.err; } @@ -936,6 +978,7 @@ struct get_metadentry_size { // forward declarations of public input/output types for this RPC class input; + class output; // traits used so that the engine knows what to do with the RPC @@ -947,7 +990,7 @@ struct get_metadentry_size { using mercury_output_type = rpc_get_metadentry_size_out_t; // RPC public identifier - // (N.B: we reuse the same IDs assigned by Margo so that the daemon + // (N.B: we reuse the same IDs assigned by Margo so that the daemon // understands Hermes RPCs) constexpr static const uint64_t public_id = 3426484224; @@ -961,25 +1004,28 @@ struct get_metadentry_size { constexpr static const auto requires_response = true; // Mercury callback to serialize input arguments - constexpr static const auto mercury_in_proc_cb = - HG_GEN_PROC_NAME(rpc_path_only_in_t); + constexpr static const auto mercury_in_proc_cb = + HG_GEN_PROC_NAME(rpc_path_only_in_t); // Mercury callback to serialize output arguments - constexpr static const auto mercury_out_proc_cb = - HG_GEN_PROC_NAME(rpc_err_out_t); + constexpr static const auto mercury_out_proc_cb = + HG_GEN_PROC_NAME(rpc_err_out_t); class input { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: - input(const std::string& path ) : - m_path(path) { } + input(const std::string& path) : + m_path(path) {} input(input&& rhs) = default; + input(const input& other) = default; + input& operator=(input&& rhs) = default; + input& operator=(const input& other) = default; std::string @@ -989,7 +1035,7 @@ struct get_metadentry_size { explicit input(const rpc_path_only_in_t& other) : - m_path(other.path) { } + m_path(other.path) {} explicit operator rpc_path_only_in_t() { @@ -1002,24 +1048,27 @@ struct get_metadentry_size { class output { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: output() : - m_err(), - m_ret_size() {} + m_err(), + m_ret_size() {} output(int32_t err, int64_t ret_size) : - m_err(err), - m_ret_size(ret_size) {} + m_err(err), + m_ret_size(ret_size) {} output(output&& rhs) = default; + output(const output& other) = default; + output& operator=(output&& rhs) = default; + output& operator=(const output& other) = default; - explicit + explicit output(const rpc_get_metadentry_size_out_t& out) { m_err = out.err; m_ret_size = out.ret_size; @@ -1047,6 +1096,7 @@ struct update_metadentry_size { // forward declarations of public input/output types for this RPC class input; + class output; // traits used so that the engine knows what to do with the RPC @@ -1058,7 +1108,7 @@ struct update_metadentry_size { using mercury_output_type = rpc_update_metadentry_size_out_t; // RPC public identifier - // (N.B: we reuse the same IDs assigned by Margo so that the daemon + // (N.B: we reuse the same IDs assigned by Margo so that the daemon // 
understands Hermes RPCs) constexpr static const uint64_t public_id = 2760900608; @@ -1072,31 +1122,34 @@ struct update_metadentry_size { constexpr static const auto requires_response = true; // Mercury callback to serialize input arguments - constexpr static const auto mercury_in_proc_cb = - HG_GEN_PROC_NAME(rpc_update_metadentry_size_in_t); + constexpr static const auto mercury_in_proc_cb = + HG_GEN_PROC_NAME(rpc_update_metadentry_size_in_t); // Mercury callback to serialize output arguments - constexpr static const auto mercury_out_proc_cb = - HG_GEN_PROC_NAME(rpc_update_metadentry_size_out_t); + constexpr static const auto mercury_out_proc_cb = + HG_GEN_PROC_NAME(rpc_update_metadentry_size_out_t); class input { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: - input(const std::string& path, - uint64_t size, - int64_t offset, + input(const std::string& path, + uint64_t size, + int64_t offset, bool append) : - m_path(path), - m_size(size), - m_offset(offset), - m_append(append) { } + m_path(path), + m_size(size), + m_offset(offset), + m_append(append) {} input(input&& rhs) = default; + input(const input& other) = default; + input& operator=(input&& rhs) = default; + input& operator=(const input& other) = default; std::string @@ -1121,10 +1174,10 @@ struct update_metadentry_size { explicit input(const rpc_update_metadentry_size_in_t& other) : - m_path(other.path), - m_size(other.size), - m_offset(other.offset), - m_append(other.append) { } + m_path(other.path), + m_size(other.size), + m_offset(other.offset), + m_append(other.append) {} explicit operator rpc_update_metadentry_size_in_t() { @@ -1134,30 +1187,33 @@ struct update_metadentry_size { private: std::string m_path; uint64_t m_size; - int64_t m_offset; - bool m_append; + int64_t m_offset; + bool m_append; }; class output { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: output() : - m_err(), - m_ret_size() {} + m_err(), + m_ret_size() {} output(int32_t err, int64_t ret_size) : - m_err(err), - m_ret_size(ret_size) {} + m_err(err), + m_ret_size(ret_size) {} output(output&& rhs) = default; + output(const output& other) = default; + output& operator=(output&& rhs) = default; + output& operator=(const output& other) = default; - explicit + explicit output(const rpc_update_metadentry_size_out_t& out) { m_err = out.err; m_ret_size = out.ret_size; @@ -1187,6 +1243,7 @@ struct mk_symlink { // forward declarations of public input/output types for this RPC class input; + class output; // traits used so that the engine knows what to do with the RPC @@ -1198,7 +1255,7 @@ struct mk_symlink { using mercury_output_type = rpc_err_out_t; // RPC public identifier - // (N.B: we reuse the same IDs assigned by Margo so that the daemon + // (N.B: we reuse the same IDs assigned by Margo so that the daemon // understands Hermes RPCs) constexpr static const uint64_t public_id = 3207004160; @@ -1212,27 +1269,30 @@ struct mk_symlink { constexpr static const auto requires_response = true; // Mercury callback to serialize input arguments - constexpr static const auto mercury_in_proc_cb = - HG_GEN_PROC_NAME(rpc_mk_symlink_in_t); + constexpr static const auto mercury_in_proc_cb = + HG_GEN_PROC_NAME(rpc_mk_symlink_in_t); // Mercury callback to serialize output arguments - constexpr static const auto mercury_out_proc_cb = - HG_GEN_PROC_NAME(rpc_err_out_t); + constexpr static const auto mercury_out_proc_cb = + HG_GEN_PROC_NAME(rpc_err_out_t); class input { - 
template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: - input(const std::string& path, + input(const std::string& path, const std::string& target_path) : - m_path(path), - m_target_path(target_path) { } + m_path(path), + m_target_path(target_path) {} input(input&& rhs) = default; + input(const input& other) = default; + input& operator=(input&& rhs) = default; + input& operator=(const input& other) = default; std::string @@ -1247,8 +1307,8 @@ struct mk_symlink { explicit input(const rpc_mk_symlink_in_t& other) : - m_path(other.path), - m_target_path(other.target_path) { } + m_path(other.path), + m_target_path(other.target_path) {} explicit operator rpc_mk_symlink_in_t() { @@ -1262,22 +1322,25 @@ struct mk_symlink { class output { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: output() : - m_err() {} + m_err() {} output(int32_t err) : - m_err(err) {} + m_err(err) {} output(output&& rhs) = default; + output(const output& other) = default; + output& operator=(output&& rhs) = default; + output& operator=(const output& other) = default; - explicit + explicit output(const rpc_err_out_t& out) { m_err = out.err; } @@ -1300,6 +1363,7 @@ struct write_data { // forward declarations of public input/output types for this RPC class input; + class output; // traits used so that the engine knows what to do with the RPC @@ -1311,7 +1375,7 @@ struct write_data { using mercury_output_type = rpc_data_out_t; // RPC public identifier - // (N.B: we reuse the same IDs assigned by Margo so that the daemon + // (N.B: we reuse the same IDs assigned by Margo so that the daemon // understands Hermes RPCs) constexpr static const uint64_t public_id = 3716481024; @@ -1325,20 +1389,20 @@ struct write_data { constexpr static const auto requires_response = true; // Mercury callback to serialize input arguments - constexpr static const auto mercury_in_proc_cb = - HG_GEN_PROC_NAME(rpc_write_data_in_t); + constexpr static const auto mercury_in_proc_cb = + HG_GEN_PROC_NAME(rpc_write_data_in_t); // Mercury callback to serialize output arguments - constexpr static const auto mercury_out_proc_cb = - HG_GEN_PROC_NAME(rpc_data_out_t); + constexpr static const auto mercury_out_proc_cb = + HG_GEN_PROC_NAME(rpc_data_out_t); class input { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: - input(const std::string& path, + input(const std::string& path, int64_t offset, uint64_t host_id, uint64_t host_size, @@ -1347,19 +1411,22 @@ struct write_data { uint64_t chunk_end, uint64_t total_chunk_size, const hermes::exposed_memory& buffers) : - m_path(path), - m_offset(offset), - m_host_id(host_id), - m_host_size(host_size), - m_chunk_n(chunk_n), - m_chunk_start(chunk_start), - m_chunk_end(chunk_end), - m_total_chunk_size(total_chunk_size), - m_buffers(buffers) { } + m_path(path), + m_offset(offset), + m_host_id(host_id), + m_host_size(host_size), + m_chunk_n(chunk_n), + m_chunk_start(chunk_start), + m_chunk_end(chunk_end), + m_total_chunk_size(total_chunk_size), + m_buffers(buffers) {} input(input&& rhs) = default; + input(const input& other) = default; + input& operator=(input&& rhs) = default; + input& operator=(const input& other) = default; std::string @@ -1409,28 +1476,28 @@ struct write_data { explicit input(const rpc_write_data_in_t& other) : - m_path(other.path), - m_offset(other.offset), - m_host_id(other.host_id), - m_host_size(other.host_size), - m_chunk_n(other.chunk_n), - 
m_chunk_start(other.chunk_start), - m_chunk_end(other.chunk_end), - m_total_chunk_size(other.total_chunk_size), - m_buffers(other.bulk_handle) { } + m_path(other.path), + m_offset(other.offset), + m_host_id(other.host_id), + m_host_size(other.host_size), + m_chunk_n(other.chunk_n), + m_chunk_start(other.chunk_start), + m_chunk_end(other.chunk_end), + m_total_chunk_size(other.total_chunk_size), + m_buffers(other.bulk_handle) {} explicit operator rpc_write_data_in_t() { return { - m_path.c_str(), - m_offset, - m_host_id, - m_host_size, - m_chunk_n, - m_chunk_start, - m_chunk_end, - m_total_chunk_size, - hg_bulk_t(m_buffers) + m_path.c_str(), + m_offset, + m_host_id, + m_host_size, + m_chunk_n, + m_chunk_start, + m_chunk_end, + m_total_chunk_size, + hg_bulk_t(m_buffers) }; } @@ -1448,24 +1515,27 @@ struct write_data { class output { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: output() : - m_err(), - m_io_size() {} + m_err(), + m_io_size() {} output(int32_t err, size_t io_size) : - m_err(err), - m_io_size(io_size) {} + m_err(err), + m_io_size(io_size) {} output(output&& rhs) = default; + output(const output& other) = default; + output& operator=(output&& rhs) = default; + output& operator=(const output& other) = default; - explicit + explicit output(const rpc_data_out_t& out) { m_err = out.err; m_io_size = out.io_size; @@ -1493,6 +1563,7 @@ struct read_data { // forward declarations of public input/output types for this RPC class input; + class output; // traits used so that the engine knows what to do with the RPC @@ -1504,7 +1575,7 @@ struct read_data { using mercury_output_type = rpc_data_out_t; // RPC public identifier - // (N.B: we reuse the same IDs assigned by Margo so that the daemon + // (N.B: we reuse the same IDs assigned by Margo so that the daemon // understands Hermes RPCs) constexpr static const uint64_t public_id = 892207104; @@ -1518,20 +1589,20 @@ struct read_data { constexpr static const auto requires_response = true; // Mercury callback to serialize input arguments - constexpr static const auto mercury_in_proc_cb = - HG_GEN_PROC_NAME(rpc_read_data_in_t); + constexpr static const auto mercury_in_proc_cb = + HG_GEN_PROC_NAME(rpc_read_data_in_t); // Mercury callback to serialize output arguments - constexpr static const auto mercury_out_proc_cb = - HG_GEN_PROC_NAME(rpc_data_out_t); + constexpr static const auto mercury_out_proc_cb = + HG_GEN_PROC_NAME(rpc_data_out_t); class input { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: - input(const std::string& path, + input(const std::string& path, int64_t offset, uint64_t host_id, uint64_t host_size, @@ -1540,19 +1611,22 @@ struct read_data { uint64_t chunk_end, uint64_t total_chunk_size, const hermes::exposed_memory& buffers) : - m_path(path), - m_offset(offset), - m_host_id(host_id), - m_host_size(host_size), - m_chunk_n(chunk_n), - m_chunk_start(chunk_start), - m_chunk_end(chunk_end), - m_total_chunk_size(total_chunk_size), - m_buffers(buffers) { } + m_path(path), + m_offset(offset), + m_host_id(host_id), + m_host_size(host_size), + m_chunk_n(chunk_n), + m_chunk_start(chunk_start), + m_chunk_end(chunk_end), + m_total_chunk_size(total_chunk_size), + m_buffers(buffers) {} input(input&& rhs) = default; + input(const input& other) = default; + input& operator=(input&& rhs) = default; + input& operator=(const input& other) = default; std::string @@ -1602,28 +1676,28 @@ struct read_data { explicit input(const 
rpc_read_data_in_t& other) : - m_path(other.path), - m_offset(other.offset), - m_host_id(other.host_id), - m_host_size(other.host_size), - m_chunk_n(other.chunk_n), - m_chunk_start(other.chunk_start), - m_chunk_end(other.chunk_end), - m_total_chunk_size(other.total_chunk_size), - m_buffers(other.bulk_handle) { } + m_path(other.path), + m_offset(other.offset), + m_host_id(other.host_id), + m_host_size(other.host_size), + m_chunk_n(other.chunk_n), + m_chunk_start(other.chunk_start), + m_chunk_end(other.chunk_end), + m_total_chunk_size(other.total_chunk_size), + m_buffers(other.bulk_handle) {} explicit operator rpc_read_data_in_t() { return { - m_path.c_str(), - m_offset, - m_host_id, - m_host_size, - m_chunk_n, - m_chunk_start, - m_chunk_end, - m_total_chunk_size, - hg_bulk_t(m_buffers) + m_path.c_str(), + m_offset, + m_host_id, + m_host_size, + m_chunk_n, + m_chunk_start, + m_chunk_end, + m_total_chunk_size, + hg_bulk_t(m_buffers) }; } @@ -1641,24 +1715,27 @@ struct read_data { class output { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: output() : - m_err(), - m_io_size() {} + m_err(), + m_io_size() {} output(int32_t err, size_t io_size) : - m_err(err), - m_io_size(io_size) {} + m_err(err), + m_io_size(io_size) {} output(output&& rhs) = default; + output(const output& other) = default; + output& operator=(output&& rhs) = default; + output& operator=(const output& other) = default; - explicit + explicit output(const rpc_data_out_t& out) { m_err = out.err; m_io_size = out.io_size; @@ -1686,6 +1763,7 @@ struct trunc_data { // forward declarations of public input/output types for this RPC class input; + class output; // traits used so that the engine knows what to do with the RPC @@ -1697,7 +1775,7 @@ struct trunc_data { using mercury_output_type = rpc_err_out_t; // RPC public identifier - // (N.B: we reuse the same IDs assigned by Margo so that the daemon + // (N.B: we reuse the same IDs assigned by Margo so that the daemon // understands Hermes RPCs) constexpr static const uint64_t public_id = 1850933248; @@ -1711,27 +1789,30 @@ struct trunc_data { constexpr static const auto requires_response = true; // Mercury callback to serialize input arguments - constexpr static const auto mercury_in_proc_cb = - HG_GEN_PROC_NAME(rpc_trunc_in_t); + constexpr static const auto mercury_in_proc_cb = + HG_GEN_PROC_NAME(rpc_trunc_in_t); // Mercury callback to serialize output arguments - constexpr static const auto mercury_out_proc_cb = - HG_GEN_PROC_NAME(rpc_err_out_t); + constexpr static const auto mercury_out_proc_cb = + HG_GEN_PROC_NAME(rpc_err_out_t); class input { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: - input(const std::string& path, + input(const std::string& path, uint64_t length) : - m_path(path), - m_length(length) { } + m_path(path), + m_length(length) {} input(input&& rhs) = default; + input(const input& other) = default; + input& operator=(input&& rhs) = default; + input& operator=(const input& other) = default; std::string @@ -1746,14 +1827,14 @@ struct trunc_data { explicit input(const rpc_trunc_in_t& other) : - m_path(other.path), - m_length(other.length) { } + m_path(other.path), + m_length(other.length) {} explicit operator rpc_trunc_in_t() { return { - m_path.c_str(), - m_length, + m_path.c_str(), + m_length, }; } @@ -1764,22 +1845,25 @@ struct trunc_data { class output { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: 
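        // [editor's aside - illustration only, not part of this patch]
        // Every `output` class in this header follows the same convention: the
        // daemon replies with a plain error field (0 on success, an errno value
        // on failure), optionally next to a payload such as an I/O size. The
        // client-side rpc_send wrappers typically translate that field back
        // into errno for the caller; a minimal sketch of that translation,
        // assuming an `out` object exposing the err() accessor declared here:
        //
        //     if (out.err() != 0) {   // daemon reported a failure
        //         errno = out.err();  // surface the daemon-side error code
        //         return -1;
        //     }
        //     return 0;               // success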
output() : - m_err() {} + m_err() {} output(int32_t err) : - m_err(err) {} + m_err(err) {} output(output&& rhs) = default; + output(const output& other) = default; + output& operator=(output&& rhs) = default; + output& operator=(const output& other) = default; - explicit + explicit output(const rpc_err_out_t& out) { m_err = out.err; } @@ -1800,6 +1884,7 @@ struct get_dirents { // forward declarations of public input/output types for this RPC class input; + class output; // traits used so that the engine knows what to do with the RPC @@ -1811,7 +1896,7 @@ struct get_dirents { using mercury_output_type = rpc_get_dirents_out_t; // RPC public identifier - // (N.B: we reuse the same IDs assigned by Margo so that the daemon + // (N.B: we reuse the same IDs assigned by Margo so that the daemon // understands Hermes RPCs) constexpr static const uint64_t public_id = 4121034752; @@ -1825,27 +1910,30 @@ struct get_dirents { constexpr static const auto requires_response = true; // Mercury callback to serialize input arguments - constexpr static const auto mercury_in_proc_cb = - HG_GEN_PROC_NAME(rpc_get_dirents_in_t); + constexpr static const auto mercury_in_proc_cb = + HG_GEN_PROC_NAME(rpc_get_dirents_in_t); // Mercury callback to serialize output arguments - constexpr static const auto mercury_out_proc_cb = - HG_GEN_PROC_NAME(rpc_get_dirents_out_t); + constexpr static const auto mercury_out_proc_cb = + HG_GEN_PROC_NAME(rpc_get_dirents_out_t); class input { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: - input(const std::string& path, + input(const std::string& path, const hermes::exposed_memory& buffers) : - m_path(path), - m_buffers(buffers) { } + m_path(path), + m_buffers(buffers) {} input(input&& rhs) = default; + input(const input& other) = default; + input& operator=(input&& rhs) = default; + input& operator=(const input& other) = default; std::string @@ -1860,14 +1948,14 @@ struct get_dirents { explicit input(const rpc_get_dirents_in_t& other) : - m_path(other.path), - m_buffers(other.bulk_handle) { } + m_path(other.path), + m_buffers(other.bulk_handle) {} explicit operator rpc_get_dirents_in_t() { return { - m_path.c_str(), - hg_bulk_t(m_buffers) + m_path.c_str(), + hg_bulk_t(m_buffers) }; } @@ -1878,24 +1966,27 @@ struct get_dirents { class output { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: output() : - m_err(), - m_dirents_size() {} + m_err(), + m_dirents_size() {} output(int32_t err, size_t dirents_size) : - m_err(err), - m_dirents_size(dirents_size) {} + m_err(err), + m_dirents_size(dirents_size) {} output(output&& rhs) = default; + output(const output& other) = default; + output& operator=(output&& rhs) = default; + output& operator=(const output& other) = default; - explicit + explicit output(const rpc_get_dirents_out_t& out) { m_err = out.err; m_dirents_size = out.dirents_size; @@ -1923,6 +2014,7 @@ struct chunk_stat { // forward declarations of public input/output types for this RPC class input; + class output; // traits used so that the engine knows what to do with the RPC @@ -1934,7 +2026,7 @@ struct chunk_stat { using mercury_output_type = rpc_chunk_stat_out_t; // RPC public identifier - // (N.B: we reuse the same IDs assigned by Margo so that the daemon + // (N.B: we reuse the same IDs assigned by Margo so that the daemon // understands Hermes RPCs) constexpr static const uint64_t public_id = 532742144; @@ -1948,25 +2040,28 @@ struct chunk_stat { constexpr static const 
auto requires_response = true; // Mercury callback to serialize input arguments - constexpr static const auto mercury_in_proc_cb = - HG_GEN_PROC_NAME(rpc_chunk_stat_in_t); + constexpr static const auto mercury_in_proc_cb = + HG_GEN_PROC_NAME(rpc_chunk_stat_in_t); // Mercury callback to serialize output arguments - constexpr static const auto mercury_out_proc_cb = - HG_GEN_PROC_NAME(rpc_chunk_stat_out_t); + constexpr static const auto mercury_out_proc_cb = + HG_GEN_PROC_NAME(rpc_chunk_stat_out_t); class input { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: input(int32_t dummy) : - m_dummy(dummy) { } + m_dummy(dummy) {} input(input&& rhs) = default; + input(const input& other) = default; + input& operator=(input&& rhs) = default; + input& operator=(const input& other) = default; int32_t @@ -1976,11 +2071,11 @@ struct chunk_stat { explicit input(const rpc_chunk_stat_in_t& other) : - m_dummy(other.dummy) { } + m_dummy(other.dummy) {} explicit operator rpc_chunk_stat_in_t() { - return { m_dummy }; + return {m_dummy}; } private: @@ -1989,26 +2084,29 @@ struct chunk_stat { class output { - template + template friend hg_return_t hermes::detail::post_to_mercury(ExecutionContext*); public: output() : - m_chunk_size(), - m_chunk_total(), - m_chunk_free() {} + m_chunk_size(), + m_chunk_total(), + m_chunk_free() {} output(uint64_t chunk_size, uint64_t chunk_total, uint64_t chunk_free) : - m_chunk_size(chunk_size), - m_chunk_total(chunk_total), - m_chunk_free(chunk_free) {} + m_chunk_size(chunk_size), + m_chunk_total(chunk_total), + m_chunk_free(chunk_free) {} output(output&& rhs) = default; + output(const output& other) = default; + output& operator=(output&& rhs) = default; + output& operator=(const output& other) = default; - explicit + explicit output(const rpc_chunk_stat_out_t& out) { m_chunk_size = out.chunk_size; m_chunk_total = out.chunk_total; diff --git a/include/client/rpc/ld_rpc_data_ws.hpp b/include/client/rpc/ld_rpc_data_ws.hpp index 74da98849..cc619670d 100644 --- a/include/client/rpc/ld_rpc_data_ws.hpp +++ b/include/client/rpc/ld_rpc_data_ws.hpp @@ -18,20 +18,20 @@ namespace rpc_send { - ssize_t write(const std::string& path, const void* buf, const bool append_flag, const off64_t in_offset, - const size_t write_size, const int64_t updated_metadentry_size); + ssize_t write(const std::string& path, const void* buf, bool append_flag, off64_t in_offset, + size_t write_size, int64_t updated_metadentry_size); struct ChunkStat { unsigned long chunk_size; unsigned long chunk_total; unsigned long chunk_free; -}; + }; -ssize_t read(const std::string& path, void* buf, const off64_t offset, const size_t read_size); + ssize_t read(const std::string& path, void* buf, off64_t offset, size_t read_size); -int trunc_data(const std::string& path, size_t current_size, size_t new_size); + int trunc_data(const std::string& path, size_t current_size, size_t new_size); -ChunkStat chunk_stat(); + ChunkStat chunk_stat(); } diff --git a/include/client/rpc/ld_rpc_management.hpp b/include/client/rpc/ld_rpc_management.hpp index f70063de6..f03a68a40 100644 --- a/include/client/rpc/ld_rpc_management.hpp +++ b/include/client/rpc/ld_rpc_management.hpp @@ -17,10 +17,8 @@ namespace rpc_send { - bool get_fs_config(); - } // end namespace rpc_send #endif //GEKKOFS_MARGO_RPC_NANAGMENT_HPP diff --git a/include/client/rpc/ld_rpc_metadentry.hpp b/include/client/rpc/ld_rpc_metadentry.hpp index 4c52de261..7985c5439 100644 --- a/include/client/rpc/ld_rpc_metadentry.hpp +++ 
b/include/client/rpc/ld_rpc_metadentry.hpp @@ -26,26 +26,27 @@ class Metadata; namespace rpc_send { + int mk_node(const std::string& path, mode_t mode); -int mk_node(const std::string& path, mode_t mode); + int stat(const std::string& path, std::string& attr); -int stat(const std::string& path, std::string& attr); + int rm_node(const std::string& path, bool remove_metadentry_only, ssize_t size); -int rm_node(const std::string& path, const bool remove_metadentry_only, const ssize_t size); + int decr_size(const std::string& path, size_t length); -int decr_size(const std::string& path, size_t length); + int update_metadentry(const std::string& path, const Metadata& md, const MetadentryUpdateFlags& md_flags); -int update_metadentry(const std::string& path, const Metadata& md, const MetadentryUpdateFlags& md_flags); + int update_metadentry_size(const std::string& path, size_t size, off64_t offset, bool append_flag, + off64_t& ret_size); -int update_metadentry_size(const std::string& path, size_t size, off64_t offset, bool append_flag, - off64_t& ret_size); + int get_metadentry_size(const std::string& path, off64_t& ret_size); -int get_metadentry_size(const std::string& path, off64_t& ret_size); - -void get_dirents(OpenDir& open_dir); + void get_dirents(OpenDir& open_dir); #ifdef HAS_SYMLINKS -int mk_symlink(const std::string& path, const std::string& target_path); + + int mk_symlink(const std::string& path, const std::string& target_path); + #endif diff --git a/include/daemon/backend/data/chunk_storage.hpp b/include/daemon/backend/data/chunk_storage.hpp index 9008090c1..ebaeb88b2 100644 --- a/include/daemon/backend/data/chunk_storage.hpp +++ b/include/daemon/backend/data/chunk_storage.hpp @@ -14,8 +14,11 @@ #ifndef GEKKOFS_CHUNK_STORAGE_HPP #define GEKKOFS_CHUNK_STORAGE_HPP +extern "C" { #include -#include +} + +#include #include #include @@ -24,7 +27,6 @@ namespace spdlog { class logger; } - struct ChunkStat { unsigned long chunk_size; unsigned long chunk_total; @@ -32,32 +34,43 @@ struct ChunkStat { }; class ChunkStorage { - private: - static constexpr const char * LOGGER_NAME = "ChunkStorage"; - - std::shared_ptr log; - - std::string root_path; - size_t chunksize; - inline std::string absolute(const std::string& internal_path) const; - static inline std::string get_chunks_dir(const std::string& file_path); - static inline std::string get_chunk_path(const std::string& file_path, unsigned int chunk_id); - void init_chunk_space(const std::string& file_path) const; - - public: - ChunkStorage(const std::string& path, const size_t chunksize); - void write_chunk(const std::string& file_path, unsigned int chunk_id, - const char * buff, size_t size, off64_t offset, - ABT_eventual& eventual) const; - void read_chunk(const std::string& file_path, unsigned int chunk_id, - char * buff, size_t size, off64_t offset, - ABT_eventual& eventual) const; - void trim_chunk_space(const std::string& file_path, unsigned int chunk_start, - unsigned int chunk_end = UINT_MAX); - void delete_chunk(const std::string& file_path, unsigned int chunk_id); - void truncate_chunk(const std::string& file_path, unsigned int chunk_id, off_t length); - void destroy_chunk_space(const std::string& file_path) const; - ChunkStat chunk_stat() const; +private: + static constexpr const char* LOGGER_NAME = "ChunkStorage"; + + std::shared_ptr log; + + std::string root_path; + size_t chunksize; + + inline std::string absolute(const std::string& internal_path) const; + + static inline std::string get_chunks_dir(const std::string& file_path); 
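    // [editor's aside - illustration only, not part of this patch]
    // write_chunk()/read_chunk() below address data by (chunk_id, intra-chunk
    // offset) rather than by a raw file offset. For a power-of-two `chunksize`,
    // the mapping done by the chunk-calculation helpers (see
    // global/chunk_calc_util.hpp later in this patch) boils down to plain
    // integer arithmetic:
    //
    //     uint64_t chunk_id  = file_offset / chunksize;   // which chunk
    //     size_t   in_offset = file_offset % chunksize;   // offset inside it
    //
    // e.g. with chunksize = 4096, file_offset = 10000 falls into chunk 2 at
    // intra-chunk offset 1808.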
+ + static inline std::string get_chunk_path(const std::string& file_path, unsigned int chunk_id); + + void init_chunk_space(const std::string& file_path) const; + +public: + ChunkStorage(const std::string& path, size_t chunksize); + + void write_chunk(const std::string& file_path, unsigned int chunk_id, + const char* buff, size_t size, off64_t offset, + ABT_eventual& eventual) const; + + void read_chunk(const std::string& file_path, unsigned int chunk_id, + char* buff, size_t size, off64_t offset, + ABT_eventual& eventual) const; + + void trim_chunk_space(const std::string& file_path, unsigned int chunk_start, + unsigned int chunk_end = std::numeric_limits::max()); + + void delete_chunk(const std::string& file_path, unsigned int chunk_id); + + void truncate_chunk(const std::string& file_path, unsigned int chunk_id, off_t length); + + void destroy_chunk_space(const std::string& file_path) const; + + ChunkStat chunk_stat() const; }; #endif //GEKKOFS_CHUNK_STORAGE_HPP diff --git a/include/daemon/backend/exceptions.hpp b/include/daemon/backend/exceptions.hpp index f0319dcce..c4273629d 100644 --- a/include/daemon/backend/exceptions.hpp +++ b/include/daemon/backend/exceptions.hpp @@ -23,8 +23,8 @@ public: }; class NotFoundException : public DBException { - public: - NotFoundException(const std::string & s) : DBException(s) {}; +public: + NotFoundException(const std::string& s) : DBException(s) {}; }; #endif //GEKKOFS_DB_EXCEPTIONS_HPP diff --git a/include/daemon/backend/metadata/db.hpp b/include/daemon/backend/metadata/db.hpp index 8690ec56d..cb10886ff 100644 --- a/include/daemon/backend/metadata/db.hpp +++ b/include/daemon/backend/metadata/db.hpp @@ -15,33 +15,42 @@ #define GEKKOFS_METADATA_DB_HPP #include -#include "rocksdb/db.h" -#include "daemon/backend/exceptions.hpp" +#include +#include namespace rdb = rocksdb; class MetadataDB { private: std::unique_ptr db; - rdb::Options options; - rdb::WriteOptions write_opts; - std::string path; - static void optimize_rocksdb_options(rdb::Options& options); - - public: - static inline void throw_rdb_status_excpt(const rdb::Status& s); - - MetadataDB(const std::string& path); - - std::string get(const std::string& key) const; - void put(const std::string& key, const std::string& val); - void remove(const std::string& key); - bool exists(const std::string& key); - void update(const std::string& old_key, const std::string& new_key, const std::string& val); - void increase_size(const std::string& key, size_t size, bool append); - void decrease_size(const std::string& key, size_t size); - std::vector> get_dirents(const std::string& dir) const; - void iterate_all(); + rdb::Options options; + rdb::WriteOptions write_opts; + std::string path; + + static void optimize_rocksdb_options(rdb::Options& options); + +public: + static inline void throw_rdb_status_excpt(const rdb::Status& s); + + MetadataDB(const std::string& path); + + std::string get(const std::string& key) const; + + void put(const std::string& key, const std::string& val); + + void remove(const std::string& key); + + bool exists(const std::string& key); + + void update(const std::string& old_key, const std::string& new_key, const std::string& val); + + void increase_size(const std::string& key, size_t size, bool append); + + void decrease_size(const std::string& key, size_t size); + + std::vector> get_dirents(const std::string& dir) const; + + void iterate_all(); }; #endif //GEKKOFS_METADATA_DB_HPP diff --git a/include/daemon/backend/metadata/merge.hpp b/include/daemon/backend/metadata/merge.hpp 
index bd7ee8cd0..8b1a2791d 100644 --- a/include/daemon/backend/metadata/merge.hpp +++ b/include/daemon/backend/metadata/merge.hpp @@ -15,80 +15,91 @@ #define DB_MERGE_HPP -#include "rocksdb/merge_operator.h" -#include "global/metadata.hpp" +#include +#include namespace rdb = rocksdb; -enum class OperandID: char { +enum class OperandID : char { increase_size = 'i', decrease_size = 'd', create = 'c' }; class MergeOperand { - public: - constexpr static char operand_id_suffix = ':'; - std::string serialize() const; - static OperandID get_id(const rdb::Slice& serialized_op); - static rdb::Slice get_params(const rdb::Slice& serialized_op); - - protected: - std::string serialize_id() const; - virtual std::string serialize_params() const = 0; - virtual OperandID id() const = 0; -}; +public: + constexpr static char operand_id_suffix = ':'; + + std::string serialize() const; -class IncreaseSizeOperand: public MergeOperand { - public: - constexpr const static char separator = ','; - constexpr const static char true_char = 't'; - constexpr const static char false_char = 'f'; + static OperandID get_id(const rdb::Slice& serialized_op); - size_t size; - bool append; + static rdb::Slice get_params(const rdb::Slice& serialized_op); - IncreaseSizeOperand(const size_t size, const bool append); - IncreaseSizeOperand(const rdb::Slice& serialized_op); +protected: + std::string serialize_id() const; - OperandID id() const override; - std::string serialize_params() const override; + virtual std::string serialize_params() const = 0; + + virtual OperandID id() const = 0; }; -class DecreaseSizeOperand: public MergeOperand { - public: - size_t size; +class IncreaseSizeOperand : public MergeOperand { +public: + constexpr const static char separator = ','; + constexpr const static char true_char = 't'; + constexpr const static char false_char = 'f'; + + size_t size; + bool append; - DecreaseSizeOperand(const size_t size); - DecreaseSizeOperand(const rdb::Slice& serialized_op); + IncreaseSizeOperand(size_t size, bool append); - OperandID id() const override; - std::string serialize_params() const override; + explicit IncreaseSizeOperand(const rdb::Slice& serialized_op); + + OperandID id() const override; + + std::string serialize_params() const override; }; -class CreateOperand: public MergeOperand { - public: - std::string metadata; - CreateOperand(const std::string& metadata); +class DecreaseSizeOperand : public MergeOperand { +public: + size_t size; + + explicit DecreaseSizeOperand(size_t size); - OperandID id() const override; - std::string serialize_params() const override; + explicit DecreaseSizeOperand(const rdb::Slice& serialized_op); + + OperandID id() const override; + + std::string serialize_params() const override; }; -class MetadataMergeOperator: public rocksdb::MergeOperator { - public: - virtual ~MetadataMergeOperator(){}; - bool FullMergeV2(const MergeOperationInput& merge_in, - MergeOperationOutput* merge_out) const override; +class CreateOperand : public MergeOperand { +public: + std::string metadata; - bool PartialMergeMulti(const rdb::Slice& key, - const std::deque& operand_list, - std::string* new_value, rdb::Logger* logger) const override; + explicit CreateOperand(const std::string& metadata); - const char* Name() const override; + OperandID id() const override; - bool AllowSingleOperand() const override; + std::string serialize_params() const override; }; +class MetadataMergeOperator : public rocksdb::MergeOperator { +public: + ~MetadataMergeOperator() override = default; + + bool 
FullMergeV2(const MergeOperationInput& merge_in, + MergeOperationOutput* merge_out) const override; + + bool PartialMergeMulti(const rdb::Slice& key, + const std::deque& operand_list, + std::string* new_value, rdb::Logger* logger) const override; + + const char* Name() const override; + + bool AllowSingleOperand() const override; +}; #endif // DB_MERGE_HPP diff --git a/include/daemon/classes/fs_data.hpp b/include/daemon/classes/fs_data.hpp index 1d0c57bf0..d061b2017 100644 --- a/include/daemon/classes/fs_data.hpp +++ b/include/daemon/classes/fs_data.hpp @@ -19,6 +19,7 @@ /* Forward declarations */ class MetadataDB; + class ChunkStorage; #include @@ -30,13 +31,6 @@ class FsData { private: FsData() {} - // Caching - std::unordered_map hashmap_; - std::hash hashf_; - - // Later the blocksize will likely be coupled to the chunks to allow individually big chunk sizes. - blksize_t blocksize_; - //logger std::shared_ptr spdlogger_; @@ -72,18 +66,6 @@ public: // getter/setter - const std::unordered_map& hashmap() const; - - void hashmap(const std::unordered_map& hashmap_); - - const std::hash& hashf() const; - - void hashf(const std::hash& hashf_); - - blksize_t blocksize() const; - - void blocksize(blksize_t blocksize_); - const std::shared_ptr& spdlogger() const; void spdlogger(const std::shared_ptr& spdlogger_); @@ -140,5 +122,4 @@ public: }; - #endif //LFS_FS_DATA_H diff --git a/include/daemon/classes/rpc_data.hpp b/include/daemon/classes/rpc_data.hpp index b3d16115e..0c7453ecd 100644 --- a/include/daemon/classes/rpc_data.hpp +++ b/include/daemon/classes/rpc_data.hpp @@ -62,5 +62,4 @@ public: }; - #endif //LFS_RPC_DATA_HPP diff --git a/include/daemon/handler/rpc_defs.hpp b/include/daemon/handler/rpc_defs.hpp index e7a252dee..ceaeac173 100644 --- a/include/daemon/handler/rpc_defs.hpp +++ b/include/daemon/handler/rpc_defs.hpp @@ -40,7 +40,9 @@ DECLARE_MARGO_RPC_HANDLER(rpc_srv_update_metadentry_size) DECLARE_MARGO_RPC_HANDLER(rpc_srv_get_dirents) #ifdef HAS_SYMLINKS + DECLARE_MARGO_RPC_HANDLER(rpc_srv_mk_symlink) + #endif diff --git a/include/daemon/main.hpp b/include/daemon/main.hpp index 19f8ca206..4e5a628ed 100644 --- a/include/daemon/main.hpp +++ b/include/daemon/main.hpp @@ -26,6 +26,7 @@ extern "C" { #include #include } + #include #include @@ -33,14 +34,13 @@ extern "C" { #define RPC_DATA (static_cast(RPCData::getInstance())) void init_environment(); + void destroy_enviroment(); void init_io_tasklet_pool(); + void init_rpc_server(const std::string& protocol); void register_server_rpcs(margo_instance_id mid); -void populate_hosts_file(); -void destroy_hosts_file(); - #endif // GKFS_DAEMON_MAIN_HPP diff --git a/include/daemon/ops/metadentry.hpp b/include/daemon/ops/metadentry.hpp index 350713d52..d9a6cb0ac 100644 --- a/include/daemon/ops/metadentry.hpp +++ b/include/daemon/ops/metadentry.hpp @@ -18,8 +18,6 @@ #include #include -int create_node(const std::string& path, const uid_t uid, const gid_t gid, mode_t mode); - void create_metadentry(const std::string& path, Metadata& md); std::string get_metadentry_str(const std::string& path); diff --git a/include/daemon/util.hpp b/include/daemon/util.hpp index b12243da7..4aa142dc7 100644 --- a/include/daemon/util.hpp +++ b/include/daemon/util.hpp @@ -14,9 +14,6 @@ #ifndef GEKKOFS_DAEMON_UTIL_HPP #define GEKKOFS_DAEMON_UTIL_HPP -#include -#include - namespace gkfs { namespace util { void populate_hosts_file(); diff --git a/include/global/chunk_calc_util.hpp b/include/global/chunk_calc_util.hpp index 00b9df0f3..6121b9b1c 100644 --- 
a/include/global/chunk_calc_util.hpp +++ b/include/global/chunk_calc_util.hpp @@ -88,6 +88,7 @@ inline size_t chnk_rpad(const off64_t offset, const size_t chnk_size) { * chunk_id(0,4) = 0; */ inline uint64_t chnk_id_for_offset(const off64_t offset, const size_t chnk_size) { + // TODO This does not work for very large offsets: `offset / chnk_size` works return static_cast(chnk_lalign(offset, chnk_size) >> log2(chnk_size)); } diff --git a/include/global/log_util.hpp b/include/global/log_util.hpp index bd0d8e572..c2f81f49a 100644 --- a/include/global/log_util.hpp +++ b/include/global/log_util.hpp @@ -16,7 +16,6 @@ #include - spdlog::level::level_enum get_spdlog_level(std::string level_str); spdlog::level::level_enum get_spdlog_level(unsigned long level); @@ -24,5 +23,4 @@ spdlog::level::level_enum get_spdlog_level(unsigned long level); void setup_loggers(const std::vector& loggers, spdlog::level::level_enum level, const std::string& path); - #endif diff --git a/include/global/metadata.hpp b/include/global/metadata.hpp index 573d7ae0d..4d1f2ae99 100644 --- a/include/global/metadata.hpp +++ b/include/global/metadata.hpp @@ -16,7 +16,7 @@ #pragma once -#include "config.hpp" +#include #include #include #include @@ -40,38 +40,62 @@ private: public: - Metadata(); + Metadata() = default; + explicit Metadata(mode_t mode); + #ifdef HAS_SYMLINKS + Metadata(mode_t mode, const std::string& target_path); + #endif + // Construct from a binary representation of the object explicit Metadata(const std::string& binary_str); std::string serialize() const; void init_ACM_time(); + void update_ACM_time(bool a, bool c, bool m); //Getter and Setter time_t atime() const; + void atime(time_t atime_); + time_t mtime() const; + void mtime(time_t mtime_); + time_t ctime() const; + void ctime(time_t ctime_); + mode_t mode() const; + void mode(mode_t mode_); + nlink_t link_count() const; + void link_count(nlink_t link_count_); + size_t size() const; + void size(size_t size_); + blkcnt_t blocks() const; + void blocks(blkcnt_t blocks_); + #ifdef HAS_SYMLINKS + std::string target_path() const; + void target_path(const std::string& target_path); + bool is_link() const; + #endif }; diff --git a/include/global/path_util.hpp b/include/global/path_util.hpp index b12b01a40..bdcc42acf 100644 --- a/include/global/path_util.hpp +++ b/include/global/path_util.hpp @@ -21,13 +21,16 @@ constexpr unsigned int PATH_MAX_LEN = 4096; // 4k chars constexpr char PSP = '/'; // PATH SEPARATOR - bool is_relative_path(const std::string& path); + bool is_absolute_path(const std::string& path); + bool has_trailing_slash(const std::string& path); -std::string prepend_path(const std::string& path, const char * raw_path); +std::string prepend_path(const std::string& path, const char* raw_path); + std::string dirname(const std::string& path); + std::vector split_path(const std::string& path); #endif //GEKKOFS_PATH_UTIL_HPP diff --git a/include/global/rpc/distributor.hpp b/include/global/rpc/distributor.hpp index d09fbc164..2bced7ae7 100644 --- a/include/global/rpc/distributor.hpp +++ b/include/global/rpc/distributor.hpp @@ -18,42 +18,52 @@ #include #include - using ChunkID = unsigned int; using Host = unsigned int; class Distributor { - public: - virtual Host localhost() const = 0; - virtual Host locate_data(const std::string& path, const ChunkID& chnk_id) const = 0; - virtual Host locate_file_metadata(const std::string& path) const = 0; - virtual std::vector locate_directory_metadata(const std::string& path) const = 0; +public: + virtual Host localhost() 
const = 0; + + virtual Host locate_data(const std::string& path, const ChunkID& chnk_id) const = 0; + + virtual Host locate_file_metadata(const std::string& path) const = 0; + + virtual std::vector locate_directory_metadata(const std::string& path) const = 0; }; class SimpleHashDistributor : public Distributor { - private: - Host localhost_; - unsigned int hosts_size_; - std::vector all_hosts_; - std::hash str_hash; - public: - SimpleHashDistributor(Host localhost, unsigned int hosts_size); - Host localhost() const override; - Host locate_data(const std::string& path, const ChunkID& chnk_id) const override; - Host locate_file_metadata(const std::string& path) const override; - std::vector locate_directory_metadata(const std::string& path) const override; +private: + Host localhost_; + unsigned int hosts_size_; + std::vector all_hosts_; + std::hash str_hash; +public: + SimpleHashDistributor(Host localhost, unsigned int hosts_size); + + Host localhost() const override; + + Host locate_data(const std::string& path, const ChunkID& chnk_id) const override; + + Host locate_file_metadata(const std::string& path) const override; + + std::vector locate_directory_metadata(const std::string& path) const override; }; class LocalOnlyDistributor : public Distributor { - private: - Host localhost_; - public: - LocalOnlyDistributor(Host localhost); - Host localhost() const override; - Host locate_data(const std::string& path, const ChunkID& chnk_id) const override; - Host locate_file_metadata(const std::string& path) const override; - std::vector locate_directory_metadata(const std::string& path) const override; +private: + Host localhost_; +public: + explicit LocalOnlyDistributor(Host localhost); + + Host localhost() const override; + + Host locate_data(const std::string& path, const ChunkID& chnk_id) const override; + + Host locate_file_metadata(const std::string& path) const override; + + std::vector locate_directory_metadata(const std::string& path) const override; }; #endif //GEKKOFS_RPC_LOCATOR_HPP diff --git a/include/global/rpc/rpc_utils.hpp b/include/global/rpc/rpc_utils.hpp index 2a55721eb..14f487829 100644 --- a/include/global/rpc/rpc_utils.hpp +++ b/include/global/rpc/rpc_utils.hpp @@ -65,8 +65,6 @@ hg_bool_t bool_to_merc_bool(bool state); std::string get_my_hostname(bool short_hostname = false); -std::string get_host_by_name(const std::string & hostname); - -bool is_handle_sm(margo_instance_id mid, const hg_addr_t& addr); +std::string get_host_by_name(const std::string& hostname); #endif //GEKKOFS_RPC_UTILS_HPP diff --git a/src/client/gkfs_functions.cpp b/src/client/gkfs_functions.cpp index ed11b7484..548fe3287 100644 --- a/src/client/gkfs_functions.cpp +++ b/src/client/gkfs_functions.cpp @@ -11,37 +11,40 @@ SPDX-License-Identifier: MIT */ -#include -#include #include -#include #include -#include "client/preload_util.hpp" +#include #include #include #include #include #include +#include + +extern "C" { +#include +#include +} -#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask)) -#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1) +#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask)) +#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1) #define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) struct linux_dirent { - unsigned long d_ino; - unsigned long d_off; - unsigned short d_reclen; - char d_name[1]; + unsigned long d_ino; + unsigned long d_off; + unsigned short d_reclen; + char d_name[1]; }; struct linux_dirent64 { - unsigned long long d_ino; 
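// [editor's aside - illustration only, not part of this patch]
// __ALIGN_KERNEL_MASK above rounds x up to the next multiple of (mask + 1), so
// for a power-of-two `a`, ALIGN(x, a) rounds x up to a multiple of a. The
// getdents()/getdents64() implementations further down use it to size each
// directory record, e.g.:
//
//     ALIGN(23, 8) == (23 + 7) & ~7 == 24
//     ALIGN(24, 8) == (24 + 7) & ~7 == 24
//
// so that d_reclen is always a multiple of sizeof(long), mirroring how the
// kernel lays out its own dirent records.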
- unsigned long long d_off; - unsigned short d_reclen; + unsigned long long d_ino; + unsigned long long d_off; + unsigned short d_reclen; unsigned char d_type; - char d_name[1]; + char d_name[1]; }; using namespace std; @@ -63,7 +66,7 @@ int gkfs_open(const std::string& path, mode_t mode, int flags) { bool exists = true; auto md = gkfs_metadata(path); if (!md) { - if(errno == ENOENT) { + if (errno == ENOENT) { exists = false; } else { LOG(ERROR, "Error while retriving stat to file"); @@ -72,7 +75,7 @@ int gkfs_open(const std::string& path, mode_t mode, int flags) { } if (!exists) { - if (! (flags & O_CREAT)) { + if (!(flags & O_CREAT)) { // file doesn't exists and O_CREAT was not set errno = ENOENT; return -1; @@ -81,7 +84,7 @@ int gkfs_open(const std::string& path, mode_t mode, int flags) { /*** CREATION ***/ assert(flags & O_CREAT); - if(flags & O_DIRECTORY){ + if (flags & O_DIRECTORY) { LOG(ERROR, "O_DIRECTORY use with O_CREAT. NOT SUPPORTED"); errno = ENOTSUP; return -1; @@ -95,7 +98,7 @@ int gkfs_open(const std::string& path, mode_t mode, int flags) { } else { /* File already exists */ - if(flags & O_EXCL) { + if (flags & O_EXCL) { // File exists and O_EXCL was set errno = EEXIST; return -1; @@ -112,7 +115,7 @@ int gkfs_open(const std::string& path, mode_t mode, int flags) { } #endif - if(S_ISDIR(md->mode())) { + if (S_ISDIR(md->mode())) { return gkfs_opendir(path); } @@ -120,7 +123,7 @@ int gkfs_open(const std::string& path, mode_t mode, int flags) { /*** Regular file exists ***/ assert(S_ISREG(md->mode())); - if( (flags & O_TRUNC) && ((flags & O_RDWR) || (flags & O_WRONLY)) ) { + if ((flags & O_TRUNC) && ((flags & O_RDWR) || (flags & O_WRONLY))) { if (gkfs_truncate(path, md->size(), 0)) { LOG(ERROR, "Error truncating file"); return -1; @@ -247,7 +250,7 @@ int gkfs_statfs(struct statfs* buf) { buf->f_namelen = PATH_MAX_LEN; buf->f_frsize = 0; buf->f_flags = - ST_NOATIME | ST_NODIRATIME | ST_NOSUID | ST_NODEV | ST_SYNCHRONOUS; + ST_NOATIME | ST_NODIRATIME | ST_NOSUID | ST_NODEV | ST_SYNCHRONOUS; return 0; } @@ -265,7 +268,7 @@ int gkfs_statvfs(struct statvfs* buf) { buf->f_namemax = PATH_MAX_LEN; buf->f_frsize = 0; buf->f_flag = - ST_NOATIME | ST_NODIRATIME | ST_NOSUID | ST_NODEV | ST_SYNCHRONOUS; + ST_NOATIME | ST_NODIRATIME | ST_NOSUID | ST_NODEV | ST_SYNCHRONOUS; return 0; } @@ -322,7 +325,7 @@ int gkfs_truncate(const std::string& path, off_t old_size, off_t new_size) { return -1; } - if(rpc_send::trunc_data(path, old_size, new_size)){ + if (rpc_send::trunc_data(path, old_size, new_size)) { LOG(DEBUG, "Failed to truncate data"); return -1; } @@ -349,7 +352,7 @@ int gkfs_truncate(const std::string& path, off_t length) { return -1; } auto size = md->size(); - if(static_cast(length) > size) { + if (static_cast(length) > size) { LOG(DEBUG, "Length is greater then file size: {} > {}", length, size); errno = EINVAL; return -1; @@ -423,7 +426,7 @@ ssize_t gkfs_pwritev(int fd, const struct iovec* iov, int iovcnt, off_t offset) if (count == 0) { continue; } - auto buf = (iov+i)->iov_base; + auto buf = (iov + i)->iov_base; ret = gkfs_pwrite(file, reinterpret_cast(buf), count, pos); if (ret == -1) { break; @@ -523,7 +526,7 @@ int gkfs_rmdir(const std::string& path) { auto open_dir = std::make_shared(path); rpc_send::get_dirents(*open_dir); - if(open_dir->size() != 0){ + if (open_dir->size() != 0) { errno = ENOTEMPTY; return -1; } @@ -532,11 +535,11 @@ int gkfs_rmdir(const std::string& path) { int getdents(unsigned int fd, - struct linux_dirent *dirp, + struct linux_dirent* dirp, unsigned int 
count) { auto open_dir = CTX->file_map()->get_dir(fd); - if(open_dir == nullptr){ + if (open_dir == nullptr) { //Cast did not succeeded: open_file is a regular file errno = EBADF; return -1; @@ -548,24 +551,25 @@ int getdents(unsigned int fd, } unsigned int written = 0; - struct linux_dirent * current_dirp = nullptr; - while(pos < open_dir->size()) { + struct linux_dirent* current_dirp = nullptr; + while (pos < open_dir->size()) { DirEntry de = open_dir->getdent(pos); - auto total_size = ALIGN(offsetof(struct linux_dirent, d_name) + - de.name().size() + 3, sizeof(long)); + auto total_size = ALIGN(offsetof( + struct linux_dirent, d_name) + + de.name().size() + 3, sizeof(long)); if (total_size > (count - written)) { //no enough space left on user buffer to insert next dirent break; } - current_dirp = reinterpret_cast( - reinterpret_cast(dirp) + written); + current_dirp = reinterpret_cast( + reinterpret_cast(dirp) + written); current_dirp->d_ino = std::hash()( open_dir->path() + "/" + de.name()); current_dirp->d_reclen = total_size; *(reinterpret_cast(current_dirp) + total_size - 1) = - ((de.type() == FileType::regular)? DT_REG : DT_DIR); + ((de.type() == FileType::regular) ? DT_REG : DT_DIR); LOG(DEBUG, "name {}: {}", pos, de.name()); std::strcpy(&(current_dirp->d_name[0]), de.name().c_str()); @@ -584,11 +588,11 @@ int getdents(unsigned int fd, int getdents64(unsigned int fd, - struct linux_dirent64 *dirp, - unsigned int count) { + struct linux_dirent64* dirp, + unsigned int count) { auto open_dir = CTX->file_map()->get_dir(fd); - if(open_dir == nullptr){ + if (open_dir == nullptr) { //Cast did not succeeded: open_file is a regular file errno = EBADF; return -1; @@ -600,22 +604,23 @@ int getdents64(unsigned int fd, } unsigned int written = 0; - struct linux_dirent64 * current_dirp = nullptr; - while(pos < open_dir->size()) { + struct linux_dirent64* current_dirp = nullptr; + while (pos < open_dir->size()) { DirEntry de = open_dir->getdent(pos); - auto total_size = ALIGN(offsetof(struct linux_dirent64, d_name) + - de.name().size() + 3, sizeof(long)); + auto total_size = ALIGN(offsetof( + struct linux_dirent64, d_name) + + de.name().size() + 3, sizeof(long)); if (total_size > (count - written)) { //no enough space left on user buffer to insert next dirent break; } - current_dirp = reinterpret_cast( - reinterpret_cast(dirp) + written); + current_dirp = reinterpret_cast( + reinterpret_cast(dirp) + written); current_dirp->d_ino = std::hash()( open_dir->path() + "/" + de.name()); current_dirp->d_reclen = total_size; - current_dirp->d_type = ((de.type() == FileType::regular)? DT_REG : DT_DIR); + current_dirp->d_type = ((de.type() == FileType::regular) ? DT_REG : DT_DIR); LOG(DEBUG, "name {}: {}", pos, de.name()); std::strcpy(&(current_dirp->d_name[0]), de.name().c_str()); diff --git a/src/client/hooks.cpp b/src/client/hooks.cpp index 04c14f2fa..04bcbdf0c 100644 --- a/src/client/hooks.cpp +++ b/src/client/hooks.cpp @@ -11,33 +11,36 @@ SPDX-License-Identifier: MIT */ -#include "client/hooks.hpp" -#include "client/preload.hpp" -#include "client/logging.hpp" +#include +#include +#include +#include +#include +#include -#include "client/gkfs_functions.hpp" -#include "client/resolve.hpp" -#include "client/open_dir.hpp" -#include "global/path_util.hpp" +#include +#include + +extern "C" { #include #include #include -#include +} static inline int with_errno(int ret) { - return (ret < 0)? -errno : ret; + return (ret < 0) ? 
-errno : ret; } -int hook_openat(int dirfd, const char *cpath, int flags, mode_t mode) { +int hook_openat(int dirfd, const char* cpath, int flags, mode_t mode) { LOG(DEBUG, "{}() called with fd: {}, path: \"{}\", flags: {}, mode: {}", __func__, dirfd, cpath, flags, mode); std::string resolved; auto rstatus = CTX->relativize_fd_path(dirfd, cpath, resolved); - switch(rstatus) { + switch (rstatus) { case RelativizeStatus::fd_unknown: return syscall_no_intercept(SYS_openat, dirfd, cpath, flags, mode); @@ -60,13 +63,13 @@ int hook_close(int fd) { LOG(DEBUG, "{}() called with fd: {}", __func__, fd); - if(CTX->file_map()->exist(fd)) { + if (CTX->file_map()->exist(fd)) { // No call to the daemon is required CTX->file_map()->remove(fd); return 0; } - if(CTX->is_internal_fd(fd)) { + if (CTX->is_internal_fd(fd)) { // the client application (for some reason) is trying to close an // internal fd: ignore it return 0; @@ -77,7 +80,7 @@ int hook_close(int fd) { int hook_stat(const char* path, struct stat* buf) { - LOG(DEBUG, "{}() called with path: \"{}\", buf: {}", + LOG(DEBUG, "{}() called with path: \"{}\", buf: {}", __func__, path, fmt::ptr(buf)); std::string rel_path; @@ -89,7 +92,7 @@ int hook_stat(const char* path, struct stat* buf) { int hook_lstat(const char* path, struct stat* buf) { - LOG(DEBUG, "{}() called with path: \"{}\", buf: {}", + LOG(DEBUG, "{}() called with path: \"{}\", buf: {}", __func__, path, fmt::ptr(buf)); std::string rel_path; @@ -111,19 +114,19 @@ int hook_fstat(unsigned int fd, struct stat* buf) { return syscall_no_intercept(SYS_fstat, fd, buf); } -int hook_fstatat(int dirfd, const char * cpath, struct stat * buf, int flags) { +int hook_fstatat(int dirfd, const char* cpath, struct stat* buf, int flags) { LOG(DEBUG, "{}() called with path: \"{}\", fd: {}, buf: {}, flags: {}", __func__, cpath, dirfd, fmt::ptr(buf), flags); - if(flags & AT_EMPTY_PATH) { + if (flags & AT_EMPTY_PATH) { LOG(ERROR, "{}() AT_EMPTY_PATH flag not supported", __func__); return -ENOTSUP; } std::string resolved; auto rstatus = CTX->relativize_fd_path(dirfd, cpath, resolved); - switch(rstatus) { + switch (rstatus) { case RelativizeStatus::fd_unknown: return syscall_no_intercept(SYS_newfstatat, dirfd, cpath, buf, flags); @@ -144,7 +147,7 @@ int hook_fstatat(int dirfd, const char * cpath, struct stat * buf, int flags) { int hook_read(unsigned int fd, void* buf, size_t count) { - LOG(DEBUG, "{}() called with fd: {}, buf: {} count: {}", + LOG(DEBUG, "{}() called with fd: {}, buf: {} count: {}", __func__, fd, fmt::ptr(buf), count); if (CTX->file_map()->exist(fd)) { @@ -153,7 +156,7 @@ int hook_read(unsigned int fd, void* buf, size_t count) { return syscall_no_intercept(SYS_read, fd, buf, count); } -int hook_pread(unsigned int fd, char * buf, size_t count, loff_t pos) { +int hook_pread(unsigned int fd, char* buf, size_t count, loff_t pos) { LOG(DEBUG, "{}() called with fd: {}, buf: {}, count: {}, pos: {}", __func__, fd, fmt::ptr(buf), count, pos); @@ -165,9 +168,9 @@ int hook_pread(unsigned int fd, char * buf, size_t count, loff_t pos) { return syscall_no_intercept(SYS_pread64, fd, buf, count, pos); } -int hook_write(unsigned int fd, const char * buf, size_t count) { +int hook_write(unsigned int fd, const char* buf, size_t count) { - LOG(DEBUG, "{}() called with fd: {}, buf: {}, count {}", + LOG(DEBUG, "{}() called with fd: {}, buf: {}, count {}", __func__, fd, fmt::ptr(buf), count); if (CTX->file_map()->exist(fd)) { @@ -176,7 +179,7 @@ int hook_write(unsigned int fd, const char * buf, size_t count) { return 
syscall_no_intercept(SYS_write, fd, buf, count); } -int hook_pwrite(unsigned int fd, const char * buf, size_t count, loff_t pos) { +int hook_pwrite(unsigned int fd, const char* buf, size_t count, loff_t pos) { LOG(DEBUG, "{}() called with fd: {}, buf: {}, count: {}, pos: {}", __func__, fd, fmt::ptr(buf), count, pos); @@ -188,9 +191,9 @@ int hook_pwrite(unsigned int fd, const char * buf, size_t count, loff_t pos) { return syscall_no_intercept(SYS_pwrite64, fd, buf, count, pos); } -int hook_writev(unsigned long fd, const struct iovec * iov, unsigned long iovcnt) { +int hook_writev(unsigned long fd, const struct iovec* iov, unsigned long iovcnt) { - LOG(DEBUG, "{}() called with fd: {}, iov: {}, iovcnt: {}", + LOG(DEBUG, "{}() called with fd: {}, iov: {}, iovcnt: {}", __func__, fd, fmt::ptr(iov), iovcnt); if (CTX->file_map()->exist(fd)) { @@ -199,11 +202,11 @@ int hook_writev(unsigned long fd, const struct iovec * iov, unsigned long iovcnt return syscall_no_intercept(SYS_writev, fd, iov, iovcnt); } -int hook_pwritev(unsigned long fd, const struct iovec * iov, unsigned long iovcnt, +int hook_pwritev(unsigned long fd, const struct iovec* iov, unsigned long iovcnt, unsigned long pos_l, unsigned long pos_h) { LOG(DEBUG, "{}() called with fd: {}, iov: {}, iovcnt: {}, " - "pos_l: {}," "pos_h: {}", + "pos_l: {}," "pos_h: {}", __func__, fd, fmt::ptr(iov), iovcnt, pos_l, pos_h); if (CTX->file_map()->exist(fd)) { @@ -213,7 +216,7 @@ int hook_pwritev(unsigned long fd, const struct iovec * iov, unsigned long iovcn return syscall_no_intercept(SYS_pwritev, fd, iov, iovcnt); } -int hook_unlinkat(int dirfd, const char * cpath, int flags) { +int hook_unlinkat(int dirfd, const char* cpath, int flags) { LOG(DEBUG, "{}() called with dirfd: {}, path: \"{}\", flags: {}", __func__, dirfd, cpath, flags); @@ -225,7 +228,7 @@ int hook_unlinkat(int dirfd, const char * cpath, int flags) { std::string resolved; auto rstatus = CTX->relativize_fd_path(dirfd, cpath, resolved, false); - switch(rstatus) { + switch (rstatus) { case RelativizeStatus::fd_unknown: return syscall_no_intercept(SYS_unlinkat, dirfd, cpath, flags); @@ -236,7 +239,7 @@ int hook_unlinkat(int dirfd, const char * cpath, int flags) { return -ENOTDIR; case RelativizeStatus::internal: - if(flags & AT_REMOVEDIR) { + if (flags & AT_REMOVEDIR) { return with_errno(gkfs_rmdir(resolved)); } else { return with_errno(gkfs_rm_node(resolved)); @@ -248,7 +251,7 @@ int hook_unlinkat(int dirfd, const char * cpath, int flags) { } } -int hook_symlinkat(const char * oldname, int newdfd, const char * newname) { +int hook_symlinkat(const char* oldname, int newdfd, const char* newname) { LOG(DEBUG, "{}() called with oldname: \"{}\", newfd: {}, newname: \"{}\"", __func__, oldname, newdfd, newname); @@ -261,7 +264,7 @@ int hook_symlinkat(const char * oldname, int newdfd, const char * newname) { std::string newname_resolved; auto rstatus = CTX->relativize_fd_path(newdfd, newname, newname_resolved, false); - switch(rstatus) { + switch (rstatus) { case RelativizeStatus::fd_unknown: return syscall_no_intercept(SYS_symlinkat, oldname, newdfd, newname); @@ -284,13 +287,13 @@ int hook_symlinkat(const char * oldname, int newdfd, const char * newname) { int hook_access(const char* path, int mask) { - LOG(DEBUG, "{}() called path: \"{}\", mask: {}", + LOG(DEBUG, "{}() called path: \"{}\", mask: {}", __func__, path, mask); std::string rel_path; if (CTX->relativize_path(path, rel_path)) { auto ret = gkfs_access(rel_path, mask); - if(ret < 0) { + if (ret < 0) { return -errno; } return ret; @@ 
-298,14 +301,14 @@ int hook_access(const char* path, int mask) { return syscall_no_intercept(SYS_access, rel_path.c_str(), mask); } -int hook_faccessat(int dirfd, const char * cpath, int mode) { +int hook_faccessat(int dirfd, const char* cpath, int mode) { LOG(DEBUG, "{}() called with dirfd: {}, path: \"{}\", mode: {}", __func__, dirfd, cpath, mode); std::string resolved; auto rstatus = CTX->relativize_fd_path(dirfd, cpath, resolved); - switch(rstatus) { + switch (rstatus) { case RelativizeStatus::fd_unknown: return syscall_no_intercept(SYS_faccessat, dirfd, cpath, mode); @@ -326,25 +329,25 @@ int hook_faccessat(int dirfd, const char * cpath, int mode) { off_t hook_lseek(unsigned int fd, off_t offset, unsigned int whence) { - LOG(DEBUG, "{}() called with fd: {}, offset: {}, whence: {}", + LOG(DEBUG, "{}() called with fd: {}, offset: {}, whence: {}", __func__, fd, offset, whence); if (CTX->file_map()->exist(fd)) { auto off_ret = gkfs_lseek(fd, static_cast(offset), whence); if (off_ret > std::numeric_limits::max()) { return -EOVERFLOW; - } else if(off_ret < 0) { + } else if (off_ret < 0) { return -errno; } LOG(DEBUG, "{}() returning {}", __func__, off_ret); return off_ret; } - return syscall_no_intercept(SYS_lseek, fd, offset, whence); + return syscall_no_intercept(SYS_lseek, fd, offset, whence); } int hook_truncate(const char* path, long length) { - LOG(DEBUG, "{}() called with path: {}, offset: {}", + LOG(DEBUG, "{}() called with path: {}, offset: {}", __func__, path, length); std::string rel_path; @@ -356,7 +359,7 @@ int hook_truncate(const char* path, long length) { int hook_ftruncate(unsigned int fd, unsigned long length) { - LOG(DEBUG, "{}() called with fd: {}, offset: {}", + LOG(DEBUG, "{}() called with fd: {}, offset: {}", __func__, fd, length); if (CTX->file_map()->exist(fd)) { @@ -368,7 +371,7 @@ int hook_ftruncate(unsigned int fd, unsigned long length) { int hook_dup(unsigned int fd) { - LOG(DEBUG, "{}() called with oldfd: {}", + LOG(DEBUG, "{}() called with oldfd: {}", __func__, fd); if (CTX->file_map()->exist(fd)) { @@ -379,7 +382,7 @@ int hook_dup(unsigned int fd) { int hook_dup2(unsigned int oldfd, unsigned int newfd) { - LOG(DEBUG, "{}() called with oldfd: {}, newfd: {}", + LOG(DEBUG, "{}() called with oldfd: {}, newfd: {}", __func__, oldfd, newfd); if (CTX->file_map()->exist(oldfd)) { @@ -390,7 +393,7 @@ int hook_dup2(unsigned int oldfd, unsigned int newfd) { int hook_dup3(unsigned int oldfd, unsigned int newfd, int flags) { - LOG(DEBUG, "{}() called with oldfd: {}, newfd: {}, flags: {}", + LOG(DEBUG, "{}() called with oldfd: {}, newfd: {}, flags: {}", __func__, oldfd, newfd, flags); if (CTX->file_map()->exist(oldfd)) { @@ -402,9 +405,9 @@ int hook_dup3(unsigned int oldfd, unsigned int newfd, int flags) { return syscall_no_intercept(SYS_dup3, oldfd, newfd, flags); } -int hook_getdents(unsigned int fd, struct linux_dirent *dirp, unsigned int count) { +int hook_getdents(unsigned int fd, struct linux_dirent* dirp, unsigned int count) { - LOG(DEBUG, "{}() called with fd: {}, dirp: {}, count: {}", + LOG(DEBUG, "{}() called with fd: {}, dirp: {}, count: {}", __func__, fd, fmt::ptr(dirp), count); if (CTX->file_map()->exist(fd)) { @@ -414,9 +417,9 @@ int hook_getdents(unsigned int fd, struct linux_dirent *dirp, unsigned int count } -int hook_getdents64(unsigned int fd, struct linux_dirent64 *dirp, unsigned int count) { +int hook_getdents64(unsigned int fd, struct linux_dirent64* dirp, unsigned int count) { - LOG(DEBUG, "{}() called with fd: {}, dirp: {}, count: {}", + LOG(DEBUG, 
"{}() called with fd: {}, dirp: {}, count: {}", __func__, fd, fmt::ptr(dirp), count); if (CTX->file_map()->exist(fd)) { @@ -426,14 +429,14 @@ int hook_getdents64(unsigned int fd, struct linux_dirent64 *dirp, unsigned int c } -int hook_mkdirat(int dirfd, const char * cpath, mode_t mode) { +int hook_mkdirat(int dirfd, const char* cpath, mode_t mode) { LOG(DEBUG, "{}() called with dirfd: {}, path: \"{}\", mode: {}", __func__, dirfd, cpath, mode); std::string resolved; auto rstatus = CTX->relativize_fd_path(dirfd, cpath, resolved); - switch(rstatus) { + switch (rstatus) { case RelativizeStatus::external: return syscall_no_intercept(SYS_mkdirat, dirfd, resolved.c_str(), mode); @@ -452,14 +455,14 @@ int hook_mkdirat(int dirfd, const char * cpath, mode_t mode) { } } -int hook_fchmodat(int dirfd, const char * cpath, mode_t mode) { +int hook_fchmodat(int dirfd, const char* cpath, mode_t mode) { - LOG(DEBUG, "{}() called dirfd: {}, path: \"{}\", mode: {}", + LOG(DEBUG, "{}() called dirfd: {}, path: \"{}\", mode: {}", __func__, dirfd, cpath, mode); std::string resolved; auto rstatus = CTX->relativize_fd_path(dirfd, cpath, resolved); - switch(rstatus) { + switch (rstatus) { case RelativizeStatus::fd_unknown: return syscall_no_intercept(SYS_fchmodat, dirfd, cpath, mode); @@ -481,7 +484,7 @@ int hook_fchmodat(int dirfd, const char * cpath, mode_t mode) { int hook_fchmod(unsigned int fd, mode_t mode) { - LOG(DEBUG, "{}() called with fd: {}, mode: {}", + LOG(DEBUG, "{}() called with fd: {}, mode: {}", __func__, fd, mode); if (CTX->file_map()->exist(fd)) { @@ -491,9 +494,9 @@ int hook_fchmod(unsigned int fd, mode_t mode) { return syscall_no_intercept(SYS_fchmod, fd, mode); } -int hook_chdir(const char * path) { +int hook_chdir(const char* path) { - LOG(DEBUG, "{}() called with path: \"{}\"", + LOG(DEBUG, "{}() called with path: \"{}\"", __func__, path); std::string rel_path; @@ -505,7 +508,7 @@ int hook_chdir(const char * path) { LOG(ERROR, "{}() path does not exists", __func__); return -ENOENT; } - if(!S_ISDIR(md->mode())) { + if (!S_ISDIR(md->mode())) { LOG(ERROR, "{}() path is not a directory", __func__); return -ENOTDIR; } @@ -527,7 +530,7 @@ int hook_chdir(const char * path) { int hook_fchdir(unsigned int fd) { - LOG(DEBUG, "{}() called with fd: {}", + LOG(DEBUG, "{}() called with fd: {}", __func__, fd); if (CTX->file_map()->exist(fd)) { @@ -535,7 +538,7 @@ int hook_fchdir(unsigned int fd) { if (open_dir == nullptr) { //Cast did not succeeded: open_file is a regular file LOG(ERROR, "{}() file descriptor refers to a normal file: '{}'", - __func__, open_dir->path()); + __func__, open_dir->path()); return -EBADF; } @@ -562,12 +565,12 @@ int hook_fchdir(unsigned int fd) { return 0; } -int hook_getcwd(char * buf, unsigned long size) { +int hook_getcwd(char* buf, unsigned long size) { - LOG(DEBUG, "{}() called with buf: {}, size: {}", + LOG(DEBUG, "{}() called with buf: {}, size: {}", __func__, fmt::ptr(buf), size); - if(CTX->cwd().size() + 1 > size) { + if (CTX->cwd().size() + 1 > size) { LOG(ERROR, "{}() buffer too small to host current working dir", __func__); return -ERANGE; } @@ -576,14 +579,14 @@ int hook_getcwd(char * buf, unsigned long size) { return (CTX->cwd().size() + 1); } -int hook_readlinkat(int dirfd, const char * cpath, char * buf, int bufsiz) { +int hook_readlinkat(int dirfd, const char* cpath, char* buf, int bufsiz) { LOG(DEBUG, "{}() called with dirfd: {}, path \"{}\", buf: {}, bufsize: {}", __func__, dirfd, cpath, fmt::ptr(buf), bufsiz); std::string resolved; auto rstatus = 
CTX->relativize_fd_path(dirfd, cpath, resolved, false); - switch(rstatus) { + switch (rstatus) { case RelativizeStatus::fd_unknown: return syscall_no_intercept(SYS_readlinkat, dirfd, cpath, buf, bufsiz); @@ -605,7 +608,7 @@ int hook_readlinkat(int dirfd, const char * cpath, char * buf, int bufsiz) { int hook_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg) { - LOG(DEBUG, "{}() called with fd: {}, cmd: {}, arg: {}", + LOG(DEBUG, "{}() called with fd: {}, cmd: {}, arg: {}", __func__, fd, cmd, arg); if (!CTX->file_map()->exist(fd)) { @@ -621,7 +624,7 @@ int hook_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg) { case F_DUPFD_CLOEXEC: LOG(DEBUG, "{}() F_DUPFD_CLOEXEC on fd {}", __func__, fd); ret = gkfs_dup(fd); - if(ret == -1) { + if (ret == -1) { return -errno; } CTX->file_map()->get(fd)->set_flag(OpenFile_flags::cloexec, true); @@ -629,7 +632,7 @@ int hook_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg) { case F_GETFD: LOG(DEBUG, "{}() F_GETFD on fd {}", __func__, fd); - if(CTX->file_map()->get(fd) + if (CTX->file_map()->get(fd) ->get_flag(OpenFile_flags::cloexec)) { return FD_CLOEXEC; } @@ -638,15 +641,15 @@ int hook_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg) { case F_GETFL: LOG(DEBUG, "{}() F_GETFL on fd {}", __func__, fd); ret = 0; - if(CTX->file_map()->get(fd) + if (CTX->file_map()->get(fd) ->get_flag(OpenFile_flags::rdonly)) { ret |= O_RDONLY; } - if(CTX->file_map()->get(fd) + if (CTX->file_map()->get(fd) ->get_flag(OpenFile_flags::wronly)) { ret |= O_WRONLY; } - if(CTX->file_map()->get(fd) + if (CTX->file_map()->get(fd) ->get_flag(OpenFile_flags::rdwr)) { ret |= O_RDWR; } @@ -656,29 +659,29 @@ int hook_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg) { LOG(DEBUG, "{}() [fd: {}, cmd: F_SETFD, FD_CLOEXEC: {}]", __func__, fd, (arg & FD_CLOEXEC)); CTX->file_map()->get(fd) - ->set_flag(OpenFile_flags::cloexec, (arg & FD_CLOEXEC)); + ->set_flag(OpenFile_flags::cloexec, (arg & FD_CLOEXEC)); return 0; default: LOG(ERROR, "{}() unrecognized command {} on fd {}", - __func__, cmd, fd); + __func__, cmd, fd); return -ENOTSUP; } } -int hook_renameat(int olddfd, const char * oldname, - int newdfd, const char * newname, +int hook_renameat(int olddfd, const char* oldname, + int newdfd, const char* newname, unsigned int flags) { LOG(DEBUG, "{}() called with olddfd: {}, oldname: \"{}\", newfd: {}, " - "newname \"{}\", flags {}", + "newname \"{}\", flags {}", __func__, olddfd, oldname, newdfd, newname, flags); - const char * oldpath_pass; + const char* oldpath_pass; std::string oldpath_resolved; auto oldpath_status = CTX->relativize_fd_path(olddfd, oldname, oldpath_resolved); - switch(oldpath_status) { + switch (oldpath_status) { case RelativizeStatus::fd_unknown: oldpath_pass = oldname; break; @@ -699,10 +702,10 @@ int hook_renameat(int olddfd, const char * oldname, return -EINVAL; } - const char * newpath_pass; + const char* newpath_pass; std::string newpath_resolved; auto newpath_status = CTX->relativize_fd_path(newdfd, newname, newpath_resolved); - switch(newpath_status) { + switch (newpath_status) { case RelativizeStatus::fd_unknown: newpath_pass = newname; break; @@ -723,12 +726,12 @@ int hook_renameat(int olddfd, const char * oldname, return -EINVAL; } - return syscall_no_intercept(SYS_renameat2, olddfd, oldpath_pass, newdfd, newpath_pass, flags); + return syscall_no_intercept(SYS_renameat2, olddfd, oldpath_pass, newdfd, newpath_pass, flags); } -int hook_statfs(const char * path, struct statfs * buf) { +int hook_statfs(const char* path, 
struct statfs* buf) { - LOG(DEBUG, "{}() called with path: \"{}\", buf: {}", + LOG(DEBUG, "{}() called with path: \"{}\", buf: {}", __func__, path, fmt::ptr(buf)); std::string rel_path; @@ -738,9 +741,9 @@ int hook_statfs(const char * path, struct statfs * buf) { return syscall_no_intercept(SYS_statfs, rel_path.c_str(), buf); } -int hook_fstatfs(unsigned int fd, struct statfs * buf) { +int hook_fstatfs(unsigned int fd, struct statfs* buf) { - LOG(DEBUG, "{}() called with fd: {}, buf: {}", + LOG(DEBUG, "{}() called with fd: {}, buf: {}", __func__, fd, fmt::ptr(buf)); if (CTX->file_map()->exist(fd)) { diff --git a/src/client/intercept.cpp b/src/client/intercept.cpp index ade9a19e8..0d1d13ab2 100644 --- a/src/client/intercept.cpp +++ b/src/client/intercept.cpp @@ -11,22 +11,23 @@ SPDX-License-Identifier: MIT */ -#include "client/intercept.hpp" -#include "client/preload.hpp" -#include "client/hooks.hpp" - +#include +#include +#include #include -#include -#include -#include #include + +#include + +extern "C" { +#include +#include #include #include - #include - #include +} static thread_local bool reentrance_guard_flag; static thread_local gkfs::syscall::info saved_syscall_info; @@ -58,30 +59,30 @@ get_current_syscall_info() { * We forward syscalls to the kernel but we keep track of any syscalls that may * create or destroy a file descriptor so that we can mark them as 'internal'. */ -static inline int +static inline int hook_internal(long syscall_number, long arg0, long arg1, long arg2, long arg3, long arg4, long arg5, - long *result) { + long* result) { #if defined(GKFS_ENABLE_LOGGING) && defined(GKFS_DEBUG_BUILD) - const long args[gkfs::syscall::MAX_ARGS] = { - arg0, arg1, arg2, arg3, arg4, arg5 - }; + const long args[gkfs::syscall::MAX_ARGS] = { + arg0, arg1, arg2, arg3, arg4, arg5 + }; #endif LOG(SYSCALL, gkfs::syscall::from_internal_code | gkfs::syscall::to_hook | - gkfs::syscall::not_executed, syscall_number, args); + gkfs::syscall::not_executed, syscall_number, args); switch (syscall_number) { case SYS_open: - *result = syscall_no_intercept(syscall_number, - reinterpret_cast(arg0), - static_cast(arg1), - static_cast(arg2)); + *result = syscall_no_intercept(syscall_number, + reinterpret_cast(arg0), + static_cast(arg1), + static_cast(arg2)); - if(*result >= 0) { + if (*result >= 0) { *result = CTX->register_internal_fd(*result); } @@ -89,11 +90,11 @@ hook_internal(long syscall_number, case SYS_creat: *result = syscall_no_intercept(syscall_number, - reinterpret_cast(arg0), - O_WRONLY | O_CREAT | O_TRUNC, - static_cast(arg1)); + reinterpret_cast(arg0), + O_WRONLY | O_CREAT | O_TRUNC, + static_cast(arg1)); - if(*result >= 0) { + if (*result >= 0) { *result = CTX->register_internal_fd(*result); } @@ -101,12 +102,12 @@ hook_internal(long syscall_number, case SYS_openat: *result = syscall_no_intercept(syscall_number, - static_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2), - static_cast(arg3)); + static_cast(arg0), + reinterpret_cast(arg1), + static_cast(arg2), + static_cast(arg3)); - if(*result >= 0) { + if (*result >= 0) { *result = CTX->register_internal_fd(*result); } @@ -114,9 +115,9 @@ hook_internal(long syscall_number, case SYS_epoll_create: *result = syscall_no_intercept(syscall_number, - static_cast(arg0)); + static_cast(arg0)); - if(*result >= 0) { + if (*result >= 0) { *result = CTX->register_internal_fd(*result); } @@ -124,9 +125,9 @@ hook_internal(long syscall_number, case SYS_epoll_create1: *result = syscall_no_intercept(syscall_number, - static_cast(arg0)); + 
static_cast(arg0)); - if(*result >= 0) { + if (*result >= 0) { *result = CTX->register_internal_fd(*result); } @@ -134,9 +135,9 @@ hook_internal(long syscall_number, case SYS_dup: *result = syscall_no_intercept(syscall_number, - static_cast(arg0)); + static_cast(arg0)); - if(*result >= 0) { + if (*result >= 0) { *result = CTX->register_internal_fd(*result); } @@ -144,10 +145,10 @@ hook_internal(long syscall_number, case SYS_dup2: *result = syscall_no_intercept(syscall_number, - static_cast(arg0), - static_cast(arg1)); + static_cast(arg0), + static_cast(arg1)); - if(*result >= 0) { + if (*result >= 0) { *result = CTX->register_internal_fd(*result); } @@ -155,11 +156,11 @@ hook_internal(long syscall_number, case SYS_dup3: *result = syscall_no_intercept(syscall_number, - static_cast(arg0), - static_cast(arg1), - static_cast(arg2)); + static_cast(arg0), + static_cast(arg1), + static_cast(arg2)); - if(*result >= 0) { + if (*result >= 0) { *result = CTX->register_internal_fd(*result); } @@ -168,7 +169,7 @@ hook_internal(long syscall_number, case SYS_inotify_init: *result = syscall_no_intercept(syscall_number); - if(*result >= 0) { + if (*result >= 0) { *result = CTX->register_internal_fd(*result); } @@ -176,9 +177,9 @@ hook_internal(long syscall_number, case SYS_inotify_init1: *result = syscall_no_intercept(syscall_number, - static_cast(arg0)); + static_cast(arg0)); - if(*result >= 0) { + if (*result >= 0) { *result = CTX->register_internal_fd(*result); } @@ -186,44 +187,44 @@ hook_internal(long syscall_number, case SYS_perf_event_open: *result = syscall_no_intercept(syscall_number, - reinterpret_cast(arg0), - static_cast(arg1), - static_cast(arg2), - static_cast(arg3), - static_cast(arg4)); + reinterpret_cast(arg0), + static_cast(arg1), + static_cast(arg2), + static_cast(arg3), + static_cast(arg4)); - if(*result >= 0) { + if (*result >= 0) { *result = CTX->register_internal_fd(*result); } break; case SYS_signalfd: *result = syscall_no_intercept(syscall_number, - static_cast(arg0), - reinterpret_cast(arg1)); + static_cast(arg0), + reinterpret_cast(arg1)); - if(*result >= 0) { + if (*result >= 0) { *result = CTX->register_internal_fd(*result); } break; case SYS_signalfd4: *result = syscall_no_intercept(syscall_number, - static_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2)); + static_cast(arg0), + reinterpret_cast(arg1), + static_cast(arg2)); - if(*result >= 0) { + if (*result >= 0) { *result = CTX->register_internal_fd(*result); } break; case SYS_timerfd_create: *result = syscall_no_intercept(syscall_number, - static_cast(arg0), - static_cast(arg1)); + static_cast(arg0), + static_cast(arg1)); - if(*result >= 0) { + if (*result >= 0) { *result = CTX->register_internal_fd(*result); } break; @@ -235,7 +236,7 @@ hook_internal(long syscall_number, static_cast(arg1), static_cast(arg2)); - if(*result >= 0) { + if (*result >= 0) { *result = CTX->register_internal_fd(*result); } break; @@ -248,11 +249,11 @@ hook_internal(long syscall_number, static_cast(arg2), reinterpret_cast(arg3)); - if(*result >= 0) { - reinterpret_cast(arg3)[0] = - CTX->register_internal_fd(reinterpret_cast(arg3)[0]); - reinterpret_cast(arg3)[1] = - CTX->register_internal_fd(reinterpret_cast(arg3)[1]); + if (*result >= 0) { + reinterpret_cast(arg3)[0] = + CTX->register_internal_fd(reinterpret_cast(arg3)[0]); + reinterpret_cast(arg3)[1] = + CTX->register_internal_fd(reinterpret_cast(arg3)[1]); } break; @@ -261,11 +262,11 @@ hook_internal(long syscall_number, *result = syscall_no_intercept(syscall_number, 
reinterpret_cast(arg0)); - if(*result >= 0) { - reinterpret_cast(arg0)[0] = - CTX->register_internal_fd(reinterpret_cast(arg0)[0]); - reinterpret_cast(arg0)[1] = - CTX->register_internal_fd(reinterpret_cast(arg0)[1]); + if (*result >= 0) { + reinterpret_cast(arg0)[0] = + CTX->register_internal_fd(reinterpret_cast(arg0)[0]); + reinterpret_cast(arg0)[1] = + CTX->register_internal_fd(reinterpret_cast(arg0)[1]); } break; @@ -275,11 +276,11 @@ hook_internal(long syscall_number, *result = syscall_no_intercept(syscall_number, reinterpret_cast(arg0), static_cast(arg1)); - if(*result >= 0) { - reinterpret_cast(arg0)[0] = - CTX->register_internal_fd(reinterpret_cast(arg0)[0]); - reinterpret_cast(arg0)[1] = - CTX->register_internal_fd(reinterpret_cast(arg0)[1]); + if (*result >= 0) { + reinterpret_cast(arg0)[0] = + CTX->register_internal_fd(reinterpret_cast(arg0)[0]); + reinterpret_cast(arg0)[1] = + CTX->register_internal_fd(reinterpret_cast(arg0)[1]); } break; @@ -289,7 +290,7 @@ hook_internal(long syscall_number, *result = syscall_no_intercept(syscall_number, static_cast(arg0)); - if(*result >= 0) { + if (*result >= 0) { *result = CTX->register_internal_fd(*result); } break; @@ -300,38 +301,37 @@ hook_internal(long syscall_number, static_cast(arg0), static_cast(arg1)); - if(*result >= 0) { + if (*result >= 0) { *result = CTX->register_internal_fd(*result); } break; - case SYS_recvmsg: - { + case SYS_recvmsg: { *result = syscall_no_intercept(syscall_number, - static_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2)); + static_cast(arg0), + reinterpret_cast(arg1), + static_cast(arg2)); // The recvmsg() syscall can receive file descriptors from another // process that the kernel automatically adds to the client's fds // as if dup2 had been called. Whenever that happens, we need to // make sure that we register these additional fds as internal, or // we could inadvertently overwrite them - if(*result >= 0) { + if (*result >= 0) { auto* hdr = reinterpret_cast(arg1); struct cmsghdr* cmsg = CMSG_FIRSTHDR(hdr); - for(; cmsg != NULL; cmsg = CMSG_NXTHDR(hdr, cmsg)) { - if(cmsg->cmsg_type == SCM_RIGHTS) { + for (; cmsg != NULL; cmsg = CMSG_NXTHDR(hdr, cmsg)) { + if (cmsg->cmsg_type == SCM_RIGHTS) { - size_t nfd = cmsg->cmsg_len > CMSG_LEN(0) ? - (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int) : - 0; + size_t nfd = cmsg->cmsg_len > CMSG_LEN(0) ? 
+ (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int) : + 0; int* fds = - reinterpret_cast(CMSG_DATA(cmsg)); + reinterpret_cast(CMSG_DATA(cmsg)); - for(size_t i = 0; i < nfd; ++i) { + for (size_t i = 0; i < nfd; ++i) { LOG(DEBUG, "recvmsg() provided extra fd {}", fds[i]); // ensure we update the fds in cmsg @@ -351,7 +351,7 @@ hook_internal(long syscall_number, reinterpret_cast(arg1), reinterpret_cast(arg2)); - if(*result >= 0) { + if (*result >= 0) { *result = CTX->register_internal_fd(*result); } break; @@ -362,18 +362,18 @@ hook_internal(long syscall_number, static_cast(arg1), arg2); - if(*result >= 0 && - (static_cast(arg1) == F_DUPFD || - static_cast(arg1) == F_DUPFD_CLOEXEC)) { + if (*result >= 0 && + (static_cast(arg1) == F_DUPFD || + static_cast(arg1) == F_DUPFD_CLOEXEC)) { *result = CTX->register_internal_fd(*result); } break; case SYS_close: *result = syscall_no_intercept(syscall_number, - static_cast(arg0)); + static_cast(arg0)); - if(*result == 0) { + if (*result == 0) { CTX->unregister_internal_fd(arg0); } break; @@ -383,14 +383,14 @@ hook_internal(long syscall_number, // (syscalls forwarded to the kernel that return are logged in // hook_forwarded_syscall()) ::save_current_syscall_info( - gkfs::syscall::from_internal_code | + gkfs::syscall::from_internal_code | gkfs::syscall::to_kernel | gkfs::syscall::not_executed); return gkfs::syscall::forward_to_kernel; } LOG(SYSCALL, gkfs::syscall::from_internal_code | - gkfs::syscall::to_hook | gkfs::syscall::executed, + gkfs::syscall::to_hook | gkfs::syscall::executed, syscall_number, args, *result); return gkfs::syscall::hooked; @@ -402,61 +402,61 @@ hook_internal(long syscall_number, * * This hook is used to implement any application filesystem-related syscalls. */ -static inline +static inline int hook(long syscall_number, long arg0, long arg1, long arg2, long arg3, long arg4, long arg5, - long *result) { + long* result) { #if defined(GKFS_ENABLE_LOGGING) && defined(GKFS_DEBUG_BUILD) - const long args[gkfs::syscall::MAX_ARGS] = { - arg0, arg1, arg2, arg3, arg4, arg5 - }; + const long args[gkfs::syscall::MAX_ARGS] = { + arg0, arg1, arg2, arg3, arg4, arg5 + }; #endif - LOG(SYSCALL, gkfs::syscall::from_external_code | - gkfs::syscall::to_hook | gkfs::syscall::not_executed, + LOG(SYSCALL, gkfs::syscall::from_external_code | + gkfs::syscall::to_hook | gkfs::syscall::not_executed, syscall_number, args); switch (syscall_number) { case SYS_execve: *result = syscall_no_intercept(syscall_number, - reinterpret_cast(arg0), - reinterpret_cast(arg1), - reinterpret_cast(arg2)); + reinterpret_cast(arg0), + reinterpret_cast(arg1), + reinterpret_cast(arg2)); break; #ifdef SYS_execveat case SYS_execveat: *result = syscall_no_intercept(syscall_number, - arg0, - reinterpret_cast(arg1), - reinterpret_cast(arg2), - reinterpret_cast(arg3), - arg4); + arg0, + reinterpret_cast(arg1), + reinterpret_cast(arg2), + reinterpret_cast(arg3), + arg4); break; #endif case SYS_open: *result = hook_openat(AT_FDCWD, - reinterpret_cast(arg0), - static_cast(arg1), - static_cast(arg2)); + reinterpret_cast(arg0), + static_cast(arg1), + static_cast(arg2)); break; case SYS_creat: *result = hook_openat(AT_FDCWD, - reinterpret_cast(arg0), - O_WRONLY | O_CREAT | O_TRUNC, - static_cast(arg1)); + reinterpret_cast(arg0), + O_WRONLY | O_CREAT | O_TRUNC, + static_cast(arg1)); break; case SYS_openat: *result = hook_openat(static_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2), - static_cast(arg3)); + reinterpret_cast(arg1), + static_cast(arg2), + static_cast(arg3)); break; case 
SYS_close: @@ -475,14 +475,14 @@ int hook(long syscall_number, case SYS_fstat: *result = hook_fstat(static_cast(arg0), - reinterpret_cast(arg1)); + reinterpret_cast(arg1)); break; case SYS_newfstatat: *result = hook_fstatat(static_cast(arg0), - reinterpret_cast(arg1), - reinterpret_cast(arg2), - static_cast(arg3)); + reinterpret_cast(arg1), + reinterpret_cast(arg2), + static_cast(arg3)); break; case SYS_read: @@ -493,40 +493,40 @@ int hook(long syscall_number, case SYS_pread64: *result = hook_pread(static_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2), - static_cast(arg3)); + reinterpret_cast(arg1), + static_cast(arg2), + static_cast(arg3)); break; case SYS_pwrite64: *result = hook_pwrite(static_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2), - static_cast(arg3)); + reinterpret_cast(arg1), + static_cast(arg2), + static_cast(arg3)); break; case SYS_write: *result = hook_write(static_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2)); + reinterpret_cast(arg1), + static_cast(arg2)); break; case SYS_writev: *result = hook_writev(static_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2)); + reinterpret_cast(arg1), + static_cast(arg2)); break; case SYS_pwritev: *result = hook_pwritev(static_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2), - static_cast(arg3), - static_cast(arg4)); + reinterpret_cast(arg1), + static_cast(arg2), + static_cast(arg3), + static_cast(arg4)); break; case SYS_unlink: *result = hook_unlinkat(AT_FDCWD, - reinterpret_cast(arg0), + reinterpret_cast(arg0), 0); break; @@ -538,37 +538,37 @@ int hook(long syscall_number, case SYS_rmdir: *result = hook_unlinkat(AT_FDCWD, - reinterpret_cast(arg0), + reinterpret_cast(arg0), AT_REMOVEDIR); break; case SYS_symlink: - *result = hook_symlinkat(reinterpret_cast(arg0), - AT_FDCWD, - reinterpret_cast(arg1)); + *result = hook_symlinkat(reinterpret_cast(arg0), + AT_FDCWD, + reinterpret_cast(arg1)); break; case SYS_symlinkat: - *result = hook_symlinkat(reinterpret_cast(arg0), - static_cast(arg1), - reinterpret_cast(arg2)); + *result = hook_symlinkat(reinterpret_cast(arg0), + static_cast(arg1), + reinterpret_cast(arg2)); break; case SYS_access: *result = hook_access(reinterpret_cast(arg0), - static_cast(arg1)); + static_cast(arg1)); break; case SYS_faccessat: *result = hook_faccessat(static_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2)); + reinterpret_cast(arg1), + static_cast(arg2)); break; case SYS_lseek: *result = hook_lseek(static_cast(arg0), - static_cast(arg1), - static_cast(arg2)); + static_cast(arg1), + static_cast(arg2)); break; case SYS_truncate: @@ -578,7 +578,7 @@ int hook(long syscall_number, case SYS_ftruncate: *result = hook_ftruncate(static_cast(arg0), - static_cast(arg1)); + static_cast(arg1)); break; case SYS_dup: @@ -598,26 +598,26 @@ int hook(long syscall_number, case SYS_getdents: *result = hook_getdents(static_cast(arg0), - reinterpret_cast(arg1), + reinterpret_cast(arg1), static_cast(arg2)); break; case SYS_getdents64: *result = hook_getdents64(static_cast(arg0), - reinterpret_cast(arg1), + reinterpret_cast(arg1), static_cast(arg2)); break; case SYS_mkdirat: *result = hook_mkdirat(static_cast(arg0), - reinterpret_cast(arg1), + reinterpret_cast(arg1), static_cast(arg2)); break; case SYS_mkdir: *result = hook_mkdirat(AT_FDCWD, - reinterpret_cast(arg0), - static_cast(arg1)); + reinterpret_cast(arg0), + static_cast(arg1)); break; case SYS_chmod: @@ -638,7 +638,7 @@ int hook(long syscall_number, break; case SYS_chdir: - *result = hook_chdir(reinterpret_cast(arg0)); 
+ *result = hook_chdir(reinterpret_cast(arg0)); break; case SYS_fchdir: @@ -646,21 +646,21 @@ int hook(long syscall_number, break; case SYS_getcwd: - *result = hook_getcwd(reinterpret_cast(arg0), + *result = hook_getcwd(reinterpret_cast(arg0), static_cast(arg1)); break; case SYS_readlink: *result = hook_readlinkat(AT_FDCWD, - reinterpret_cast(arg0), - reinterpret_cast(arg1), + reinterpret_cast(arg0), + reinterpret_cast(arg1), static_cast(arg2)); break; case SYS_readlinkat: *result = hook_readlinkat(static_cast(arg0), - reinterpret_cast(arg1), - reinterpret_cast(arg2), + reinterpret_cast(arg1), + reinterpret_cast(arg2), static_cast(arg3)); break; @@ -672,36 +672,36 @@ int hook(long syscall_number, case SYS_rename: *result = hook_renameat(AT_FDCWD, - reinterpret_cast(arg0), + reinterpret_cast(arg0), AT_FDCWD, - reinterpret_cast(arg1), + reinterpret_cast(arg1), 0); break; case SYS_renameat: *result = hook_renameat(static_cast(arg0), - reinterpret_cast(arg1), + reinterpret_cast(arg1), static_cast(arg2), - reinterpret_cast(arg3), + reinterpret_cast(arg3), 0); break; case SYS_renameat2: *result = hook_renameat(static_cast(arg0), - reinterpret_cast(arg1), + reinterpret_cast(arg1), static_cast(arg2), - reinterpret_cast(arg3), + reinterpret_cast(arg3), static_cast(arg4)); break; case SYS_fstatfs: *result = hook_fstatfs(static_cast(arg0), - reinterpret_cast(arg1)); + reinterpret_cast(arg1)); break; case SYS_statfs: - *result = hook_statfs(reinterpret_cast(arg0), - reinterpret_cast(arg1)); + *result = hook_statfs(reinterpret_cast(arg0), + reinterpret_cast(arg1)); break; default: @@ -709,14 +709,14 @@ int hook(long syscall_number, // (syscalls forwarded to the kernel that return are logged in // hook_forwarded_syscall()) ::save_current_syscall_info( - gkfs::syscall::from_external_code | + gkfs::syscall::from_external_code | gkfs::syscall::to_kernel | gkfs::syscall::not_executed); return gkfs::syscall::forward_to_kernel; } LOG(SYSCALL, gkfs::syscall::from_external_code | - gkfs::syscall::to_hook | gkfs::syscall::executed, + gkfs::syscall::to_hook | gkfs::syscall::executed, syscall_number, args, *result); return gkfs::syscall::hooked; @@ -726,77 +726,76 @@ static void hook_forwarded_syscall(long syscall_number, long arg0, long arg1, long arg2, long arg3, long arg4, long arg5, - long result) -{ + long result) { - if(::get_current_syscall_info() == gkfs::syscall::no_info) { + if (::get_current_syscall_info() == gkfs::syscall::no_info) { return; } #if defined(GKFS_ENABLE_LOGGING) && defined(GKFS_DEBUG_BUILD) - const long args[gkfs::syscall::MAX_ARGS] = { - arg0, arg1, arg2, arg3, arg4, arg5 - }; + const long args[gkfs::syscall::MAX_ARGS] = { + arg0, arg1, arg2, arg3, arg4, arg5 + }; #endif - LOG(SYSCALL, - ::get_current_syscall_info() | - gkfs::syscall::executed, + LOG(SYSCALL, + ::get_current_syscall_info() | + gkfs::syscall::executed, syscall_number, args, result); ::reset_current_syscall_info(); } static void -hook_clone_at_child(unsigned long flags, +hook_clone_at_child(unsigned long flags, void* child_stack, - int* ptid, - int* ctid, - long newtls) { + int* ptid, + int* ctid, + long newtls) { #if defined(GKFS_ENABLE_LOGGING) && defined(GKFS_DEBUG_BUILD) const long args[gkfs::syscall::MAX_ARGS] = { - static_cast(flags), - reinterpret_cast(child_stack), - reinterpret_cast(ptid), - reinterpret_cast(ctid), - static_cast(newtls), - 0}; + static_cast(flags), + reinterpret_cast(child_stack), + reinterpret_cast(ptid), + reinterpret_cast(ctid), + static_cast(newtls), + 0}; #endif reentrance_guard_flag = true; 
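// A minimal sketch of the reentrance-guard pattern these hooks rely on,
// assuming only a thread_local flag as in this file (the names below are
// illustrative, not the patch's): a hook that itself issues syscalls, e.g.
// for logging, raises the flag so that any nested syscall is forwarded to
// the kernel instead of being hooked recursively.
static thread_local bool guard = false;

enum hook_result { HOOKED = 0, FORWARD_TO_KERNEL = 1 };

static int guarded_hook(long syscall_number, long* result) {
    (void) syscall_number;
    if (guard) {
        // already inside a hook: let the kernel handle the nested call
        return FORWARD_TO_KERNEL;
    }
    guard = true;
    *result = 0; // ... do the actual hooking work here; it may log and
                 // thereby re-enter, which is now safely forwarded ...
    guard = false;
    return HOOKED;
}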
- LOG(SYSCALL, - ::get_current_syscall_info() | - gkfs::syscall::executed, + LOG(SYSCALL, + ::get_current_syscall_info() | + gkfs::syscall::executed, SYS_clone, args, 0); reentrance_guard_flag = false; } static void -hook_clone_at_parent(unsigned long flags, +hook_clone_at_parent(unsigned long flags, void* child_stack, - int* ptid, - int* ctid, - long newtls, - long returned_pid) { + int* ptid, + int* ctid, + long newtls, + long returned_pid) { #if defined(GKFS_ENABLE_LOGGING) && defined(GKFS_DEBUG_BUILD) const long args[gkfs::syscall::MAX_ARGS] = { - static_cast(flags), - reinterpret_cast(child_stack), - reinterpret_cast(ptid), - reinterpret_cast(ctid), - static_cast(newtls), - 0}; + static_cast(flags), + reinterpret_cast(child_stack), + reinterpret_cast(ptid), + reinterpret_cast(ctid), + static_cast(newtls), + 0}; #endif reentrance_guard_flag = true; - LOG(SYSCALL, - ::get_current_syscall_info() | - gkfs::syscall::executed, + LOG(SYSCALL, + ::get_current_syscall_info() | + gkfs::syscall::executed, SYS_clone, args, returned_pid); reentrance_guard_flag = false; @@ -807,13 +806,13 @@ int internal_hook_guard_wrapper(long syscall_number, long arg0, long arg1, long arg2, long arg3, long arg4, long arg5, - long *syscall_return_value) { + long* syscall_return_value) { assert(CTX->interception_enabled()); if (reentrance_guard_flag) { ::save_current_syscall_info( - gkfs::syscall::from_internal_code | + gkfs::syscall::from_internal_code | gkfs::syscall::to_kernel | gkfs::syscall::not_executed); return gkfs::syscall::forward_to_kernel; @@ -851,7 +850,7 @@ int hook_guard_wrapper(long syscall_number, long arg0, long arg1, long arg2, long arg3, long arg4, long arg5, - long *syscall_return_value) { + long* syscall_return_value) { assert(CTX->interception_enabled()); @@ -859,9 +858,9 @@ hook_guard_wrapper(long syscall_number, if (reentrance_guard_flag) { int oerrno = errno; - was_hooked = hook_internal(syscall_number, - arg0, arg1, arg2, arg3, arg4, arg5, - syscall_return_value); + was_hooked = hook_internal(syscall_number, + arg0, arg1, arg2, arg3, arg4, arg5, + syscall_return_value); errno = oerrno; return was_hooked; } diff --git a/src/client/logging.cpp b/src/client/logging.cpp index 306855c89..8bbeb9312 100644 --- a/src/client/logging.cpp +++ b/src/client/logging.cpp @@ -14,9 +14,13 @@ #include #include #include + #include + +extern "C" { #include #include +} #ifdef GKFS_ENABLE_LOGGING #include diff --git a/src/client/open_dir.cpp b/src/client/open_dir.cpp index 3ce503c74..3b1cf0862 100644 --- a/src/client/open_dir.cpp +++ b/src/client/open_dir.cpp @@ -14,11 +14,9 @@ #include #include #include -#include - -DirEntry::DirEntry(const std::string& name, const FileType type): - name_(name), type_(type) { +DirEntry::DirEntry(const std::string& name, const FileType type) : + name_(name), type_(type) { } const std::string& DirEntry::name() { @@ -31,7 +29,7 @@ FileType DirEntry::type() { OpenDir::OpenDir(const std::string& path) : - OpenFile(path, 0, FileType::directory) { + OpenFile(path, 0, FileType::directory) { } diff --git a/src/client/open_file_map.cpp b/src/client/open_file_map.cpp index f47853385..6bb1ff878 100644 --- a/src/client/open_file_map.cpp +++ b/src/client/open_file_map.cpp @@ -11,7 +11,6 @@ SPDX-License-Identifier: MIT */ -#include #include #include @@ -19,12 +18,15 @@ #include #include +extern "C" { +#include +} + using namespace std; OpenFile::OpenFile(const string& path, const int flags, FileType type) : - type_(type), - path_(path) -{ + type_(type), + path_(path) { // set flags to 
OpenFile if (flags & O_CREAT) flags_[gkfs::client::to_underlying(OpenFile_flags::creat)] = true; @@ -42,14 +44,9 @@ OpenFile::OpenFile(const string& path, const int flags, FileType type) : pos_ = 0; // If O_APPEND flag is used, it will be used before each write. } -OpenFileMap::OpenFileMap(): - fd_idx(10000), - fd_validation_needed(false) - {} - -OpenFile::~OpenFile() { - -} +OpenFileMap::OpenFileMap() : + fd_idx(10000), + fd_validation_needed(false) {} string OpenFile::path() const { return path_; diff --git a/src/client/preload.cpp b/src/client/preload.cpp index 7812a61cb..f9e2e8434 100644 --- a/src/client/preload.cpp +++ b/src/client/preload.cpp @@ -11,46 +11,28 @@ SPDX-License-Identifier: MIT */ -#include #include #include -#include -#include "global/rpc/rpc_types.hpp" #include #include #include #include -#include -#include -#include -#include -#include +#include #include +#include + +extern "C" { +#include +} using namespace std; // make sure that things are only initialized once static pthread_once_t init_env_thread = PTHREAD_ONCE_INIT; -// RPC IDs -hg_id_t rpc_config_id; -hg_id_t rpc_mk_node_id; -hg_id_t rpc_stat_id; -hg_id_t rpc_rm_node_id; -hg_id_t rpc_decr_size_id; -hg_id_t rpc_update_metadentry_id; -hg_id_t rpc_get_metadentry_size_id; -hg_id_t rpc_update_metadentry_size_id; -hg_id_t rpc_mk_symlink_id; -hg_id_t rpc_write_data_id; -hg_id_t rpc_read_data_id; -hg_id_t rpc_trunc_data_id; -hg_id_t rpc_get_dirents_id; -hg_id_t rpc_chunk_stat_id; - std::unique_ptr ld_network_service; static inline void exit_error_msg(int errcode, const string& msg) { @@ -81,35 +63,15 @@ bool init_hermes_client(const std::string& transport_prefix) { opts |= hermes::use_auto_sm; #endif - ld_network_service = - std::make_unique( - hermes::get_transport_type(transport_prefix), opts); + ld_network_service = + std::make_unique( + hermes::get_transport_type(transport_prefix), opts); ld_network_service->run(); } catch (const std::exception& ex) { - fmt::print(stderr, "Failed to initialize Hermes RPC client {}\n", + fmt::print(stderr, "Failed to initialize Hermes RPC client {}\n", ex.what()); return false; } - - rpc_config_id = gkfs::rpc::fs_config::public_id; - rpc_mk_node_id = gkfs::rpc::create::public_id; - rpc_stat_id = gkfs::rpc::stat::public_id; - rpc_rm_node_id = gkfs::rpc::remove::public_id; - rpc_decr_size_id = gkfs::rpc::decr_size::public_id; - rpc_update_metadentry_id = gkfs::rpc::update_metadentry::public_id; - rpc_get_metadentry_size_id = gkfs::rpc::get_metadentry_size::public_id; - rpc_update_metadentry_size_id = gkfs::rpc::update_metadentry::public_id; - -#ifdef HAS_SYMLINKS - rpc_mk_symlink_id = gkfs::rpc::mk_symlink::public_id; -#endif // HAS_SYMLINKS - - rpc_write_data_id = gkfs::rpc::write_data::public_id; - rpc_read_data_id = gkfs::rpc::read_data::public_id; - rpc_trunc_data_id = gkfs::rpc::trunc_data::public_id; - rpc_get_dirents_id = gkfs::rpc::get_dirents::public_id; - rpc_chunk_stat_id = gkfs::rpc::chunk_stat::public_id; - return true; } @@ -157,7 +119,7 @@ void log_prog_name() { LOG(ERROR, "Unable to open cmdline file"); throw std::runtime_error("Unable to open cmdline file"); } - if(!getline(cmdline, line)) { + if (!getline(cmdline, line)) { throw std::runtime_error("Unable to read cmdline file"); } std::replace(line.begin(), line.end(), '\0', ' '); diff --git a/src/client/preload_context.cpp b/src/client/preload_context.cpp index 0728e18ae..a7f702353 100644 --- a/src/client/preload_context.cpp +++ b/src/client/preload_context.cpp @@ -11,24 +11,27 @@ SPDX-License-Identifier: MIT */ 
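// The include blocks rewritten throughout this patch follow one pattern:
// C++ headers first, then the plain C interfaces (syscall tables, the
// intercept library) wrapped in extern "C" so that declarations from
// headers lacking their own C++ guards keep C linkage. A minimal,
// self-contained illustration; the header names are placeholders, not the
// ones elided above:
#include <string>

extern "C" {
#include <fcntl.h>
#include <unistd.h>
}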
-#include -#include -#include -#include +#include #include -#include #include #include #include #include +#include #include #include +#include + #include -#include + +extern "C" { +#include +#include +} decltype(PreloadContext::MIN_INTERNAL_FD) constexpr PreloadContext::MIN_INTERNAL_FD; @@ -36,14 +39,14 @@ decltype(PreloadContext::MAX_USER_FDS) constexpr PreloadContext::MAX_USER_FDS; PreloadContext::PreloadContext() : - ofm_(std::make_shared()), - fs_conf_(std::make_shared()) { + ofm_(std::make_shared()), + fs_conf_(std::make_shared()) { internal_fds_.set(); internal_fds_must_relocate_ = true; } -void +void PreloadContext::init_logging() { const std::string log_opts = @@ -60,19 +63,19 @@ PreloadContext::init_logging() { gkfs::env::get_var(gkfs::env::LOG_DEBUG_VERBOSITY, "0").c_str()); const std::string log_filter = - gkfs::env::get_var(gkfs::env::LOG_SYSCALL_FILTER, ""); + gkfs::env::get_var(gkfs::env::LOG_SYSCALL_FILTER, ""); #endif - const std::string trunc_val = - gkfs::env::get_var(gkfs::env::LOG_OUTPUT_TRUNC); + const std::string trunc_val = + gkfs::env::get_var(gkfs::env::LOG_OUTPUT_TRUNC); const bool log_trunc = !(!trunc_val.empty() && trunc_val[0] == 0); gkfs::log::create_global_logger(log_opts, log_output, log_trunc #ifdef GKFS_DEBUG_BUILD - , log_filter, log_verbosity + , log_filter, log_verbosity #endif - ); + ); } void PreloadContext::mountdir(const std::string& path) { @@ -119,7 +122,7 @@ void PreloadContext::local_host_id(uint64_t id) { } RelativizeStatus PreloadContext::relativize_fd_path(int dirfd, - const char * raw_path, + const char* raw_path, std::string& relative_path, bool resolve_last_link) const { @@ -162,7 +165,7 @@ RelativizeStatus PreloadContext::relativize_fd_path(int dirfd, return RelativizeStatus::external; } -bool PreloadContext::relativize_path(const char * raw_path, std::string& relative_path, bool resolve_last_link) const { +bool PreloadContext::relativize_path(const char* raw_path, std::string& relative_path, bool resolve_last_link) const { // Relativize path should be called only after the library constructor has been executed assert(interception_enabled_); // If we run the constructor we also already setup the mountdir @@ -173,7 +176,7 @@ bool PreloadContext::relativize_path(const char * raw_path, std::string& relativ std::string path; - if(raw_path[0] != PSP) { + if (raw_path[0] != PSP) { /* Path is not absolute, we need to prepend CWD; * First reserve enough space to minimize memory copy */ @@ -216,7 +219,7 @@ int PreloadContext::register_internal_fd(int fd) { assert(fd >= 0); - if(!internal_fds_must_relocate_) { + if (!internal_fds_must_relocate_) { LOG(DEBUG, "registering fd {} as internal (no relocation needed)", fd); assert(fd >= MIN_INTERNAL_FD); internal_fds_.reset(fd - MIN_INTERNAL_FD); @@ -228,10 +231,10 @@ int PreloadContext::register_internal_fd(int fd) { std::lock_guard lock(internal_fds_mutex_); const int pos = internal_fds_._Find_first(); - if(static_cast(pos) == internal_fds_.size()) { + if (static_cast(pos) == internal_fds_.size()) { throw std::runtime_error( -"Internal GekkoFS file descriptors exhausted, increase MAX_INTERNAL_FDS in " -"CMake, rebuild GekkoFS and try again."); + "Internal GekkoFS file descriptors exhausted, increase MAX_INTERNAL_FDS in " + "CMake, rebuild GekkoFS and try again."); } internal_fds_.reset(pos); @@ -240,19 +243,19 @@ int PreloadContext::register_internal_fd(int fd) { long args[gkfs::syscall::MAX_ARGS]{fd, pos + MIN_INTERNAL_FD, O_CLOEXEC}; #endif - LOG(SYSCALL, - gkfs::syscall::from_internal_code | + 
LOG(SYSCALL, + gkfs::syscall::from_internal_code | gkfs::syscall::to_kernel | - gkfs::syscall::not_executed, + gkfs::syscall::not_executed, SYS_dup3, args); - const int ifd = - ::syscall_no_intercept(SYS_dup3, fd, pos + MIN_INTERNAL_FD, O_CLOEXEC); + const int ifd = + ::syscall_no_intercept(SYS_dup3, fd, pos + MIN_INTERNAL_FD, O_CLOEXEC); - LOG(SYSCALL, - gkfs::syscall::from_internal_code | + LOG(SYSCALL, + gkfs::syscall::from_internal_code | gkfs::syscall::to_kernel | - gkfs::syscall::executed, + gkfs::syscall::executed, SYS_dup3, args, ifd); assert(::syscall_error_code(ifd) == 0); @@ -261,10 +264,10 @@ int PreloadContext::register_internal_fd(int fd) { long args2[gkfs::syscall::MAX_ARGS]{fd}; #endif - LOG(SYSCALL, - gkfs::syscall::from_internal_code | + LOG(SYSCALL, + gkfs::syscall::from_internal_code | gkfs::syscall::to_kernel | - gkfs::syscall::not_executed, + gkfs::syscall::not_executed, SYS_close, args2); #if defined(GKFS_ENABLE_LOGGING) && defined(GKFS_DEBUG_BUILD) @@ -273,10 +276,10 @@ int PreloadContext::register_internal_fd(int fd) { ::syscall_no_intercept(SYS_close, fd); #endif - LOG(SYSCALL, - gkfs::syscall::from_internal_code | + LOG(SYSCALL, + gkfs::syscall::from_internal_code | gkfs::syscall::to_kernel | - gkfs::syscall::executed, + gkfs::syscall::executed, SYS_close, args2, rv); LOG(DEBUG, " (fd {} relocated to ifd {})", fd, ifd); @@ -298,7 +301,7 @@ void PreloadContext::unregister_internal_fd(int fd) { bool PreloadContext::is_internal_fd(int fd) const { - if(fd < MIN_INTERNAL_FD) { + if (fd < MIN_INTERNAL_FD) { return false; } @@ -319,12 +322,12 @@ PreloadContext::protect_user_fds() { const auto fd_is_open = [](int fd) -> bool { const int ret = ::syscall_no_intercept(SYS_fcntl, fd, F_GETFD); - return ::syscall_error_code(ret) == 0 || + return ::syscall_error_code(ret) == 0 || ::syscall_error_code(ret) != EBADF; }; - for(int fd = 0; fd < MAX_USER_FDS; ++fd) { - if(fd_is_open(fd)) { + for (int fd = 0; fd < MAX_USER_FDS; ++fd) { + if (fd_is_open(fd)) { LOG(DEBUG, " fd {} was already in use, skipping", fd); continue; } @@ -340,15 +343,15 @@ PreloadContext::protect_user_fds() { void PreloadContext::unprotect_user_fds() { - for(std::size_t fd = 0; fd < protected_fds_.size(); ++fd) { - if(!protected_fds_[fd]) { + for (std::size_t fd = 0; fd < protected_fds_.size(); ++fd) { + if (!protected_fds_[fd]) { continue; } - const int ret = - ::syscall_error_code(::syscall_no_intercept(SYS_close, fd)); + const int ret = + ::syscall_error_code(::syscall_no_intercept(SYS_close, fd)); - if(ret != 0) { + if (ret != 0) { LOG(ERROR, "Failed to unprotect fd") } } diff --git a/src/client/preload_util.cpp b/src/client/preload_util.cpp index 233123576..7796f782c 100644 --- a/src/client/preload_util.cpp +++ b/src/client/preload_util.cpp @@ -14,10 +14,11 @@ #include #include #include + #include #include #include -#include + #include #include @@ -25,7 +26,10 @@ #include #include #include + +extern "C" { #include +} using namespace std; @@ -59,7 +63,7 @@ int gkfs::client::metadata_to_stat(const std::string& path, const Metadata& md, attr.st_size = md.target_path().size() + CTX->mountdir().size(); else #endif - attr.st_size = md.size(); + attr.st_size = md.size(); if (CTX->fs_conf()->atime_state) { attr.st_atim.tv_sec = md.atime(); @@ -126,7 +130,7 @@ hermes::endpoint lookup_endpoint(const std::string& uri, } catch (const exception& ex) { error_msg = ex.what(); - LOG(WARNING, "Failed to lookup address '{}'. Attempts [{}/{}]", + LOG(WARNING, "Failed to lookup address '{}'. 
Attempts [{}/{}]", uri, attempts + 1, max_retries); // Wait a random amount of time and try again @@ -138,7 +142,7 @@ hermes::endpoint lookup_endpoint(const std::string& uri, } while (++attempts < max_retries); throw std::runtime_error( - fmt::format("Endpoint for address '{}' could not be found ({})", + fmt::format("Endpoint for address '{}' could not be found ({})", uri, error_msg)); } @@ -179,11 +183,11 @@ void gkfs::client::load_hosts() { ::random_device rd; // obtain a random number from hardware ::mt19937 g(rd()); // seed the random generator ::shuffle(host_ids.begin(), host_ids.end(), g); // Shuffle hosts vector - // lookup addresses and put abstract server addresses into rpc_addressesre + // lookup addresses and put abstract server addresses into rpc_addresses for (const auto& id: host_ids) { - const auto& hostname = hosts.at(id).first; - const auto& uri = hosts.at(id).second; + const auto& hostname = hosts.at(id).first; + const auto& uri = hosts.at(id).second; addrs[id] = ::lookup_endpoint(uri); @@ -193,7 +197,7 @@ void gkfs::client::load_hosts() { local_host_found = true; } - LOG(DEBUG, "Found peer: {}", addrs[id].to_string()); + LOG(DEBUG, "Found peer: {}", addrs[id].to_string()); } if (!local_host_found) { diff --git a/src/client/resolve.cpp b/src/client/resolve.cpp index da950d902..3d43661bd 100644 --- a/src/client/resolve.cpp +++ b/src/client/resolve.cpp @@ -11,17 +11,22 @@ SPDX-License-Identifier: MIT */ + +#include +#include +#include + +#include + #include #include +#include + +extern "C" { #include #include -#include #include - -#include "global/path_util.hpp" -#include "client/preload.hpp" -#include "client/logging.hpp" -#include "client/env.hpp" +} static const std::string excluded_paths[2] = {"sys/", "proc/"}; @@ -39,19 +44,20 @@ static const std::string excluded_paths[2] = {"sys/", "proc/"}; * tot_comp == 4; * ``` */ -unsigned int path_match_components(const std::string& path, unsigned int &path_components, const std::vector& components) { +unsigned int path_match_components(const std::string& path, unsigned int& path_components, + const std::vector& components) { unsigned int matched = 0; unsigned int processed_components = 0; std::string::size_type comp_size = 0; // size of current component std::string::size_type start = 0; // start index of curr component std::string::size_type end = 0; // end index of curr component (last processed Path Separator "PSP") - while(++end < path.size()) { + while (++end < path.size()) { start = end; // Find next component end = path.find(PSP, start); - if(end == std::string::npos) { + if (end == std::string::npos) { end = path.size(); } @@ -78,9 +84,9 @@ unsigned int path_match_components(const std::string& path, unsigned int &path_c * returns true if the resolved path fall inside GekkoFS namespace, * and false otherwise. */ - bool resolve_path (const std::string& path, std::string& resolved, bool resolve_last_link) { +bool resolve_path(const std::string& path, std::string& resolved, bool resolve_last_link) { - LOG(DEBUG, "path: \"{}\", resolved: \"{}\", resolve_last_link: {}", + LOG(DEBUG, "path: \"{}\", resolved: \"{}\", resolve_last_link: {}", path, resolved, resolve_last_link); assert(is_absolute_path(path)); @@ -108,13 +114,13 @@ unsigned int path_match_components(const std::string& path, unsigned int &path_c start = end; /* Skip sequence of multiple path-separators. 
*/ - while(start < path.size() && path[start] == PSP) { + while (start < path.size() && path[start] == PSP) { ++start; } // Find next component end = path.find(PSP, start); - if(end == std::string::npos) { + if (end == std::string::npos) { end = path.size(); } comp_size = end - start; @@ -127,7 +133,7 @@ unsigned int path_match_components(const std::string& path, unsigned int &path_c // component is '.', we skip it continue; } - if (comp_size == 2 && path.at(start) == '.' && path.at(start+1) == '.') { + if (comp_size == 2 && path.at(start) == '.' && path.at(start + 1) == '.') { // component is '..' we need to rollback resolved path if (resolved.size() > 0) { resolved.erase(last_slash_pos); @@ -172,7 +178,7 @@ unsigned int path_match_components(const std::string& path, unsigned int &path_c if (realpath(resolved.c_str(), link_resolved.get()) == nullptr) { LOG(ERROR, "Failed to get realpath for link \"{}\". " - "Error: {}", resolved, ::strerror(errno)); + "Error: {}", resolved, ::strerror(errno)); resolved.append(path, end, std::string::npos); return false; @@ -184,8 +190,8 @@ unsigned int path_match_components(const std::string& path, unsigned int &path_c last_slash_pos = resolved.find_last_of(PSP); continue; } else if ((!S_ISDIR(st.st_mode)) && (end != path.size())) { - resolved.append(path, end, std::string::npos); - return false; + resolved.append(path, end, std::string::npos); + return false; } } else { // Inside GekkoFS @@ -215,7 +221,7 @@ std::string get_sys_cwd() { "Failed to retrieve current working directory"); } // getcwd could return "(unreachable)" in some cases - if(temp[0] != PSP) { + if (temp[0] != PSP) { throw std::runtime_error( "Current working directory is unreachable"); } @@ -239,7 +245,7 @@ void set_env_cwd(const std::string& path) { LOG(DEBUG, "Setting {} to \"{}\"", gkfs::env::CWD, path); - if(setenv(gkfs::env::CWD, path.c_str(), 1)) { + if (setenv(gkfs::env::CWD, path.c_str(), 1)) { LOG(ERROR, "Failed while setting {}: {}", gkfs::env::CWD, std::strerror(errno)); throw std::system_error(errno, @@ -252,9 +258,9 @@ void unset_env_cwd() { LOG(DEBUG, "Clearing {}()", gkfs::env::CWD); - if(unsetenv(gkfs::env::CWD)) { + if (unsetenv(gkfs::env::CWD)) { - LOG(ERROR, "Failed to clear {}: {}", + LOG(ERROR, "Failed to clear {}: {}", gkfs::env::CWD, std::strerror(errno)); throw std::system_error(errno, @@ -273,7 +279,7 @@ void init_cwd() { } void set_cwd(const std::string& path, bool internal) { - if(internal) { + if (internal) { set_sys_cwd(CTX->mountdir()); set_env_cwd(path); } else { diff --git a/src/client/rpc/ld_rpc_data_ws.cpp b/src/client/rpc/ld_rpc_data_ws.cpp index 74171fcd3..776c75019 100644 --- a/src/client/rpc/ld_rpc_data_ws.cpp +++ b/src/client/rpc/ld_rpc_data_ws.cpp @@ -22,18 +22,17 @@ namespace rpc_send { - -using namespace std; + using namespace std; // TODO If we decide to keep this functionality with one segment, the function can be merged mostly. 
// Code is mostly redundant -/** - * Sends an RPC request to a specific node to pull all chunks that belong to him - */ -ssize_t write(const string& path, const void* buf, const bool append_flag, - const off64_t in_offset, const size_t write_size, - const int64_t updated_metadentry_size) { + /** + * Sends an RPC request to a specific node to pull all chunks that belong to him + */ + ssize_t write(const string& path, const void* buf, const bool append_flag, + const off64_t in_offset, const size_t write_size, + const int64_t updated_metadentry_size) { assert(write_size > 0); @@ -55,150 +54,150 @@ ssize_t write(const string& path, const void* buf, const bool append_flag, // targets for the first and last chunk as they need special treatment uint64_t chnk_start_target = 0; - uint64_t chnk_end_target = 0; + uint64_t chnk_end_target = 0; - for (uint64_t chnk_id = chnk_start; chnk_id <= chnk_end; chnk_id++) { - auto target = CTX->distributor()->locate_data(path, chnk_id); + for (uint64_t chnk_id = chnk_start; chnk_id <= chnk_end; chnk_id++) { + auto target = CTX->distributor()->locate_data(path, chnk_id); - if (target_chnks.count(target) == 0) { - target_chnks.insert( - std::make_pair(target, std::vector{chnk_id})); - targets.push_back(target); - } else { - target_chnks[target].push_back(chnk_id); - } + if (target_chnks.count(target) == 0) { + target_chnks.insert( + std::make_pair(target, std::vector{chnk_id})); + targets.push_back(target); + } else { + target_chnks[target].push_back(chnk_id); + } - // set first and last chnk targets - if (chnk_id == chnk_start) { - chnk_start_target = target; - } + // set first and last chnk targets + if (chnk_id == chnk_start) { + chnk_start_target = target; + } - if (chnk_id == chnk_end) { - chnk_end_target = target; + if (chnk_id == chnk_end) { + chnk_end_target = target; + } } - } - // some helper variables for async RPC - std::vector bufseq{ - hermes::mutable_buffer{const_cast(buf), write_size}, - }; + // some helper variables for async RPC + std::vector bufseq{ + hermes::mutable_buffer{const_cast(buf), write_size}, + }; - // expose user buffers so that they can serve as RDMA data sources - // (these are automatically "unexposed" when the destructor is called) - hermes::exposed_memory local_buffers; + // expose user buffers so that they can serve as RDMA data sources + // (these are automatically "unexposed" when the destructor is called) + hermes::exposed_memory local_buffers; - try { - local_buffers = - ld_network_service->expose(bufseq, hermes::access_mode::read_only); - - } catch (const std::exception& ex) { - LOG(ERROR, "Failed to expose buffers for RMA"); - errno = EBUSY; - return -1; - } - - std::vector> handles; - - // Issue non-blocking RPC requests and wait for the result later - // - // TODO(amiranda): This could be simplified by adding a vector of inputs - // to async_engine::broadcast(). 
This would allow us to avoid manually - // looping over handles as we do below - for(const auto& target : targets) { - - // total chunk_size for target - auto total_chunk_size = target_chnks[target].size() * gkfs_config::rpc::chunksize; + try { + local_buffers = + ld_network_service->expose(bufseq, hermes::access_mode::read_only); - // receiver of first chunk must subtract the offset from first chunk - if (target == chnk_start_target) { - total_chunk_size -= chnk_lpad(offset, gkfs_config::rpc::chunksize); + } catch (const std::exception& ex) { + LOG(ERROR, "Failed to expose buffers for RMA"); + errno = EBUSY; + return -1; } - // receiver of last chunk must subtract - if (target == chnk_end_target) { - total_chunk_size -= chnk_rpad(offset + write_size, gkfs_config::rpc::chunksize); - } + std::vector> handles; - auto endp = CTX->hosts().at(target); + // Issue non-blocking RPC requests and wait for the result later + // + // TODO(amiranda): This could be simplified by adding a vector of inputs + // to async_engine::broadcast(). This would allow us to avoid manually + // looping over handles as we do below + for (const auto& target : targets) { - try { + // total chunk_size for target + auto total_chunk_size = target_chnks[target].size() * gkfs_config::rpc::chunksize; - LOG(DEBUG, "Sending RPC ..."); - - gkfs::rpc::write_data::input in( - path, - // first offset in targets is the chunk with - // a potential offset - chnk_lpad(offset, gkfs_config::rpc::chunksize), - target, - CTX->hosts().size(), - // number of chunks handled by that destination - target_chnks[target].size(), - // chunk start id of this write - chnk_start, - // chunk end id of this write - chnk_end, - // total size to write - total_chunk_size, - local_buffers); - - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that - // we can retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. When that happens we can remove the .at(0) :/ - handles.emplace_back( - ld_network_service->post(endp, in)); - - LOG(DEBUG, "host: {}, path: \"{}\", chunks: {}, size: {}, offset: {}", - target, path, in.chunk_n(), total_chunk_size, in.offset()); - - } catch(const std::exception& ex) { - LOG(ERROR, "Unable to send non-blocking rpc for " - "path \"{}\" [peer: {}]", path, target); - errno = EBUSY; - return -1; - } - } + // receiver of first chunk must subtract the offset from first chunk + if (target == chnk_start_target) { + total_chunk_size -= chnk_lpad(offset, gkfs_config::rpc::chunksize); + } - // Wait for RPC responses and then get response and add it to out_size - // which is the written size All potential outputs are served to free - // resources regardless of errors, although an errorcode is set. - bool error = false; - ssize_t out_size = 0; - std::size_t idx = 0; + // receiver of last chunk must subtract + if (target == chnk_end_target) { + total_chunk_size -= chnk_rpad(offset + write_size, gkfs_config::rpc::chunksize); + } - for(const auto& h : handles) { - try { - // XXX We might need a timeout here to not wait forever for an - // output that never comes? 
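// Illustrative arithmetic behind the first/last-chunk adjustment above.
// The helpers below are hypothetical; the patch uses chnk_lpad()/chnk_rpad()
// from global/chunk_calc_util.hpp, which are assumed to compute the same
// quantities.
#include <cstddef>
#include <cstdint>

// bytes of the first chunk that precede the start of the write
static std::size_t left_pad(std::uint64_t offset, std::size_t chunksize) {
    return offset % chunksize;
}

// bytes of the last chunk that lie behind the end of the write
static std::size_t right_pad(std::uint64_t end, std::size_t chunksize) {
    return (chunksize - (end % chunksize)) % chunksize;
}

// Example: with a chunk size of 524288 bytes, offset 100 and write_size
// 1048576, chunks 0..2 are touched. The per-target totals start at
// chunk_count * chunksize; the target holding chunk 0 subtracts
// left_pad = 100 and the target holding chunk 2 subtracts
// right_pad = 524188, so the grand total is 3 * 524288 - 100 - 524188
// = 1048576 = write_size.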
- auto out = h.get().at(0); + auto endp = CTX->hosts().at(target); + + try { + + LOG(DEBUG, "Sending RPC ..."); + + gkfs::rpc::write_data::input in( + path, + // first offset in targets is the chunk with + // a potential offset + chnk_lpad(offset, gkfs_config::rpc::chunksize), + target, + CTX->hosts().size(), + // number of chunks handled by that destination + target_chnks[target].size(), + // chunk start id of this write + chnk_start, + // chunk end id of this write + chnk_end, + // total size to write + total_chunk_size, + local_buffers); + + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that + // we can retry for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. When that happens we can remove the .at(0) :/ + handles.emplace_back( + ld_network_service->post(endp, in)); + + LOG(DEBUG, "host: {}, path: \"{}\", chunks: {}, size: {}, offset: {}", + target, path, in.chunk_n(), total_chunk_size, in.offset()); + + } catch (const std::exception& ex) { + LOG(ERROR, "Unable to send non-blocking rpc for " + "path \"{}\" [peer: {}]", path, target); + errno = EBUSY; + return -1; + } + } - if(out.err() != 0) { - LOG(ERROR, "Daemon reported error: {}", out.err()); + // Wait for RPC responses and then get response and add it to out_size + // which is the written size All potential outputs are served to free + // resources regardless of errors, although an errorcode is set. + bool error = false; + ssize_t out_size = 0; + std::size_t idx = 0; + + for (const auto& h : handles) { + try { + // XXX We might need a timeout here to not wait forever for an + // output that never comes? + auto out = h.get().at(0); + + if (out.err() != 0) { + LOG(ERROR, "Daemon reported error: {}", out.err()); + error = true; + errno = out.err(); + } + + out_size += static_cast(out.io_size()); + + } catch (const std::exception& ex) { + LOG(ERROR, "Failed to get rpc output for path \"{}\" [peer: {}]", + path, targets[idx]); error = true; - errno = out.err(); + errno = EIO; } - out_size += static_cast(out.io_size()); - - } catch(const std::exception& ex) { - LOG(ERROR, "Failed to get rpc output for path \"{}\" [peer: {}]", - path, targets[idx]); - error = true; - errno = EIO; + ++idx; } - ++idx; + return error ? -1 : out_size; } - return error ? 
-1 : out_size; -} - /** * Sends an RPC request to a specific node to push all chunks that belong to him */ -ssize_t read(const string& path, void* buf, const off64_t offset, const size_t read_size) { + ssize_t read(const string& path, void* buf, const off64_t offset, const size_t read_size) { // Calculate chunkid boundaries and numbers so that daemons know in which // interval to look for chunks @@ -214,265 +213,265 @@ ssize_t read(const string& path, void* buf, const off64_t offset, const size_t r // targets for the first and last chunk as they need special treatment uint64_t chnk_start_target = 0; - uint64_t chnk_end_target = 0; + uint64_t chnk_end_target = 0; - for (uint64_t chnk_id = chnk_start; chnk_id <= chnk_end; chnk_id++) { - auto target = CTX->distributor()->locate_data(path, chnk_id); + for (uint64_t chnk_id = chnk_start; chnk_id <= chnk_end; chnk_id++) { + auto target = CTX->distributor()->locate_data(path, chnk_id); - if (target_chnks.count(target) == 0) { - target_chnks.insert( - std::make_pair(target, std::vector{chnk_id})); - targets.push_back(target); - } else { - target_chnks[target].push_back(chnk_id); - } + if (target_chnks.count(target) == 0) { + target_chnks.insert( + std::make_pair(target, std::vector{chnk_id})); + targets.push_back(target); + } else { + target_chnks[target].push_back(chnk_id); + } - // set first and last chnk targets - if (chnk_id == chnk_start) { - chnk_start_target = target; - } + // set first and last chnk targets + if (chnk_id == chnk_start) { + chnk_start_target = target; + } - if (chnk_id == chnk_end) { - chnk_end_target = target; + if (chnk_id == chnk_end) { + chnk_end_target = target; + } } - } - - // some helper variables for async RPCs - std::vector bufseq{ - hermes::mutable_buffer{buf, read_size}, - }; - - // expose user buffers so that they can serve as RDMA data targets - // (these are automatically "unexposed" when the destructor is called) - hermes::exposed_memory local_buffers; - - try { - local_buffers = - ld_network_service->expose(bufseq, hermes::access_mode::write_only); - } catch (const std::exception& ex) { - LOG(ERROR, "Failed to expose buffers for RMA"); - errno = EBUSY; - return -1; - } - - std::vector> handles; + // some helper variables for async RPCs + std::vector bufseq{ + hermes::mutable_buffer{buf, read_size}, + }; - // Issue non-blocking RPC requests and wait for the result later - // - // TODO(amiranda): This could be simplified by adding a vector of inputs - // to async_engine::broadcast(). 
This would allow us to avoid manually - // looping over handles as we do below - for(const auto& target : targets) { + // expose user buffers so that they can serve as RDMA data targets + // (these are automatically "unexposed" when the destructor is called) + hermes::exposed_memory local_buffers; - // total chunk_size for target - auto total_chunk_size = target_chnks[target].size() * gkfs_config::rpc::chunksize; + try { + local_buffers = + ld_network_service->expose(bufseq, hermes::access_mode::write_only); - // receiver of first chunk must subtract the offset from first chunk - if (target == chnk_start_target) { - total_chunk_size -= chnk_lpad(offset, gkfs_config::rpc::chunksize); + } catch (const std::exception& ex) { + LOG(ERROR, "Failed to expose buffers for RMA"); + errno = EBUSY; + return -1; } - // receiver of last chunk must subtract - if (target == chnk_end_target) { - total_chunk_size -= chnk_rpad(offset + read_size, gkfs_config::rpc::chunksize); - } + std::vector> handles; - auto endp = CTX->hosts().at(target); + // Issue non-blocking RPC requests and wait for the result later + // + // TODO(amiranda): This could be simplified by adding a vector of inputs + // to async_engine::broadcast(). This would allow us to avoid manually + // looping over handles as we do below + for (const auto& target : targets) { - try { + // total chunk_size for target + auto total_chunk_size = target_chnks[target].size() * gkfs_config::rpc::chunksize; - LOG(DEBUG, "Sending RPC ..."); - - gkfs::rpc::read_data::input in( - path, - // first offset in targets is the chunk with - // a potential offset - chnk_lpad(offset, gkfs_config::rpc::chunksize), - target, - CTX->hosts().size(), - // number of chunks handled by that destination - target_chnks[target].size(), - // chunk start id of this write - chnk_start, - // chunk end id of this write - chnk_end, - // total size to write - total_chunk_size, - local_buffers); - - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that - // we can retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. When that happens we can remove the .at(0) :/ - handles.emplace_back( - ld_network_service->post(endp, in)); - - LOG(DEBUG, "host: {}, path: {}, chunks: {}, size: {}, offset: {}", - target, path, in.chunk_n(), total_chunk_size, in.offset()); - - } catch(const std::exception& ex) { - LOG(ERROR, "Unable to send non-blocking rpc for path \"{}\" " - "[peer: {}]", path, target); - errno = EBUSY; - return -1; - } - } + // receiver of first chunk must subtract the offset from first chunk + if (target == chnk_start_target) { + total_chunk_size -= chnk_lpad(offset, gkfs_config::rpc::chunksize); + } - // Wait for RPC responses and then get response and add it to out_size - // which is the read size. All potential outputs are served to free - // resources regardless of errors, although an errorcode is set. - bool error = false; - ssize_t out_size = 0; - std::size_t idx = 0; + // receiver of last chunk must subtract + if (target == chnk_end_target) { + total_chunk_size -= chnk_rpad(offset + read_size, gkfs_config::rpc::chunksize); + } - for(const auto& h : handles) { - try { - // XXX We might need a timeout here to not wait forever for an - // output that never comes? 
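For reference, a minimal sketch of the chunk arithmetic that the read/write forwarding loops above rely on, assuming the usual semantics of chnk_id_for_offset/chnk_lpad/chnk_rpad (the real helpers live elsewhere in the tree) and an arbitrary example chunk size:

    // Minimal sketch: assumed helper semantics; CHUNKSIZE is an arbitrary
    // example value, not a GekkoFS default.
    #include <cstdint>
    #include <iostream>

    constexpr uint64_t CHUNKSIZE = 512 * 1024;

    // id of the chunk containing a byte offset
    uint64_t chnk_id_for_offset(uint64_t offset, uint64_t chnk_size) {
        return offset / chnk_size;
    }
    // bytes to skip at the front of the first chunk
    uint64_t chnk_lpad(uint64_t offset, uint64_t chnk_size) {
        return offset % chnk_size;
    }
    // bytes left unused at the tail of the last chunk
    uint64_t chnk_rpad(uint64_t end_offset, uint64_t chnk_size) {
        auto rem = end_offset % chnk_size;
        return rem == 0 ? 0 : chnk_size - rem;
    }

    int main() {
        uint64_t offset = 600 * 1024, size = 1500 * 1024;
        std::cout << "first chunk: " << chnk_id_for_offset(offset, CHUNKSIZE)
                  << ", last chunk: " << chnk_id_for_offset(offset + size - 1, CHUNKSIZE)
                  << ", lpad: " << chnk_lpad(offset, CHUNKSIZE)
                  << ", rpad: " << chnk_rpad(offset + size, CHUNKSIZE) << '\n';
        // prints: first chunk: 1, last chunk: 4, lpad: 90112, rpad: 471040
    }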
- auto out = h.get().at(0); + auto endp = CTX->hosts().at(target); + + try { + + LOG(DEBUG, "Sending RPC ..."); + + gkfs::rpc::read_data::input in( + path, + // first offset in targets is the chunk with + // a potential offset + chnk_lpad(offset, gkfs_config::rpc::chunksize), + target, + CTX->hosts().size(), + // number of chunks handled by that destination + target_chnks[target].size(), + // chunk start id of this write + chnk_start, + // chunk end id of this write + chnk_end, + // total size to write + total_chunk_size, + local_buffers); + + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that + // we can retry for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. When that happens we can remove the .at(0) :/ + handles.emplace_back( + ld_network_service->post(endp, in)); + + LOG(DEBUG, "host: {}, path: {}, chunks: {}, size: {}, offset: {}", + target, path, in.chunk_n(), total_chunk_size, in.offset()); + + } catch (const std::exception& ex) { + LOG(ERROR, "Unable to send non-blocking rpc for path \"{}\" " + "[peer: {}]", path, target); + errno = EBUSY; + return -1; + } + } - if(out.err() != 0) { - LOG(ERROR, "Daemon reported error: {}", out.err()); + // Wait for RPC responses and then get response and add it to out_size + // which is the read size. All potential outputs are served to free + // resources regardless of errors, although an errorcode is set. + bool error = false; + ssize_t out_size = 0; + std::size_t idx = 0; + + for (const auto& h : handles) { + try { + // XXX We might need a timeout here to not wait forever for an + // output that never comes? + auto out = h.get().at(0); + + if (out.err() != 0) { + LOG(ERROR, "Daemon reported error: {}", out.err()); + error = true; + errno = out.err(); + } + + out_size += static_cast(out.io_size()); + + } catch (const std::exception& ex) { + LOG(ERROR, "Failed to get rpc output for path \"{}\" [peer: {}]", + path, targets[idx]); error = true; - errno = out.err(); + errno = EIO; } - out_size += static_cast(out.io_size()); - - } catch(const std::exception& ex) { - LOG(ERROR, "Failed to get rpc output for path \"{}\" [peer: {}]", - path, targets[idx]); - error = true; - errno = EIO; + ++idx; } - ++idx; + return error ? -1 : out_size; } - return error ? 
-1 : out_size; -} + int trunc_data(const std::string& path, size_t current_size, size_t new_size) { -int trunc_data(const std::string& path, size_t current_size, size_t new_size) { + assert(current_size > new_size); + bool error = false; - assert(current_size > new_size); - bool error = false; + // Find out which data servers need to delete data chunks in order to + // contact only them + const unsigned int chunk_start = chnk_id_for_offset(new_size, gkfs_config::rpc::chunksize); + const unsigned int chunk_end = + chnk_id_for_offset(current_size - new_size - 1, gkfs_config::rpc::chunksize); - // Find out which data servers need to delete data chunks in order to - // contact only them - const unsigned int chunk_start = chnk_id_for_offset(new_size, gkfs_config::rpc::chunksize); - const unsigned int chunk_end = - chnk_id_for_offset(current_size - new_size - 1, gkfs_config::rpc::chunksize); + std::unordered_set hosts; + for (unsigned int chunk_id = chunk_start; chunk_id <= chunk_end; ++chunk_id) { + hosts.insert(CTX->distributor()->locate_data(path, chunk_id)); + } - std::unordered_set hosts; - for(unsigned int chunk_id = chunk_start; chunk_id <= chunk_end; ++chunk_id) { - hosts.insert(CTX->distributor()->locate_data(path, chunk_id)); - } + std::vector> handles; - std::vector> handles; + for (const auto& host: hosts) { - for (const auto& host: hosts) { + auto endp = CTX->hosts().at(host); - auto endp = CTX->hosts().at(host); + try { + LOG(DEBUG, "Sending RPC ..."); - try { - LOG(DEBUG, "Sending RPC ..."); + gkfs::rpc::trunc_data::input in(path, new_size); - gkfs::rpc::trunc_data::input in(path, new_size); + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that + // we can retry for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. When that happens we can remove the .at(0) :/ + handles.emplace_back( + ld_network_service->post(endp, in)); - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that - // we can retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. When that happens we can remove the .at(0) :/ - handles.emplace_back( - ld_network_service->post(endp, in)); + } catch (const std::exception& ex) { + // TODO(amiranda): we should cancel all previously posted requests + // here, unfortunately, Hermes does not support it yet :/ + LOG(ERROR, "Failed to send request to host: {}", host); + errno = EIO; + return -1; + } - } catch (const std::exception& ex) { - // TODO(amiranda): we should cancel all previously posted requests - // here, unfortunately, Hermes does not support it yet :/ - LOG(ERROR, "Failed to send request to host: {}", host); - errno = EIO; - return -1; } - } - - // Wait for RPC responses and then get response - for(const auto& h : handles) { - - try { - // XXX We might need a timeout here to not wait forever for an - // output that never comes? - auto out = h.get().at(0); - - if(out.err() != 0) { - LOG(ERROR, "received error response: {}", out.err()); + // Wait for RPC responses and then get response + for (const auto& h : handles) { + + try { + // XXX We might need a timeout here to not wait forever for an + // output that never comes? 
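The CTX->distributor()->locate_data()/locate_file_metadata() calls used throughout this file hide the host-selection policy. A minimal sketch of one plausible hash-based distributor, purely for illustration; the class name and hashing scheme here are assumptions, not the project's actual implementation:

    // Sketch of a possible distributor; the hashing scheme is an assumption.
    #include <cstdint>
    #include <functional>
    #include <string>

    class SimpleModuloDistributor {
        uint64_t num_hosts_;
    public:
        explicit SimpleModuloDistributor(uint64_t num_hosts) : num_hosts_(num_hosts) {}

        // a path's metadata is owned by exactly one host
        uint64_t locate_file_metadata(const std::string& path) const {
            return std::hash<std::string>{}(path) % num_hosts_;
        }
        // chunks of one file are spread over all hosts
        uint64_t locate_data(const std::string& path, uint64_t chunk_id) const {
            return (std::hash<std::string>{}(path) + chunk_id) % num_hosts_;
        }
    };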
+ auto out = h.get().at(0); + + if (out.err() != 0) { + LOG(ERROR, "received error response: {}", out.err()); + error = true; + errno = EIO; + } + } catch (const std::exception& ex) { + LOG(ERROR, "while getting rpc output"); error = true; errno = EIO; } - } catch(const std::exception& ex) { - LOG(ERROR, "while getting rpc output"); - error = true; - errno = EIO; } - } - return error ? -1 : 0; -} + return error ? -1 : 0; + } -ChunkStat chunk_stat() { + ChunkStat chunk_stat() { - std::vector> handles; + std::vector> handles; - for (const auto& endp : CTX->hosts()) { - try { - LOG(DEBUG, "Sending RPC to host: {}", endp.to_string()); + for (const auto& endp : CTX->hosts()) { + try { + LOG(DEBUG, "Sending RPC to host: {}", endp.to_string()); - gkfs::rpc::chunk_stat::input in(0); + gkfs::rpc::chunk_stat::input in(0); - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that - // we can retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. When that happens we can remove the .at(0) :/ - handles.emplace_back( - ld_network_service->post(endp, in)); + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that + // we can retry for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. When that happens we can remove the .at(0) :/ + handles.emplace_back( + ld_network_service->post(endp, in)); - } catch (const std::exception& ex) { - // TODO(amiranda): we should cancel all previously posted requests - // here, unfortunately, Hermes does not support it yet :/ - LOG(ERROR, "Failed to send request to host: {}", endp.to_string()); - throw std::runtime_error("Failed to forward non-blocking rpc request"); + } catch (const std::exception& ex) { + // TODO(amiranda): we should cancel all previously posted requests + // here, unfortunately, Hermes does not support it yet :/ + LOG(ERROR, "Failed to send request to host: {}", endp.to_string()); + throw std::runtime_error("Failed to forward non-blocking rpc request"); + } } - } - unsigned long chunk_size = gkfs_config::rpc::chunksize; - unsigned long chunk_total = 0; - unsigned long chunk_free = 0; + unsigned long chunk_size = gkfs_config::rpc::chunksize; + unsigned long chunk_total = 0; + unsigned long chunk_free = 0; - // wait for RPC responses - for(std::size_t i = 0; i < handles.size(); ++i) { + // wait for RPC responses + for (std::size_t i = 0; i < handles.size(); ++i) { - gkfs::rpc::chunk_stat::output out; + gkfs::rpc::chunk_stat::output out; - try { - // XXX We might need a timeout here to not wait forever for an - // output that never comes? - out = handles[i].get().at(0); + try { + // XXX We might need a timeout here to not wait forever for an + // output that never comes? 
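chunk_stat() aggregates the per-daemon chunk counters into a single ChunkStat. A minimal usage sketch of that return value, assuming the struct simply carries the three counters shown here and using made-up example numbers:

    // Usage sketch with assumed example numbers; mirrors the aggregate
    // {chunk_size, chunk_total, chunk_free} returned by chunk_stat().
    #include <cstdint>
    #include <iostream>

    struct ChunkStat {
        unsigned long chunk_size;
        unsigned long chunk_total;
        unsigned long chunk_free;
    };

    int main() {
        ChunkStat cs{524288, 1000000, 250000};
        auto bytes_total = static_cast<uint64_t>(cs.chunk_size) * cs.chunk_total;
        auto bytes_free = static_cast<uint64_t>(cs.chunk_size) * cs.chunk_free;
        std::cout << "capacity: " << bytes_total << " B, free: " << bytes_free
                  << " B (" << 100.0 * cs.chunk_free / cs.chunk_total << "% free)\n";
    }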
+ out = handles[i].get().at(0); - assert(out.chunk_size() == chunk_size); - chunk_total += out.chunk_total(); - chunk_free += out.chunk_free(); + assert(out.chunk_size() == chunk_size); + chunk_total += out.chunk_total(); + chunk_free += out.chunk_free(); - } catch(const std::exception& ex) { - throw std::runtime_error( - fmt::format("Failed to get rpc output for target host: {}]", i)); + } catch (const std::exception& ex) { + throw std::runtime_error( + fmt::format("Failed to get rpc output for target host: {}]", i)); + } } - } - return {chunk_size, chunk_total, chunk_free}; -} + return {chunk_size, chunk_total, chunk_free}; + } } // end namespace rpc_send diff --git a/src/client/rpc/ld_rpc_management.cpp b/src/client/rpc/ld_rpc_management.cpp index 6e52aaca6..d3132e3dd 100644 --- a/src/client/rpc/ld_rpc_management.cpp +++ b/src/client/rpc/ld_rpc_management.cpp @@ -11,58 +11,54 @@ SPDX-License-Identifier: MIT */ -#include "client/rpc/ld_rpc_management.hpp" -#include "global/rpc/rpc_types.hpp" +#include #include #include -#include // see https://github.com/boostorg/tokenizer/issues/9 -#include -#include -#include #include +#include -namespace rpc_send { +namespace rpc_send { -/** - * Gets fs configuration information from the running daemon and transfers it to the memory of the library - * @return - */ -bool get_fs_config() { - - auto endp = CTX->hosts().at(CTX->local_host_id()); - gkfs::rpc::fs_config::output out; - - try { - LOG(DEBUG, "Retrieving file system configurations from daemon"); - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can retry - // for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. When that happens we can remove the .at(0) :/ - out = ld_network_service->post(endp).get().at(0); - } catch (const std::exception& ex) { - LOG(ERROR, "Retrieving fs configurations from daemon"); - return false; + /** + * Gets fs configuration information from the running daemon and transfers it to the memory of the library + * @return + */ + bool get_fs_config() { + + auto endp = CTX->hosts().at(CTX->local_host_id()); + gkfs::rpc::fs_config::output out; + + try { + LOG(DEBUG, "Retrieving file system configurations from daemon"); + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can retry + // for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. 
When that happens we can remove the .at(0) :/ + out = ld_network_service->post(endp).get().at(0); + } catch (const std::exception& ex) { + LOG(ERROR, "Retrieving fs configurations from daemon"); + return false; + } + + CTX->mountdir(out.mountdir()); + LOG(INFO, "Mountdir: '{}'", CTX->mountdir()); + + CTX->fs_conf()->rootdir = out.rootdir(); + CTX->fs_conf()->atime_state = out.atime_state(); + CTX->fs_conf()->mtime_state = out.mtime_state(); + CTX->fs_conf()->ctime_state = out.ctime_state(); + CTX->fs_conf()->link_cnt_state = out.link_cnt_state(); + CTX->fs_conf()->blocks_state = out.blocks_state(); + CTX->fs_conf()->uid = out.uid(); + CTX->fs_conf()->gid = out.gid(); + + LOG(DEBUG, "Got response with mountdir {}", out.mountdir()); + + return true; } - CTX->mountdir(out.mountdir()); - LOG(INFO, "Mountdir: '{}'", CTX->mountdir()); - - CTX->fs_conf()->rootdir = out.rootdir(); - CTX->fs_conf()->atime_state = out.atime_state(); - CTX->fs_conf()->mtime_state = out.mtime_state(); - CTX->fs_conf()->ctime_state = out.ctime_state(); - CTX->fs_conf()->link_cnt_state = out.link_cnt_state(); - CTX->fs_conf()->blocks_state = out.blocks_state(); - CTX->fs_conf()->uid = out.uid(); - CTX->fs_conf()->gid = out.gid(); - - LOG(DEBUG, "Got response with mountdir {}", out.mountdir()); - - return true; -} - } diff --git a/src/client/rpc/ld_rpc_metadentry.cpp b/src/client/rpc/ld_rpc_metadentry.cpp index 3a11766f3..3336a5fad 100644 --- a/src/client/rpc/ld_rpc_metadentry.cpp +++ b/src/client/rpc/ld_rpc_metadentry.cpp @@ -12,370 +12,369 @@ */ #include -#include "client/preload.hpp" -#include "client/logging.hpp" -#include "client/preload_util.hpp" -#include "client/open_dir.hpp" +#include +#include +#include +#include +#include + #include #include #include -#include -namespace rpc_send { - -using namespace std; - -int mk_node(const std::string& path, const mode_t mode) { - - int err = EUNKNOWN; - auto endp = CTX->hosts().at( - CTX->distributor()->locate_file_metadata(path)); - - try { - LOG(DEBUG, "Sending RPC ..."); - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can - // retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. When that happens we can remove the .at(0) :/ - auto out = - ld_network_service->post(endp, path, mode).get().at(0); - err = out.err(); - LOG(DEBUG, "Got response success: {}", err); - - if(out.err()) { - errno = out.err(); - return -1; - } +namespace rpc_send { - } catch(const std::exception& ex) { - LOG(ERROR, "while getting rpc output"); - errno = EBUSY; - return -1; - } + using namespace std; + + int mk_node(const std::string& path, const mode_t mode) { + + int err = EUNKNOWN; + auto endp = CTX->hosts().at( + CTX->distributor()->locate_file_metadata(path)); + + try { + LOG(DEBUG, "Sending RPC ..."); + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can + // retry for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. 
When that happens we can remove the .at(0) :/ + auto out = + ld_network_service->post(endp, path, mode).get().at(0); + err = out.err(); + LOG(DEBUG, "Got response success: {}", err); - return err; -} - -int stat(const std::string& path, string& attr) { - - auto endp = CTX->hosts().at( - CTX->distributor()->locate_file_metadata(path)); - - try { - LOG(DEBUG, "Sending RPC ..."); - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can - // retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. When that happens we can remove the .at(0) :/ - auto out = - ld_network_service->post(endp, path).get().at(0); - LOG(DEBUG, "Got response success: {}", out.err()); - - if(out.err() != 0) { - errno = out.err(); + if (out.err()) { + errno = out.err(); + return -1; + } + + } catch (const std::exception& ex) { + LOG(ERROR, "while getting rpc output"); + errno = EBUSY; return -1; } - attr = out.db_val(); - return 0; - - } catch(const std::exception& ex) { - LOG(ERROR, "while getting rpc output"); - errno = EBUSY; - return -1; + return err; } - return 0; -} + int stat(const std::string& path, string& attr) { -int decr_size(const std::string& path, size_t length) { - - auto endp = CTX->hosts().at( - CTX->distributor()->locate_file_metadata(path)); + auto endp = CTX->hosts().at( + CTX->distributor()->locate_file_metadata(path)); - try { + try { + LOG(DEBUG, "Sending RPC ..."); + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can + // retry for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. When that happens we can remove the .at(0) :/ + auto out = + ld_network_service->post(endp, path).get().at(0); + LOG(DEBUG, "Got response success: {}", out.err()); - LOG(DEBUG, "Sending RPC ..."); - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can - // retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. When that happens we can remove the .at(0) :/ - auto out = - ld_network_service->post( - endp, path, length).get().at(0); + if (out.err() != 0) { + errno = out.err(); + return -1; + } - LOG(DEBUG, "Got response success: {}", out.err()); + attr = out.db_val(); + return 0; - if(out.err() != 0) { - errno = out.err(); + } catch (const std::exception& ex) { + LOG(ERROR, "while getting rpc output"); + errno = EBUSY; return -1; } return 0; - - } catch(const std::exception& ex) { - LOG(ERROR, "while getting rpc output"); - errno = EBUSY; - return -1; } -} -int rm_node(const std::string& path, const bool remove_metadentry_only, const ssize_t size) { - - // if only the metadentry should be removed, send one rpc to the - // metadentry's responsible node to remove the metadata - // else, send an rpc to all hosts and thus broadcast chunk_removal. 
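rm_node() below picks its RPC targets based on file size: small files only require the metadata holder plus the daemons that actually own chunks, while larger files fall back to a broadcast. A minimal sketch of that selection logic, with the distributor interface and the helper name assumed for illustration:

    // Sketch of the target-selection logic used below; the distributor
    // interface and the function name are assumptions for illustration only.
    #include <cstdint>
    #include <set>
    #include <string>

    template <typename Distributor>
    std::set<uint64_t> removal_targets(const Distributor& distributor,
                                       const std::string& path, uint64_t file_size,
                                       uint64_t chunksize, uint64_t num_hosts) {
        std::set<uint64_t> hosts;
        hosts.insert(distributor.locate_file_metadata(path)); // metadentry holder
        if (file_size / chunksize < num_hosts) {
            // "small" file: only daemons holding chunks 0..last need the RPC
            for (uint64_t chunk_id = 0; chunk_id <= file_size / chunksize; ++chunk_id)
                hosts.insert(distributor.locate_data(path, chunk_id));
        } else {
            // "big" file: any daemon may hold chunks, so broadcast to all
            for (uint64_t host = 0; host < num_hosts; ++host)
                hosts.insert(host);
        }
        return hosts;
    }

For a file spanning only a handful of chunks on a large deployment, this contacts a few daemons instead of broadcasting the removal to every host.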
- if(remove_metadentry_only) { + int decr_size(const std::string& path, size_t length) { auto endp = CTX->hosts().at( - CTX->distributor()->locate_file_metadata(path)); + CTX->distributor()->locate_file_metadata(path)); try { LOG(DEBUG, "Sending RPC ..."); // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can // retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a // result_set. When that happens we can remove the .at(0) :/ - auto out = - ld_network_service->post(endp, path).get().at(0); + auto out = + ld_network_service->post( + endp, path, length).get().at(0); LOG(DEBUG, "Got response success: {}", out.err()); - if(out.err() != 0) { + if (out.err() != 0) { errno = out.err(); return -1; } - + return 0; - } catch(const std::exception& ex) { + } catch (const std::exception& ex) { LOG(ERROR, "while getting rpc output"); errno = EBUSY; return -1; } - - return 0; } - std::vector> handles; + int rm_node(const std::string& path, const bool remove_metadentry_only, const ssize_t size) { - // Small files - if (static_cast(size / gkfs_config::rpc::chunksize) < CTX->hosts().size()) { + // if only the metadentry should be removed, send one rpc to the + // metadentry's responsible node to remove the metadata + // else, send an rpc to all hosts and thus broadcast chunk_removal. + if (remove_metadentry_only) { - auto endp = CTX->hosts().at( - CTX->distributor()->locate_file_metadata(path)); + auto endp = CTX->hosts().at( + CTX->distributor()->locate_file_metadata(path)); - try { - LOG(DEBUG, "Sending RPC to host: {}", endp.to_string()); - gkfs::rpc::remove::input in(path); - handles.emplace_back( - ld_network_service->post(endp, in)); + try { - uint64_t chnk_start = 0; - uint64_t chnk_end = size / gkfs_config::rpc::chunksize; + LOG(DEBUG, "Sending RPC ..."); + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can + // retry for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. 
When that happens we can remove the .at(0) :/ + auto out = + ld_network_service->post(endp, path).get().at(0); - for (uint64_t chnk_id = chnk_start; chnk_id <= chnk_end; chnk_id++) { - const auto target = CTX->hosts().at( - CTX->distributor()->locate_data(path, chnk_id)); + LOG(DEBUG, "Got response success: {}", out.err()); - LOG(DEBUG, "Sending RPC to host: {}", target.to_string()); + if (out.err() != 0) { + errno = out.err(); + return -1; + } - handles.emplace_back( - ld_network_service->post(target, in)); + return 0; + + } catch (const std::exception& ex) { + LOG(ERROR, "while getting rpc output"); + errno = EBUSY; + return -1; } - } catch (const std::exception & ex) { - LOG(ERROR, "Failed to send reduced remove requests"); - throw std::runtime_error( - "Failed to forward non-blocking rpc request"); + + return 0; } - } - else { // "Big" files - for (const auto& endp : CTX->hosts()) { - try { - LOG(DEBUG, "Sending RPC to host: {}", endp.to_string()); - gkfs::rpc::remove::input in(path); + std::vector> handles; - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that - // we can retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. When that happens we can remove the .at(0) :/ - // - // + // Small files + if (static_cast(size / gkfs_config::rpc::chunksize) < CTX->hosts().size()) { + auto endp = CTX->hosts().at( + CTX->distributor()->locate_file_metadata(path)); + + try { + LOG(DEBUG, "Sending RPC to host: {}", endp.to_string()); + gkfs::rpc::remove::input in(path); handles.emplace_back( ld_network_service->post(endp, in)); + uint64_t chnk_start = 0; + uint64_t chnk_end = size / gkfs_config::rpc::chunksize; + + for (uint64_t chnk_id = chnk_start; chnk_id <= chnk_end; chnk_id++) { + const auto target = CTX->hosts().at( + CTX->distributor()->locate_data(path, chnk_id)); + + LOG(DEBUG, "Sending RPC to host: {}", target.to_string()); + + handles.emplace_back( + ld_network_service->post(target, in)); + } } catch (const std::exception& ex) { - // TODO(amiranda): we should cancel all previously posted requests - // here, unfortunately, Hermes does not support it yet :/ - LOG(ERROR, "Failed to send request to host: {}", - endp.to_string()); + LOG(ERROR, "Failed to send reduced remove requests"); throw std::runtime_error( "Failed to forward non-blocking rpc request"); } + } else { // "Big" files + for (const auto& endp : CTX->hosts()) { + try { + LOG(DEBUG, "Sending RPC to host: {}", endp.to_string()); + + gkfs::rpc::remove::input in(path); + + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that + // we can retry for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. 
When that happens we can remove the .at(0) :/ + // + // + + handles.emplace_back( + ld_network_service->post(endp, in)); + + } catch (const std::exception& ex) { + // TODO(amiranda): we should cancel all previously posted requests + // here, unfortunately, Hermes does not support it yet :/ + LOG(ERROR, "Failed to send request to host: {}", + endp.to_string()); + throw std::runtime_error( + "Failed to forward non-blocking rpc request"); + } + } } + // wait for RPC responses + bool got_error = false; + + for (const auto& h : handles) { + + try { + // XXX We might need a timeout here to not wait forever for an + // output that never comes? + auto out = h.get().at(0); + + if (out.err() != 0) { + LOG(ERROR, "received error response: {}", out.err()); + got_error = true; + errno = out.err(); + } + } catch (const std::exception& ex) { + LOG(ERROR, "while getting rpc output"); + got_error = true; + errno = EBUSY; + } + } + + return got_error ? -1 : 0; } - // wait for RPC responses - bool got_error = false; - for(const auto& h : handles) { + + int update_metadentry(const string& path, const Metadata& md, const MetadentryUpdateFlags& md_flags) { + + auto endp = CTX->hosts().at( + CTX->distributor()->locate_file_metadata(path)); try { - // XXX We might need a timeout here to not wait forever for an - // output that never comes? - auto out = h.get().at(0); - if(out.err() != 0) { - LOG(ERROR, "received error response: {}", out.err()); - got_error = true; + LOG(DEBUG, "Sending RPC ..."); + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can + // retry for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. When that happens we can remove the .at(0) :/ + auto out = + ld_network_service->post( + endp, + path, + (md_flags.link_count ? md.link_count() : 0), + /* mode */ 0, + /* uid */ 0, + /* gid */ 0, + (md_flags.size ? md.size() : 0), + (md_flags.blocks ? md.blocks() : 0), + (md_flags.atime ? md.atime() : 0), + (md_flags.mtime ? md.mtime() : 0), + (md_flags.ctime ? md.ctime() : 0), + bool_to_merc_bool(md_flags.link_count), + /* mode_flag */ false, + bool_to_merc_bool(md_flags.size), + bool_to_merc_bool(md_flags.blocks), + bool_to_merc_bool(md_flags.atime), + bool_to_merc_bool(md_flags.mtime), + bool_to_merc_bool(md_flags.ctime)).get().at(0); + + LOG(DEBUG, "Got response success: {}", out.err()); + + if (out.err() != 0) { errno = out.err(); + return -1; } - } catch(const std::exception& ex) { + + return 0; + + } catch (const std::exception& ex) { LOG(ERROR, "while getting rpc output"); - got_error = true; errno = EBUSY; - } - } - - return got_error ? -1 : 0; - -} - - -int update_metadentry(const string& path, const Metadata& md, const MetadentryUpdateFlags& md_flags) { - - auto endp = CTX->hosts().at( - CTX->distributor()->locate_file_metadata(path)); - - try { - - LOG(DEBUG, "Sending RPC ..."); - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can - // retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. When that happens we can remove the .at(0) :/ - auto out = - ld_network_service->post( - endp, - path, - (md_flags.link_count ? md.link_count() : 0), - /* mode */ 0, - /* uid */ 0, - /* gid */ 0, - (md_flags.size ? md.size() : 0), - (md_flags.blocks ? 
md.blocks() : 0), - (md_flags.atime ? md.atime() : 0), - (md_flags.mtime ? md.mtime() : 0), - (md_flags.ctime ? md.ctime() : 0), - bool_to_merc_bool(md_flags.link_count), - /* mode_flag */ false, - bool_to_merc_bool(md_flags.size), - bool_to_merc_bool(md_flags.blocks), - bool_to_merc_bool(md_flags.atime), - bool_to_merc_bool(md_flags.mtime), - bool_to_merc_bool(md_flags.ctime)).get().at(0); - - LOG(DEBUG, "Got response success: {}", out.err()); - - if(out.err() != 0) { - errno = out.err(); return -1; } - - return 0; - - } catch(const std::exception& ex) { - LOG(ERROR, "while getting rpc output"); - errno = EBUSY; - return -1; } -} -int update_metadentry_size(const string& path, const size_t size, const off64_t offset, const bool append_flag, - off64_t& ret_size) { + int update_metadentry_size(const string& path, const size_t size, const off64_t offset, const bool append_flag, + off64_t& ret_size) { - auto endp = CTX->hosts().at( - CTX->distributor()->locate_file_metadata(path)); + auto endp = CTX->hosts().at( + CTX->distributor()->locate_file_metadata(path)); - try { + try { - LOG(DEBUG, "Sending RPC ..."); - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can - // retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. When that happens we can remove the .at(0) :/ - auto out = - ld_network_service->post( - endp, path, size, offset, - bool_to_merc_bool(append_flag)).get().at(0); + LOG(DEBUG, "Sending RPC ..."); + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can + // retry for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. When that happens we can remove the .at(0) :/ + auto out = + ld_network_service->post( + endp, path, size, offset, + bool_to_merc_bool(append_flag)).get().at(0); - LOG(DEBUG, "Got response success: {}", out.err()); + LOG(DEBUG, "Got response success: {}", out.err()); - if(out.err() != 0) { - errno = out.err(); - return -1; - } + if (out.err() != 0) { + errno = out.err(); + return -1; + } - ret_size = out.ret_size(); - return out.err(); + ret_size = out.ret_size(); + return out.err(); - return 0; + return 0; - } catch(const std::exception& ex) { - LOG(ERROR, "while getting rpc output"); - errno = EBUSY; - ret_size = 0; - return EUNKNOWN; + } catch (const std::exception& ex) { + LOG(ERROR, "while getting rpc output"); + errno = EBUSY; + ret_size = 0; + return EUNKNOWN; + } } -} -int get_metadentry_size(const std::string& path, off64_t& ret_size) { + int get_metadentry_size(const std::string& path, off64_t& ret_size) { - auto endp = CTX->hosts().at( - CTX->distributor()->locate_file_metadata(path)); + auto endp = CTX->hosts().at( + CTX->distributor()->locate_file_metadata(path)); - try { + try { - LOG(DEBUG, "Sending RPC ..."); - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can - // retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. 
When that happens we can remove the .at(0) :/ - auto out = - ld_network_service->post( - endp, path).get().at(0); + LOG(DEBUG, "Sending RPC ..."); + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can + // retry for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. When that happens we can remove the .at(0) :/ + auto out = + ld_network_service->post( + endp, path).get().at(0); - LOG(DEBUG, "Got response success: {}", out.err()); + LOG(DEBUG, "Got response success: {}", out.err()); - ret_size = out.ret_size(); - return out.err(); + ret_size = out.ret_size(); + return out.err(); - } catch(const std::exception& ex) { - LOG(ERROR, "while getting rpc output"); - errno = EBUSY; - ret_size = 0; - return EUNKNOWN; + } catch (const std::exception& ex) { + LOG(ERROR, "while getting rpc output"); + errno = EBUSY; + ret_size = 0; + return EUNKNOWN; + } } -} /** * Sends an RPC request to a specific node to push all chunks that belong to him */ -void get_dirents(OpenDir& open_dir){ + void get_dirents(OpenDir& open_dir) { - auto const root_dir = open_dir.path(); - auto const targets = - CTX->distributor()->locate_directory_metadata(root_dir); + auto const root_dir = open_dir.path(); + auto const targets = + CTX->distributor()->locate_directory_metadata(root_dir); /* preallocate receiving buffer. The actual size is not known yet. * @@ -399,129 +398,129 @@ void get_dirents(OpenDir& open_dir){ exposed_buffers.emplace_back( ld_network_service->expose( std::vector{ - hermes::mutable_buffer{ - large_buffer.get() + (i * per_host_buff_size), - per_host_buff_size - } - }, - hermes::access_mode::write_only)); - } catch (const std::exception& ex) { - throw std::runtime_error("Failed to expose buffers for RMA"); + hermes::mutable_buffer{ + large_buffer.get() + (i * per_host_buff_size), + per_host_buff_size + } + }, + hermes::access_mode::write_only)); + } catch (const std::exception& ex) { + throw std::runtime_error("Failed to expose buffers for RMA"); + } } - } - // send RPCs - std::vector> handles; + // send RPCs + std::vector> handles; - for(std::size_t i = 0; i < targets.size(); ++i) { + for (std::size_t i = 0; i < targets.size(); ++i) { - LOG(DEBUG, "target_host: {}", targets[i]); + LOG(DEBUG, "target_host: {}", targets[i]); - // Setup rpc input parameters for each host - auto endp = CTX->hosts().at(targets[i]); + // Setup rpc input parameters for each host + auto endp = CTX->hosts().at(targets[i]); - gkfs::rpc::get_dirents::input in(root_dir, exposed_buffers[i]); + gkfs::rpc::get_dirents::input in(root_dir, exposed_buffers[i]); - try { + try { - LOG(DEBUG, "Sending RPC to host: {}", targets[i]); - handles.emplace_back( - ld_network_service->post(endp, in)); - } catch(const std::exception& ex) { - LOG(ERROR, "Unable to send non-blocking get_dirents() " - "on {} [peer: {}]", root_dir, targets[i]); - throw std::runtime_error("Failed to post non-blocking RPC request"); + LOG(DEBUG, "Sending RPC to host: {}", targets[i]); + handles.emplace_back( + ld_network_service->post(endp, in)); + } catch (const std::exception& ex) { + LOG(ERROR, "Unable to send non-blocking get_dirents() " + "on {} [peer: {}]", root_dir, targets[i]); + throw std::runtime_error("Failed to post non-blocking RPC request"); + } } - } - // wait for RPC responses - for(std::size_t i = 0; i < handles.size(); ++i) { + // wait for RPC responses + for (std::size_t i = 0; i < handles.size(); 
++i) { - gkfs::rpc::get_dirents::output out; + gkfs::rpc::get_dirents::output out; - try { - // XXX We might need a timeout here to not wait forever for an - // output that never comes? - out = handles[i].get().at(0); - - if(out.err() != 0) { + try { + // XXX We might need a timeout here to not wait forever for an + // output that never comes? + out = handles[i].get().at(0); + + if (out.err() != 0) { + throw std::runtime_error( + fmt::format("Failed to retrieve dir entries from " + "host '{}'. Error '{}', path '{}'", + targets[i], strerror(out.err()), root_dir)); + } + } catch (const std::exception& ex) { throw std::runtime_error( - fmt::format("Failed to retrieve dir entries from " - "host '{}'. Error '{}', path '{}'", - targets[i], strerror(out.err()), root_dir)); + fmt::format("Failed to get rpc output.. [path: {}, " + "target host: {}]", root_dir, targets[i])); } - } catch(const std::exception& ex) { - throw std::runtime_error( - fmt::format("Failed to get rpc output.. [path: {}, " - "target host: {}]", root_dir, targets[i])); - } - // each server wrote information to its pre-defined region in - // large_buffer, recover it by computing the base_address for each - // particular server and adding the appropriate offsets - assert(exposed_buffers[i].count() == 1); - void* base_ptr = exposed_buffers[i].begin()->data(); + // each server wrote information to its pre-defined region in + // large_buffer, recover it by computing the base_address for each + // particular server and adding the appropriate offsets + assert(exposed_buffers[i].count() == 1); + void* base_ptr = exposed_buffers[i].begin()->data(); - bool* bool_ptr = reinterpret_cast(base_ptr); - char* names_ptr = reinterpret_cast(base_ptr) + - (out.dirents_size() * sizeof(bool)); + bool* bool_ptr = reinterpret_cast(base_ptr); + char* names_ptr = reinterpret_cast(base_ptr) + + (out.dirents_size() * sizeof(bool)); - for(std::size_t j = 0; j < out.dirents_size(); j++) { + for (std::size_t j = 0; j < out.dirents_size(); j++) { - FileType ftype = (*bool_ptr) ? - FileType::directory : - FileType::regular; - bool_ptr++; + FileType ftype = (*bool_ptr) ? + FileType::directory : + FileType::regular; + bool_ptr++; - // Check that we are not outside the recv_buff for this specific host - assert((names_ptr - reinterpret_cast(base_ptr)) > 0); - assert( - static_cast( - names_ptr - reinterpret_cast(base_ptr)) < - per_host_buff_size); + // Check that we are not outside the recv_buff for this specific host + assert((names_ptr - reinterpret_cast(base_ptr)) > 0); + assert( + static_cast( + names_ptr - reinterpret_cast(base_ptr)) < + per_host_buff_size); - auto name = std::string(names_ptr); - names_ptr += name.size() + 1; + auto name = std::string(names_ptr); + names_ptr += name.size() + 1; - open_dir.add(name, ftype); + open_dir.add(name, ftype); + } } } -} #ifdef HAS_SYMLINKS -int mk_symlink(const std::string& path, const std::string& target_path) { + int mk_symlink(const std::string& path, const std::string& target_path) { - auto endp = CTX->hosts().at( - CTX->distributor()->locate_file_metadata(path)); + auto endp = CTX->hosts().at( + CTX->distributor()->locate_file_metadata(path)); - try { + try { - LOG(DEBUG, "Sending RPC ..."); - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can - // retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. 
When that happens we can remove the .at(0) :/ - auto out = - ld_network_service->post( - endp, path, target_path).get().at(0); + LOG(DEBUG, "Sending RPC ..."); + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can + // retry for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. When that happens we can remove the .at(0) :/ + auto out = + ld_network_service->post( + endp, path, target_path).get().at(0); - LOG(DEBUG, "Got response success: {}", out.err()); + LOG(DEBUG, "Got response success: {}", out.err()); - if(out.err() != 0) { - errno = out.err(); - return -1; - } + if (out.err() != 0) { + errno = out.err(); + return -1; + } - return 0; + return 0; - } catch(const std::exception& ex) { - LOG(ERROR, "while getting rpc output"); - errno = EBUSY; - return -1; + } catch (const std::exception& ex) { + LOG(ERROR, "while getting rpc output"); + errno = EBUSY; + return -1; + } } -} #endif diff --git a/src/daemon/backend/data/chunk_storage.cpp b/src/daemon/backend/data/chunk_storage.cpp index 65b94e11c..94000b621 100644 --- a/src/daemon/backend/data/chunk_storage.cpp +++ b/src/daemon/backend/data/chunk_storage.cpp @@ -14,22 +14,25 @@ #include #include -#include #include #include +#include + +extern "C" { #include +} namespace bfs = boost::filesystem; +using namespace std; -std::string ChunkStorage::absolute(const std::string& internal_path) const { +string ChunkStorage::absolute(const string& internal_path) const { assert(is_relative_path(internal_path)); return root_path + '/' + internal_path; } -ChunkStorage::ChunkStorage(const std::string& path, const size_t chunksize) : - root_path(path), - chunksize(chunksize) -{ +ChunkStorage::ChunkStorage(const string& path, const size_t chunksize) : + root_path(path), + chunksize(chunksize) { //TODO check path: absolute, exists, permission to write etc... assert(is_absolute_path(root_path)); @@ -40,32 +43,32 @@ ChunkStorage::ChunkStorage(const std::string& path, const size_t chunksize) : log->debug("Chunk storage initialized with path: '{}'", root_path); } -std::string ChunkStorage::get_chunks_dir(const std::string& file_path) { +string ChunkStorage::get_chunks_dir(const string& file_path) { assert(is_absolute_path(file_path)); - std::string chunk_dir = file_path.substr(1); - std::replace(chunk_dir.begin(), chunk_dir.end(), '/', ':'); + string chunk_dir = file_path.substr(1); + ::replace(chunk_dir.begin(), chunk_dir.end(), '/', ':'); return chunk_dir; } -std::string ChunkStorage::get_chunk_path(const std::string& file_path, unsigned int chunk_id) { - return get_chunks_dir(file_path) + '/' + std::to_string(chunk_id); +string ChunkStorage::get_chunk_path(const string& file_path, unsigned int chunk_id) { + return get_chunks_dir(file_path) + '/' + ::to_string(chunk_id); } -void ChunkStorage::destroy_chunk_space(const std::string& file_path) const { +void ChunkStorage::destroy_chunk_space(const string& file_path) const { auto chunk_dir = absolute(get_chunks_dir(file_path)); try { bfs::remove_all(chunk_dir); - } catch (const bfs::filesystem_error& e){ - log->error("Failed to remove chunk directory. Path: '{}', Error: {}", chunk_dir, e.what()); + } catch (const bfs::filesystem_error& e) { + log->error("Failed to remove chunk directory. 
Path: '{}', Error: '{}'", chunk_dir, e.what()); } } -void ChunkStorage::init_chunk_space(const std::string& file_path) const { +void ChunkStorage::init_chunk_space(const string& file_path) const { auto chunk_dir = absolute(get_chunks_dir(file_path)); auto err = mkdir(chunk_dir.c_str(), 0750); - if(err == -1 && errno != EEXIST){ - log->error("Failed to create chunk dir. Path: {}, Error: {}", chunk_dir, std::strerror(errno)); - throw std::system_error(errno, std::system_category(), "Failed to create chunk directory"); + if (err == -1 && errno != EEXIST) { + log->error("Failed to create chunk dir. Path: '{}', Error: '{}'", chunk_dir, ::strerror(errno)); + throw ::system_error(errno, ::system_category(), "Failed to create chunk directory"); } } @@ -73,46 +76,47 @@ void ChunkStorage::init_chunk_space(const std::string& file_path) const { * * This is pretty slow method because it cycle over all the chunks sapce for this file. */ -void ChunkStorage::trim_chunk_space(const std::string& file_path, - unsigned int chunk_start, unsigned int chunk_end) { +void ChunkStorage::trim_chunk_space(const string& file_path, + unsigned int chunk_start, unsigned int chunk_end) { auto chunk_dir = absolute(get_chunks_dir(file_path)); const bfs::directory_iterator end; for (bfs::directory_iterator chunk_file(chunk_dir); chunk_file != end; ++chunk_file) { auto chunk_path = chunk_file->path(); - auto chunk_id = std::stoul(chunk_path.filename().c_str()); - if(chunk_id >= chunk_start && chunk_id <= chunk_end) { + auto chunk_id = ::stoul(chunk_path.filename().c_str()); + if (chunk_id >= chunk_start && chunk_id <= chunk_end) { int ret = unlink(chunk_path.c_str()); - if(ret == -1) { - log->error("Failed to remove chunk file. File: {}, Error: {}", chunk_path.native(), std::strerror(errno)); - throw std::system_error(errno, std::system_category(), "Failed to remove chunk file"); + if (ret == -1) { + log->error("Failed to remove chunk file. File: '{}', Error: '{}'", chunk_path.native(), + ::strerror(errno)); + throw ::system_error(errno, ::system_category(), "Failed to remove chunk file"); } } } } -void ChunkStorage::delete_chunk(const std::string& file_path, unsigned int chunk_id) { +void ChunkStorage::delete_chunk(const string& file_path, unsigned int chunk_id) { auto chunk_path = absolute(get_chunk_path(file_path, chunk_id)); int ret = unlink(chunk_path.c_str()); - if(ret == -1) { - log->error("Failed to remove chunk file. File: {}, Error: {}", chunk_path, std::strerror(errno)); - throw std::system_error(errno, std::system_category(), "Failed to remove chunk file"); + if (ret == -1) { + log->error("Failed to remove chunk file. File: '{}', Error: '{}'", chunk_path, ::strerror(errno)); + throw ::system_error(errno, ::system_category(), "Failed to remove chunk file"); } } -void ChunkStorage::truncate_chunk(const std::string& file_path, unsigned int chunk_id, off_t length) { +void ChunkStorage::truncate_chunk(const string& file_path, unsigned int chunk_id, off_t length) { auto chunk_path = absolute(get_chunk_path(file_path, chunk_id)); - assert(length > 0 && (unsigned int)length <= chunksize); + assert(length > 0 && (unsigned int) length <= chunksize); int ret = truncate(chunk_path.c_str(), length); - if(ret == -1) { - log->error("Failed to truncate chunk file. File: {}, Error: {}", chunk_path, std::strerror(errno)); - throw std::system_error(errno, std::system_category(), "Failed to truncate chunk file"); + if (ret == -1) { + log->error("Failed to truncate chunk file. 
File: '{}', Error: '{}'", chunk_path, ::strerror(errno)); + throw ::system_error(errno, ::system_category(), "Failed to truncate chunk file"); } } -void ChunkStorage::write_chunk(const std::string& file_path, unsigned int chunk_id, - const char * buff, size_t size, off64_t offset, ABT_eventual& eventual) const { +void ChunkStorage::write_chunk(const string& file_path, unsigned int chunk_id, + const char* buff, size_t size, off64_t offset, ABT_eventual& eventual) const { assert((offset + size) <= chunksize); @@ -120,36 +124,36 @@ void ChunkStorage::write_chunk(const std::string& file_path, unsigned int chunk_ auto chunk_path = absolute(get_chunk_path(file_path, chunk_id)); int fd = open(chunk_path.c_str(), O_WRONLY | O_CREAT, 0640); - if(fd < 0) { - log->error("Failed to open chunk file for write. File: {}, Error: {}", chunk_path, std::strerror(errno)); - throw std::system_error(errno, std::system_category(), "Failed to open chunk file for write"); + if (fd < 0) { + log->error("Failed to open chunk file for write. File: '{}', Error: '{}'", chunk_path, ::strerror(errno)); + throw ::system_error(errno, ::system_category(), "Failed to open chunk file for write"); } auto wrote = pwrite(fd, buff, size, offset); if (wrote < 0) { - log->error("Failed to write chunk file. File: {}, size: {}, offset: {}, Error: {}", - chunk_path, size, offset, std::strerror(errno)); - throw std::system_error(errno, std::system_category(), "Failed to write chunk file"); + log->error("Failed to write chunk file. File: '{}', size: '{}', offset: '{}', Error: '{}'", + chunk_path, size, offset, ::strerror(errno)); + throw ::system_error(errno, ::system_category(), "Failed to write chunk file"); } ABT_eventual_set(eventual, &wrote, sizeof(size_t)); auto err = close(fd); if (err < 0) { - log->error("Failed to close chunk file after write. File: {}, Error: {}", - chunk_path, std::strerror(errno)); - //throw std::system_error(errno, std::system_category(), "Failed to close chunk file"); + log->error("Failed to close chunk file after write. File: '{}', Error: '{}'", + chunk_path, ::strerror(errno)); + //throw ::system_error(errno, ::system_category(), "Failed to close chunk file"); } } -void ChunkStorage::read_chunk(const std::string& file_path, unsigned int chunk_id, - char * buff, size_t size, off64_t offset, ABT_eventual& eventual) const { +void ChunkStorage::read_chunk(const string& file_path, unsigned int chunk_id, + char* buff, size_t size, off64_t offset, ABT_eventual& eventual) const { assert((offset + size) <= chunksize); auto chunk_path = absolute(get_chunk_path(file_path, chunk_id)); int fd = open(chunk_path.c_str(), O_RDONLY); - if(fd < 0) { - log->error("Failed to open chunk file for read. File: {}, Error: {}", chunk_path, std::strerror(errno)); - throw std::system_error(errno, std::system_category(), "Failed to open chunk file for read"); + if (fd < 0) { + log->error("Failed to open chunk file for read. File: '{}', Error: '{}'", chunk_path, ::strerror(errno)); + throw ::system_error(errno, ::system_category(), "Failed to open chunk file for read"); } size_t tot_read = 0; ssize_t read = 0; @@ -159,19 +163,19 @@ void ChunkStorage::read_chunk(const std::string& file_path, unsigned int chunk_i buff + tot_read, size - tot_read, offset + tot_read); - if(read == 0) { + if (read == 0) { break; } if (read < 0) { - log->error("Failed to read chunk file. 
File: {}, size: {}, offset: {}, Error: {}", - chunk_path, size, offset, std::strerror(errno)); - throw std::system_error(errno, std::system_category(), "Failed to read chunk file"); + log->error("Failed to read chunk file. File: '{}', size: '{}', offset: '{}', Error: '{}'", + chunk_path, size, offset, ::strerror(errno)); + throw ::system_error(errno, ::system_category(), "Failed to read chunk file"); } #ifndef NDEBUG - if(tot_read + read < size) { - log->warn("Read less bytes than requested: {}/{}. Total read was {}", read, size - tot_read, size); + if (tot_read + read < size) { + log->warn("Read less bytes than requested: '{}'/{}. Total read was '{}'", read, size - tot_read, size); } #endif assert(read > 0); @@ -184,29 +188,29 @@ void ChunkStorage::read_chunk(const std::string& file_path, unsigned int chunk_i auto err = close(fd); if (err < 0) { - log->error("Failed to close chunk file after read. File: {}, Error: {}", - chunk_path, std::strerror(errno)); - //throw std::system_error(errno, std::system_category(), "Failed to close chunk file"); + log->error("Failed to close chunk file after read. File: '{}', Error: '{}'", + chunk_path, ::strerror(errno)); + //throw ::system_error(errno, ::system_category(), "Failed to close chunk file"); } } ChunkStat ChunkStorage::chunk_stat() const { struct statfs sfs{}; - if(statfs(root_path.c_str(), &sfs) != 0) { + if (statfs(root_path.c_str(), &sfs) != 0) { log->error("Failed to get filesystem statistic for chunk directory." - " Error: {}", std::strerror(errno)); - throw std::system_error(errno, std::system_category(), - "statfs() failed on chunk directory"); + " Error: '{}'", ::strerror(errno)); + throw ::system_error(errno, ::system_category(), + "statfs() failed on chunk directory"); } - log->debug("Chunksize {}, total {}, free {}", sfs.f_bsize, sfs.f_blocks, sfs.f_bavail); + log->debug("Chunksize '{}', total '{}', free '{}'", sfs.f_bsize, sfs.f_blocks, sfs.f_bavail); auto bytes_total = - static_cast(sfs.f_bsize) * - static_cast(sfs.f_blocks); + static_cast(sfs.f_bsize) * + static_cast(sfs.f_blocks); auto bytes_free = - static_cast(sfs.f_bsize) * - static_cast(sfs.f_bavail); + static_cast(sfs.f_bsize) * + static_cast(sfs.f_bavail); return {chunksize, - bytes_total / chunksize, - bytes_free / chunksize}; + bytes_total / chunksize, + bytes_free / chunksize}; } \ No newline at end of file diff --git a/src/daemon/backend/metadata/db.cpp b/src/daemon/backend/metadata/db.cpp index 1ed8d59cd..a280be8cc 100644 --- a/src/daemon/backend/metadata/db.cpp +++ b/src/daemon/backend/metadata/db.cpp @@ -18,10 +18,12 @@ #include #include +extern "C" { #include +} -MetadataDB::MetadataDB(const std::string& path): path(path) { +MetadataDB::MetadataDB(const std::string& path) : path(path) { // Optimize RocksDB. 
This is the easiest way to get RocksDB to perform well options.IncreaseParallelism(); options.OptimizeLevelStyleCompaction(); @@ -30,7 +32,7 @@ MetadataDB::MetadataDB(const std::string& path): path(path) { options.merge_operator.reset(new MetadataMergeOperator); MetadataDB::optimize_rocksdb_options(options); write_opts.disableWAL = !(gkfs_config::rocksdb::use_write_ahead_log); - rdb::DB * rdb_ptr; + rdb::DB* rdb_ptr; auto s = rocksdb::DB::Open(options, path, &rdb_ptr); if (!s.ok()) { throw std::runtime_error("Failed to open RocksDB: " + s.ToString()); @@ -38,10 +40,10 @@ MetadataDB::MetadataDB(const std::string& path): path(path) { this->db.reset(rdb_ptr); } -void MetadataDB::throw_rdb_status_excpt(const rdb::Status& s){ +void MetadataDB::throw_rdb_status_excpt(const rdb::Status& s) { assert(!s.ok()); - if(s.IsNotFound()){ + if (s.IsNotFound()) { throw NotFoundException(s.ToString()); } else { throw DBException(s.ToString()); @@ -51,7 +53,7 @@ void MetadataDB::throw_rdb_status_excpt(const rdb::Status& s){ std::string MetadataDB::get(const std::string& key) const { std::string val; auto s = db->Get(rdb::ReadOptions(), key, &val); - if(!s.ok()){ + if (!s.ok()) { MetadataDB::throw_rdb_status_excpt(s); } return val; @@ -63,14 +65,14 @@ void MetadataDB::put(const std::string& key, const std::string& val) { auto cop = CreateOperand(val); auto s = db->Merge(write_opts, key, cop.serialize()); - if(!s.ok()){ + if (!s.ok()) { MetadataDB::throw_rdb_status_excpt(s); } } void MetadataDB::remove(const std::string& key) { auto s = db->Delete(write_opts, key); - if(!s.ok()){ + if (!s.ok()) { MetadataDB::throw_rdb_status_excpt(s); } } @@ -78,8 +80,8 @@ void MetadataDB::remove(const std::string& key) { bool MetadataDB::exists(const std::string& key) { std::string val; auto s = db->Get(rdb::ReadOptions(), key, &val); - if(!s.ok()){ - if(s.IsNotFound()){ + if (!s.ok()) { + if (s.IsNotFound()) { return false; } else { MetadataDB::throw_rdb_status_excpt(s); @@ -101,15 +103,15 @@ void MetadataDB::update(const std::string& old_key, const std::string& new_key, batch.Delete(old_key); batch.Put(new_key, val); auto s = db->Write(write_opts, &batch); - if(!s.ok()){ + if (!s.ok()) { MetadataDB::throw_rdb_status_excpt(s); } } -void MetadataDB::increase_size(const std::string& key, size_t size, bool append){ +void MetadataDB::increase_size(const std::string& key, size_t size, bool append) { auto uop = IncreaseSizeOperand(size, append); auto s = db->Merge(write_opts, key, uop.serialize()); - if(!s.ok()){ + if (!s.ok()) { MetadataDB::throw_rdb_status_excpt(s); } } @@ -117,7 +119,7 @@ void MetadataDB::increase_size(const std::string& key, size_t size, bool append) void MetadataDB::decrease_size(const std::string& key, size_t size) { auto uop = DecreaseSizeOperand(size); auto s = db->Merge(write_opts, key, uop.serialize()); - if(!s.ok()){ + if (!s.ok()) { MetadataDB::throw_rdb_status_excpt(s); } } @@ -133,7 +135,7 @@ std::vector> MetadataDB::get_dirents(const std::str auto root_path = dir; assert(is_absolute_path(root_path)); //add trailing slash if missing - if(!has_trailing_slash(root_path) && root_path.size() != 1) { + if (!has_trailing_slash(root_path) && root_path.size() != 1) { //add trailing slash only if missing and is not the root_folder "/" root_path.push_back('/'); } @@ -143,19 +145,19 @@ std::vector> MetadataDB::get_dirents(const std::str std::vector> entries; - for(it->Seek(root_path); - it->Valid() && - it->key().starts_with(root_path); - it->Next()){ + for (it->Seek(root_path); + it->Valid() && + 
it->key().starts_with(root_path); + it->Next()) { - if(it->key().size() == root_path.size()) { + if (it->key().size() == root_path.size()) { //we skip this path cause it is exactly the root_path continue; } /***** Get File name *****/ auto name = it->key().ToString(); - if(name.find_first_of('/', root_path.size()) != std::string::npos){ + if (name.find_first_of('/', root_path.size()) != std::string::npos) { //skip stuff deeper then one level depth continue; } @@ -163,12 +165,12 @@ std::vector> MetadataDB::get_dirents(const std::str name = name.substr(root_path.size()); //relative path of directory entries must not be empty - assert(name.size() > 0); + assert(!name.empty()); Metadata md(it->value().ToString()); auto is_dir = S_ISDIR(md.mode()); - entries.push_back(std::make_pair(std::move(name), std::move(is_dir))); + entries.emplace_back(std::move(name), is_dir); } assert(it->status().ok()); return entries; diff --git a/src/daemon/backend/metadata/merge.cpp b/src/daemon/backend/metadata/merge.cpp index 9ef371124..861104cdb 100644 --- a/src/daemon/backend/metadata/merge.cpp +++ b/src/daemon/backend/metadata/merge.cpp @@ -13,46 +13,47 @@ #include +using namespace std; -std::string MergeOperand::serialize_id() const { - std::string s; +string MergeOperand::serialize_id() const { + string s; s.reserve(2); - s += (char)id(); + s += (char) id(); // TODO check if static_cast can be used s += operand_id_suffix; return s; } -std::string MergeOperand::serialize() const { - std::string s = serialize_id(); +string MergeOperand::serialize() const { + string s = serialize_id(); s += serialize_params(); return s; } -OperandID MergeOperand::get_id(const rdb::Slice& serialized_op){ - return static_cast(serialized_op[0]); +OperandID MergeOperand::get_id(const rdb::Slice& serialized_op) { + return static_cast(serialized_op[0]); } -rdb::Slice MergeOperand::get_params(const rdb::Slice& serialized_op){ +rdb::Slice MergeOperand::get_params(const rdb::Slice& serialized_op) { assert(serialized_op[1] == operand_id_suffix); return {serialized_op.data() + 2, serialized_op.size() - 2}; } -IncreaseSizeOperand::IncreaseSizeOperand(const size_t size, const bool append): - size(size), append(append) {} +IncreaseSizeOperand::IncreaseSizeOperand(const size_t size, const bool append) : + size(size), append(append) {} -IncreaseSizeOperand::IncreaseSizeOperand(const rdb::Slice& serialized_op){ +IncreaseSizeOperand::IncreaseSizeOperand(const rdb::Slice& serialized_op) { size_t chrs_parsed = 0; size_t read = 0; //Parse size - size = std::stoul(serialized_op.data() + chrs_parsed, &read); + size = ::stoul(serialized_op.data() + chrs_parsed, &read); chrs_parsed += read + 1; assert(serialized_op[chrs_parsed - 1] == separator); //Parse append flag assert(serialized_op[chrs_parsed] == false_char || serialized_op[chrs_parsed] == true_char); - append = (serialized_op[chrs_parsed] == false_char) ? false : true; + append = serialized_op[chrs_parsed] != false_char; //check that we consumed all the input string assert(chrs_parsed + 1 == serialized_op.size()); } @@ -61,25 +62,25 @@ OperandID IncreaseSizeOperand::id() const { return OperandID::increase_size; } -std::string IncreaseSizeOperand::serialize_params() const { - std::string s; +string IncreaseSizeOperand::serialize_params() const { + string s; s.reserve(3); - s += std::to_string(size); + s += ::to_string(size); s += this->separator; - s += (append == false)? false_char : true_char; + s += !append ? 
false_char : true_char; return s; } DecreaseSizeOperand::DecreaseSizeOperand(const size_t size) : - size(size) {} + size(size) {} -DecreaseSizeOperand::DecreaseSizeOperand(const rdb::Slice& serialized_op){ +DecreaseSizeOperand::DecreaseSizeOperand(const rdb::Slice& serialized_op) { //Parse size size_t read = 0; //we need to convert serialized_op to a string because it doesn't contain the //leading slash needed by stoul - size = std::stoul(serialized_op.ToString(), &read); + size = ::stoul(serialized_op.ToString(), &read); //check that we consumed all the input string assert(read == serialized_op.size()); } @@ -88,18 +89,18 @@ OperandID DecreaseSizeOperand::id() const { return OperandID::decrease_size; } -std::string DecreaseSizeOperand::serialize_params() const { - return std::to_string(size); +string DecreaseSizeOperand::serialize_params() const { + return ::to_string(size); } -CreateOperand::CreateOperand(const std::string& metadata): metadata(metadata) {} +CreateOperand::CreateOperand(const string& metadata) : metadata(metadata) {} -OperandID CreateOperand::id() const{ +OperandID CreateOperand::id() const { return OperandID::create; } -std::string CreateOperand::serialize_params() const { +string CreateOperand::serialize_params() const { return metadata; } @@ -108,13 +109,13 @@ bool MetadataMergeOperator::FullMergeV2( const MergeOperationInput& merge_in, MergeOperationOutput* merge_out) const { - std::string prev_md_value; + string prev_md_value; auto ops_it = merge_in.operand_list.cbegin(); - if(merge_in.existing_value == nullptr){ + if (merge_in.existing_value == nullptr) { //The key to operate on doesn't exists in DB - if(MergeOperand::get_id(ops_it[0]) != OperandID::create){ - throw std::runtime_error("Merge operation failed: key do not exists and first operand is not a creation"); + if (MergeOperand::get_id(ops_it[0]) != OperandID::create) { + throw ::runtime_error("Merge operation failed: key do not exists and first operand is not a creation"); // TODO use logger to print err info; //Log(logger, "Key %s do not exists", existing_value->ToString().c_str()); //return false; @@ -129,28 +130,28 @@ bool MetadataMergeOperator::FullMergeV2( size_t fsize = md.size(); - for (; ops_it != merge_in.operand_list.cend(); ++ops_it){ + for (; ops_it != merge_in.operand_list.cend(); ++ops_it) { const rdb::Slice& serialized_op = *ops_it; assert(serialized_op.size() >= 2); auto operand_id = MergeOperand::get_id(serialized_op); auto parameters = MergeOperand::get_params(serialized_op); - if(operand_id == OperandID::increase_size){ + if (operand_id == OperandID::increase_size) { auto op = IncreaseSizeOperand(parameters); - if(op.append){ + if (op.append) { //append mode, just increment file fsize += op.size; } else { - fsize = std::max(op.size, fsize); + fsize = ::max(op.size, fsize); } - } else if(operand_id == OperandID::decrease_size) { + } else if (operand_id == OperandID::decrease_size) { auto op = DecreaseSizeOperand(parameters); assert(op.size < fsize); // we assume no concurrency here fsize = op.size; - } else if(operand_id == OperandID::create){ + } else if (operand_id == OperandID::create) { continue; } else { - throw std::runtime_error("Unrecognized merge operand ID: " + (char)operand_id); + throw ::runtime_error("Unrecognized merge operand ID: " + (char) operand_id); } } @@ -160,8 +161,8 @@ bool MetadataMergeOperator::FullMergeV2( } bool MetadataMergeOperator::PartialMergeMulti(const rdb::Slice& key, - const std::deque& operand_list, - std::string* new_value, rdb::Logger* logger) const { + 
const ::deque& operand_list, + string* new_value, rdb::Logger* logger) const { return false; } @@ -170,5 +171,5 @@ const char* MetadataMergeOperator::Name() const { } bool MetadataMergeOperator::AllowSingleOperand() const { - return true; + return true; } diff --git a/src/daemon/classes/fs_data.cpp b/src/daemon/classes/fs_data.cpp index 05e711c29..f34bf2b58 100644 --- a/src/daemon/classes/fs_data.cpp +++ b/src/daemon/classes/fs_data.cpp @@ -10,45 +10,19 @@ SPDX-License-Identifier: MIT */ - -#include - #include #include +#include // getter/setter -const std::unordered_map& FsData::hashmap() const { - return hashmap_; -} - -void FsData::hashmap(const std::unordered_map& hashmap_) { - FsData::hashmap_ = hashmap_; -} - -const std::hash& FsData::hashf() const { - return hashf_; -} - -void FsData::hashf(const std::hash& hashf_) { - FsData::hashf_ = hashf_; -} - -blksize_t FsData::blocksize() const { - return blocksize_; -} - -void FsData::blocksize(blksize_t blocksize_) { - FsData::blocksize_ = blocksize_; -} - const std::shared_ptr& FsData::spdlogger() const { return spdlogger_; } -void FsData::spdlogger(const std::shared_ptr& spdlogger_) { - FsData::spdlogger_ = spdlogger_; +void FsData::spdlogger(const std::shared_ptr& spdlogger) { + FsData::spdlogger_ = spdlogger; } const std::shared_ptr& FsData::mdb() const { @@ -75,8 +49,8 @@ const std::string& FsData::rootdir() const { return rootdir_; } -void FsData::rootdir(const std::string& rootdir_) { - FsData::rootdir_ = rootdir_; +void FsData::rootdir(const std::string& rootdir) { + FsData::rootdir_ = rootdir; } const std::string& FsData::mountdir() const { diff --git a/src/daemon/handler/h_data.cpp b/src/daemon/handler/h_data.cpp index 7abea310d..8b76d99ff 100644 --- a/src/daemon/handler/h_data.cpp +++ b/src/daemon/handler/h_data.cpp @@ -12,13 +12,14 @@ */ -#include +#include #include +#include + +#include #include #include #include -#include -#include using namespace std; @@ -472,7 +473,7 @@ static hg_return_t rpc_srv_read_data(hg_handle_t handle) { break; } assert(task_read_size != nullptr); - if(*task_read_size < 0) { + if (*task_read_size < 0) { if (-(*task_read_size) == ENOENT) { continue; } @@ -482,13 +483,13 @@ static hg_return_t rpc_srv_read_data(hg_handle_t handle) { out.err = -(*task_read_size); break; } - - if(*task_read_size == 0) { + + if (*task_read_size == 0) { continue; } ret = margo_bulk_transfer(mid, HG_BULK_PUSH, hgi->addr, in.bulk_handle, origin_offsets[chnk_id_curr], - bulk_handle, local_offsets[chnk_id_curr], *task_read_size); + bulk_handle, local_offsets[chnk_id_curr], *task_read_size); if (ret != HG_SUCCESS) { GKFS_DATA->spdlogger()->error( "{}() Failed push chnkid {} on path {} to client. 
origin offset {} local offset {} chunk size {}", diff --git a/src/daemon/handler/h_metadentry.cpp b/src/daemon/handler/h_metadentry.cpp index a643b932c..7f7a3a3b8 100644 --- a/src/daemon/handler/h_metadentry.cpp +++ b/src/daemon/handler/h_metadentry.cpp @@ -12,13 +12,13 @@ */ -#include -#include #include #include - #include +#include +#include + using namespace std; static hg_return_t rpc_srv_mk_node(hg_handle_t handle) { @@ -317,7 +317,7 @@ static hg_return_t rpc_srv_get_dirents(hg_handle_t handle) { out.dirents_size = entries.size(); - if (entries.size() == 0) { + if (entries.empty()) { out.err = 0; return rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); } @@ -325,12 +325,12 @@ static hg_return_t rpc_srv_get_dirents(hg_handle_t handle) { //Calculate total output size //TODO OPTIMIZATION: this can be calculated inside db_get_dirents size_t tot_names_size = 0; - for(auto const& e: entries){ + for (auto const& e: entries) { tot_names_size += e.first.size(); } - size_t out_size = tot_names_size + entries.size() * ( sizeof(bool) + sizeof(char) ); - if(bulk_size < out_size) { + size_t out_size = tot_names_size + entries.size() * (sizeof(bool) + sizeof(char)); + if (bulk_size < out_size) { //Source buffer is smaller than total output size GKFS_DATA->spdlogger()->error("{}() Entries do not fit source buffer", __func__); out.err = ENOBUFS; @@ -339,11 +339,11 @@ static hg_return_t rpc_srv_get_dirents(hg_handle_t handle) { //Serialize output data on local buffer auto out_buff = std::make_unique(out_size); - char * out_buff_ptr = out_buff.get(); + char* out_buff_ptr = out_buff.get(); auto bool_ptr = reinterpret_cast(out_buff_ptr); char* names_ptr = out_buff_ptr + entries.size(); - for(auto const& e: entries){ + for (auto const& e: entries) { *bool_ptr = e.second; bool_ptr++; @@ -352,7 +352,8 @@ static hg_return_t rpc_srv_get_dirents(hg_handle_t handle) { names_ptr += e.first.size() + 1; } - ret = margo_bulk_create(mid, 1, reinterpret_cast(&out_buff_ptr), &out_size, HG_BULK_READ_ONLY, &bulk_handle); + ret = margo_bulk_create(mid, 1, reinterpret_cast(&out_buff_ptr), &out_size, HG_BULK_READ_ONLY, + &bulk_handle); if (ret != HG_SUCCESS) { GKFS_DATA->spdlogger()->error("{}() Failed to create bulk handle", __func__); out.err = EBUSY; diff --git a/src/daemon/handler/h_preload.cpp b/src/daemon/handler/h_preload.cpp index e0b1f0a9e..b41066464 100644 --- a/src/daemon/handler/h_preload.cpp +++ b/src/daemon/handler/h_preload.cpp @@ -14,7 +14,8 @@ #include #include -#include "global/rpc/rpc_types.hpp" + +#include using namespace std; diff --git a/src/daemon/main.cpp b/src/daemon/main.cpp index 67d47bf4b..ee567293e 100644 --- a/src/daemon/main.cpp +++ b/src/daemon/main.cpp @@ -119,7 +119,7 @@ void destroy_enviroment() { if (!GKFS_DATA->hosts_file().empty()) { GKFS_DATA->spdlogger()->debug("{}() Removing hosts file", __func__); try { - destroy_hosts_file(); + gkfs::util::destroy_hosts_file(); } catch (const bfs::filesystem_error& e) { GKFS_DATA->spdlogger()->debug("{}() hosts file not found", __func__); } @@ -235,26 +235,6 @@ void register_server_rpcs(margo_instance_id mid) { MARGO_REGISTER(mid, gkfs::hg_tag::chunk_stat, rpc_chunk_stat_in_t, rpc_chunk_stat_out_t, rpc_srv_chunk_stat); } -void populate_hosts_file() { - const auto& hosts_file = GKFS_DATA->hosts_file(); - GKFS_DATA->spdlogger()->debug("{}() Populating hosts file: '{}'", __func__, hosts_file); - ofstream lfstream(hosts_file, ios::out | ios::app); - if (!lfstream) { - throw runtime_error( - fmt::format("Failed to open hosts file '{}': {}", 
hosts_file, strerror(errno))); - } - lfstream << fmt::format("{} {}", get_my_hostname(true), RPC_DATA->self_addr_str()) << std::endl; - if (!lfstream) { - throw runtime_error( - fmt::format("Failed to write on hosts file '{}': {}", hosts_file, strerror(errno))); - } - lfstream.close(); -} - -void destroy_hosts_file() { - std::remove(GKFS_DATA->hosts_file().c_str()); -} - void shutdown_handler(int dummy) { GKFS_DATA->spdlogger()->info("{}() Received signal: '{}'", __func__, strsignal(dummy)); shutdown_please.notify_all(); diff --git a/src/daemon/ops/metadentry.cpp b/src/daemon/ops/metadentry.cpp index 968b47aa5..3286f1671 100644 --- a/src/daemon/ops/metadentry.cpp +++ b/src/daemon/ops/metadentry.cpp @@ -88,6 +88,6 @@ void update_metadentry(const string& path, Metadata& md) { GKFS_DATA->mdb()->update(path, path, md.serialize()); } -std::vector> get_dirents(const std::string& dir){ +std::vector> get_dirents(const std::string& dir) { return GKFS_DATA->mdb()->get_dirents(dir); } \ No newline at end of file diff --git a/src/daemon/util.cpp b/src/daemon/util.cpp index c0e520493..c79fd97f5 100644 --- a/src/daemon/util.cpp +++ b/src/daemon/util.cpp @@ -12,6 +12,7 @@ */ #include #include + #include #include diff --git a/src/global/env_util.cpp b/src/global/env_util.cpp index 60078c6eb..14f3e91db 100644 --- a/src/global/env_util.cpp +++ b/src/global/env_util.cpp @@ -11,15 +11,16 @@ SPDX-License-Identifier: MIT */ +#include + #include #include -#include namespace gkfs { namespace env { std::string -get_var(const std::string& name, +get_var(const std::string& name, const std::string& default_value) { const char* const val = ::secure_getenv(name.c_str()); diff --git a/src/global/log_util.cpp b/src/global/log_util.cpp index 8105b8691..758e904ab 100644 --- a/src/global/log_util.cpp +++ b/src/global/log_util.cpp @@ -14,7 +14,6 @@ #include #include -#include #include #include #include @@ -48,7 +47,7 @@ spdlog::level::level_enum get_spdlog_level(string level_str) { } spdlog::level::level_enum get_spdlog_level(unsigned long level) { - switch(level) { + switch (level) { case 0: return spdlog::level::off; case 1: @@ -67,26 +66,26 @@ spdlog::level::level_enum get_spdlog_level(unsigned long level) { } void setup_loggers(const vector& loggers_name, - spdlog::level::level_enum level, const string& path) { + spdlog::level::level_enum level, const string& path) { - /* Create common sink */ - auto file_sink = make_shared(path); + /* Create common sink */ + auto file_sink = make_shared(path); - /* Create and configure loggers */ - auto loggers = list>(); - for(const auto& name: loggers_name){ - auto logger = make_shared(name, file_sink); - logger->flush_on(spdlog::level::trace); - loggers.push_back(logger); - } + /* Create and configure loggers */ + auto loggers = list>(); + for (const auto& name: loggers_name) { + auto logger = make_shared(name, file_sink); + logger->flush_on(spdlog::level::trace); + loggers.push_back(logger); + } - /* register loggers */ - for(const auto& logger: loggers){ - spdlog::register_logger(logger); - } + /* register loggers */ + for (const auto& logger: loggers) { + spdlog::register_logger(logger); + } - // set logger format - spdlog::set_pattern("[%C-%m-%d %H:%M:%S.%f] %P [%L][%n] %v"); + // set logger format + spdlog::set_pattern("[%C-%m-%d %H:%M:%S.%f] %P [%L][%n] %v"); - spdlog::set_level(level); + spdlog::set_level(level); } diff --git a/src/global/metadata.cpp b/src/global/metadata.cpp index 45669cf36..7ae72ded9 100644 --- a/src/global/metadata.cpp +++ b/src/global/metadata.cpp @@ 
-15,8 +15,11 @@ #include #include + +extern "C" { #include #include +} #include #include @@ -25,29 +28,27 @@ static const char MSP = '|'; // metadata separator Metadata::Metadata(const mode_t mode) : - atime_(), - mtime_(), - ctime_(), - mode_(mode), - link_count_(0), - size_(0), - blocks_(0) -{ + atime_(), + mtime_(), + ctime_(), + mode_(mode), + link_count_(0), + size_(0), + blocks_(0) { assert(S_ISDIR(mode_) || S_ISREG(mode_)); } #ifdef HAS_SYMLINKS Metadata::Metadata(const mode_t mode, const std::string& target_path) : - atime_(), - mtime_(), - ctime_(), - mode_(mode), - link_count_(0), - size_(0), - blocks_(0), - target_path_(target_path) -{ + atime_(), + mtime_(), + ctime_(), + mode_(mode), + link_count_(0), + size_(0), + blocks_(0), + target_path_(target_path) { assert(S_ISLNK(mode_) || S_ISDIR(mode_) || S_ISREG(mode_)); // target_path should be there only if this is a link assert(target_path_.empty() || S_ISLNK(mode_)); @@ -119,8 +120,7 @@ Metadata::Metadata(const std::string& binary_str) { assert(*ptr == '\0'); } -std::string Metadata::serialize() const -{ +std::string Metadata::serialize() const { std::string s; // The order is important. don't change. s += fmt::format_int(mode_).c_str(); // add mandatory mode diff --git a/src/global/path_util.cpp b/src/global/path_util.cpp index 4f123749a..3e8251f59 100644 --- a/src/global/path_util.cpp +++ b/src/global/path_util.cpp @@ -12,11 +12,11 @@ */ #include -#include +//#include #include #include #include -#include +//#include bool is_relative_path(const std::string& path) { @@ -52,7 +52,7 @@ bool has_trailing_slash(const std::string& path) { * prepend_path("/tmp/prefix", "./my/path") == "/tmp/prefix/./my/path" * ``` */ -std::string prepend_path(const std::string& prefix_path, const char * raw_path) { +std::string prepend_path(const std::string& prefix_path, const char* raw_path) { assert(!has_trailing_slash(prefix_path)); std::size_t raw_len = std::strlen(raw_path); std::string res; @@ -73,12 +73,12 @@ std::string prepend_path(const std::string& prefix_path, const char * raw_path) std::vector split_path(const std::string& path) { std::vector tokens; size_t start = std::string::npos; - size_t end = (path.front() != PSP)? 0 : 1; - while(end != std::string::npos && end < path.size()) { + size_t end = (path.front() != PSP) ? 0 : 1; + while (end != std::string::npos && end < path.size()) { start = end; end = path.find(PSP, start); tokens.push_back(path.substr(start, end - start)); - if(end != std::string::npos) { + if (end != std::string::npos) { ++end; } } @@ -86,9 +86,6 @@ std::vector split_path(const std::string& path) { } - - - /* Make an absolute path relative to a root path * * Convert @absolute_path into a relative one with respect to the given @root_path. 
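// Illustrative, self-contained sketch (hypothetical helper, not the project's API):
// the relativization idea described in the docstring above, reduced to its
// prefix-match core. The real path_to_relative() additionally asserts absolute
// inputs and trims a trailing slash; to_relative() here only shows the principle.
#include <algorithm>
#include <cassert>
#include <string>

static std::string to_relative(const std::string& root, const std::string& abs) {
    auto diff = std::mismatch(abs.cbegin(), abs.cend(), root.cbegin(), root.cend());
    if (diff.second != root.cend())
        return {};                               // abs does not start with root
    if (diff.first == abs.cend())
        return "/";                              // abs was exactly the root
    return std::string(diff.first, abs.cend());  // keep everything after the prefix
}

int main() {
    assert(to_relative("/mnt/gkfs", "/mnt/gkfs/a/b") == "/a/b");
    assert(to_relative("/mnt/gkfs", "/mnt/gkfs") == "/");
    assert(to_relative("/mnt/gkfs", "/other/a").empty());
    return 0;
}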
@@ -101,7 +98,7 @@ std::string path_to_relative(const std::string& root_path, const std::string& ab assert(!has_trailing_slash(root_path)); auto diff_its = std::mismatch(absolute_path.cbegin(), absolute_path.cend(), root_path.cbegin()); - if(diff_its.second != root_path.cend()){ + if (diff_its.second != root_path.cend()) { // complete path doesn't start with root_path return {}; } @@ -112,16 +109,16 @@ std::string path_to_relative(const std::string& root_path, const std::string& ab auto rel_it_end = absolute_path.cend(); // relative path start exactly after the root_path prefix - assert((size_t)(rel_it_begin - absolute_path.cbegin()) == root_path.size()); + assert((size_t) (rel_it_begin - absolute_path.cbegin()) == root_path.size()); - if(rel_it_begin == rel_it_end) { + if (rel_it_begin == rel_it_end) { //relative path is empty, @absolute_path was equal to @root_path return {'/'}; } // remove the trailing slash from relative path - if(has_trailing_slash(absolute_path) && - rel_it_begin != rel_it_end - 1) { // the relative path is longer then 1 char ('/') + if (has_trailing_slash(absolute_path) && + rel_it_begin != rel_it_end - 1) { // the relative path is longer then 1 char ('/') --rel_it_end; } @@ -134,7 +131,7 @@ std::string dirname(const std::string& path) { auto parent_path_size = path.find_last_of(PSP); assert(parent_path_size != std::string::npos); - if(parent_path_size == 0) { + if (parent_path_size == 0) { // parent is '/' parent_path_size = 1; } diff --git a/src/global/rpc/distributor.cpp b/src/global/rpc/distributor.cpp index 68c8ea592..2bce921a9 100644 --- a/src/global/rpc/distributor.cpp +++ b/src/global/rpc/distributor.cpp @@ -13,13 +13,14 @@ #include +using namespace std; + SimpleHashDistributor:: SimpleHashDistributor(Host localhost, unsigned int hosts_size) : - localhost_(localhost), - hosts_size_(hosts_size), - all_hosts_(hosts_size) -{ - std::iota(all_hosts_.begin(), all_hosts_.end(), 0); + localhost_(localhost), + hosts_size_(hosts_size), + all_hosts_(hosts_size) { + ::iota(all_hosts_.begin(), all_hosts_.end(), 0); } Host SimpleHashDistributor:: @@ -28,24 +29,21 @@ localhost() const { } Host SimpleHashDistributor:: -locate_data(const std::string& path, const ChunkID& chnk_id) const { - return str_hash(path + std::to_string(chnk_id)) % hosts_size_; +locate_data(const string& path, const ChunkID& chnk_id) const { + return str_hash(path + ::to_string(chnk_id)) % hosts_size_; } Host SimpleHashDistributor:: -locate_file_metadata(const std::string& path) const { +locate_file_metadata(const string& path) const { return str_hash(path) % hosts_size_; } - -std::vector SimpleHashDistributor:: -locate_directory_metadata(const std::string& path) const { +::vector SimpleHashDistributor:: +locate_directory_metadata(const string& path) const { return all_hosts_; } - -LocalOnlyDistributor::LocalOnlyDistributor(Host localhost) : localhost_(localhost) -{} +LocalOnlyDistributor::LocalOnlyDistributor(Host localhost) : localhost_(localhost) {} Host LocalOnlyDistributor:: localhost() const { @@ -53,16 +51,16 @@ localhost() const { } Host LocalOnlyDistributor:: -locate_data(const std::string& path, const ChunkID& chnk_id) const { +locate_data(const string& path, const ChunkID& chnk_id) const { return localhost_; } Host LocalOnlyDistributor:: -locate_file_metadata(const std::string& path) const { +locate_file_metadata(const string& path) const { return localhost_; } -std::vector LocalOnlyDistributor:: -locate_directory_metadata(const std::string& path) const { +::vector LocalOnlyDistributor:: 
+locate_directory_metadata(const string& path) const { return {localhost_}; } diff --git a/src/global/rpc/rpc_utils.cpp b/src/global/rpc/rpc_utils.cpp index 3b25642c7..c632bfe1b 100644 --- a/src/global/rpc/rpc_utils.cpp +++ b/src/global/rpc/rpc_utils.cpp @@ -13,10 +13,13 @@ #include + +extern "C" { #include -#include #include #include +} + #include using namespace std; @@ -35,16 +38,16 @@ hg_bool_t bool_to_merc_bool(const bool state) { * Returns the machine's hostname * @return */ -std::string get_my_hostname(bool short_hostname) { +string get_my_hostname(bool short_hostname) { char hostname[1024]; auto ret = gethostname(hostname, 1024); if (ret == 0) { - std::string hostname_s(hostname); + string hostname_s(hostname); if (!short_hostname) return hostname_s; // get short hostname auto pos = hostname_s.find("."s); - if (pos != std::string::npos) + if (pos != string::npos) hostname_s = hostname_s.substr(0, pos); return hostname_s; } else @@ -52,7 +55,7 @@ std::string get_my_hostname(bool short_hostname) { } -string get_host_by_name(const string & hostname) { +string get_host_by_name(const string& hostname) { int err = 0; struct addrinfo hints; memset(&hints, 0, sizeof(struct addrinfo)); @@ -60,15 +63,15 @@ string get_host_by_name(const string & hostname) { hints.ai_family = AF_UNSPEC; hints.ai_socktype = SOCK_RAW; - struct addrinfo * addr = nullptr; + struct addrinfo* addr = nullptr; err = getaddrinfo( - hostname.c_str(), - nullptr, - &hints, - &addr - ); - if(err) { + hostname.c_str(), + nullptr, + &hints, + &addr + ); + if (err) { throw runtime_error("Error getting address info for '" + hostname + "': " + gai_strerror(err)); } @@ -77,28 +80,13 @@ string get_host_by_name(const string & hostname) { err = getnameinfo( addr->ai_addr, addr->ai_addrlen, - addr_str, INET6_ADDRSTRLEN, + addr_str, INET6_ADDRSTRLEN, nullptr, 0, (NI_NUMERICHOST | NI_NOFQDN) - ); + ); if (err) { throw runtime_error("Error on getnameinfo(): "s + gai_strerror(err)); } freeaddrinfo(addr); return addr_str; -} - -/** - * checks if a Mercury handle's address is shared memory - * @param mid - * @param addr - * @return bool - */ -bool is_handle_sm(margo_instance_id mid, const hg_addr_t& addr) { - hg_size_t size = 128; - char addr_cstr[128]; - if (margo_addr_to_string(mid, addr_cstr, &size, addr) != HG_SUCCESS) - return false; - string addr_str(addr_cstr); - return addr_str.substr(0, 5) == "na+sm"; } \ No newline at end of file -- GitLab From 7897d84f87b174e9f800388b8a4c696f7010034b Mon Sep 17 00:00:00 2001 From: Marc Vef Date: Sun, 9 Feb 2020 12:57:12 +0100 Subject: [PATCH 03/25] gkfs function prefix -> gkfs::func namespace --- include/client/gkfs_functions.hpp | 93 ++++++++----- src/client/gkfs_functions.cpp | 210 ++++++++++++++---------------- src/client/hooks.cpp | 54 ++++---- src/client/resolve.cpp | 8 +- 4 files changed, 187 insertions(+), 178 deletions(-) diff --git a/include/client/gkfs_functions.hpp b/include/client/gkfs_functions.hpp index 7750e9e7d..532696867 100644 --- a/include/client/gkfs_functions.hpp +++ b/include/client/gkfs_functions.hpp @@ -17,73 +17,96 @@ #include #include -std::shared_ptr gkfs_metadata(const std::string& path, bool follow_links = false); +struct linux_dirent { + unsigned long d_ino; + unsigned long d_off; + unsigned short d_reclen; + char d_name[1]; +}; -int check_parent_dir(const std::string& path); +struct linux_dirent64 { + unsigned long long d_ino; + unsigned long long d_off; + unsigned short d_reclen; + unsigned char d_type; + char d_name[1]; +}; -int gkfs_open(const std::string& 
path, mode_t mode, int flags); +using sys_statfs = struct statfs; +using sys_statvfs = struct statvfs; -int gkfs_mk_node(const std::string& path, mode_t mode); +namespace gkfs { + namespace func { -int gkfs_rm_node(const std::string& path); -int gkfs_access(const std::string& path, int mask, bool follow_links = true); + std::shared_ptr metadata(const std::string& path, bool follow_links = false); -int gkfs_stat(const std::string& path, struct stat* buf, bool follow_links = true); + int check_parent_dir(const std::string& path); -int gkfs_statvfs(struct statvfs* buf); + int open(const std::string& path, mode_t mode, int flags); -int gkfs_statfs(struct statfs* buf); + int mk_node(const std::string& path, mode_t mode); -off64_t gkfs_lseek(unsigned int fd, off64_t offset, unsigned int whence); + int rm_node(const std::string& path); -off64_t gkfs_lseek(std::shared_ptr gkfs_fd, off64_t offset, unsigned int whence); + int access(const std::string& path, int mask, bool follow_links = true); -int gkfs_truncate(const std::string& path, off_t offset); + int stat(const std::string& path, struct stat* buf, bool follow_links = true); -int gkfs_truncate(const std::string& path, off_t old_size, off_t new_size); + int statfs(sys_statfs* buf); -int gkfs_dup(int oldfd); + int statvfs(sys_statvfs* buf); -int gkfs_dup2(int oldfd, int newfd); + off64_t lseek(unsigned int fd, off64_t offset, unsigned int whence); + + off64_t lseek(std::shared_ptr gkfs_fd, off64_t offset, unsigned int whence); + + int truncate(const std::string& path, off_t offset); + + int truncate(const std::string& path, off_t old_size, off_t new_size); + + int dup(int oldfd); + + int dup2(int oldfd, int newfd); #ifdef HAS_SYMLINKS -int gkfs_mk_symlink(const std::string& path, const std::string& target_path); + int mk_symlink(const std::string& path, const std::string& target_path); -int gkfs_readlink(const std::string& path, char* buf, int bufsize); + int readlink(const std::string& path, char* buf, int bufsize); #endif + ssize_t pwrite(std::shared_ptr file, + const char* buf, size_t count, off64_t offset); -ssize_t gkfs_pwrite(std::shared_ptr file, - const char* buf, size_t count, off64_t offset); - -ssize_t gkfs_pwrite_ws(int fd, const void* buf, size_t count, off64_t offset); + ssize_t pwrite_ws(int fd, const void* buf, size_t count, off64_t offset); -ssize_t gkfs_write(int fd, const void* buf, size_t count); + ssize_t write(int fd, const void* buf, size_t count); -ssize_t gkfs_pwritev(int fd, const struct iovec* iov, int iovcnt, off_t offset); + ssize_t pwritev(int fd, const struct iovec* iov, int iovcnt, off_t offset); -ssize_t gkfs_writev(int fd, const struct iovec* iov, int iovcnt); + ssize_t writev(int fd, const struct iovec* iov, int iovcnt); -ssize_t gkfs_pread(std::shared_ptr file, char* buf, size_t count, off64_t offset); + ssize_t pread(std::shared_ptr file, char* buf, size_t count, off64_t offset); -ssize_t gkfs_pread_ws(int fd, void* buf, size_t count, off64_t offset); + ssize_t pread_ws(int fd, void* buf, size_t count, off64_t offset); -ssize_t gkfs_read(int fd, void* buf, size_t count); + ssize_t read(int fd, void* buf, size_t count); -int gkfs_opendir(const std::string& path); + int opendir(const std::string& path); -int getdents(unsigned int fd, - struct linux_dirent* dirp, - unsigned int count); + int getdents(unsigned int fd, + struct linux_dirent* dirp, + unsigned int count); -int getdents64(unsigned int fd, - struct linux_dirent64* dirp, - unsigned int count); + int getdents64(unsigned int fd, + struct linux_dirent64* 
dirp, + unsigned int count); -int gkfs_rmdir(const std::string& path); + int rmdir(const std::string& path); + } +} #endif //GEKKOFS_GKFS_FUNCTIONS_HPP diff --git a/src/client/gkfs_functions.cpp b/src/client/gkfs_functions.cpp index 548fe3287..101e4b841 100644 --- a/src/client/gkfs_functions.cpp +++ b/src/client/gkfs_functions.cpp @@ -32,24 +32,51 @@ extern "C" { #define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1) #define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) -struct linux_dirent { - unsigned long d_ino; - unsigned long d_off; - unsigned short d_reclen; - char d_name[1]; -}; - -struct linux_dirent64 { - unsigned long long d_ino; - unsigned long long d_off; - unsigned short d_reclen; - unsigned char d_type; - char d_name[1]; -}; - using namespace std; -int gkfs_open(const std::string& path, mode_t mode, int flags) { +std::shared_ptr gkfs::func::metadata(const string& path, bool follow_links) { + std::string attr; + auto err = rpc_send::stat(path, attr); + if (err) { + return nullptr; + } +#ifdef HAS_SYMLINKS + if (follow_links) { + Metadata md{attr}; + while (md.is_link()) { + err = rpc_send::stat(md.target_path(), attr); + if (err) { + return nullptr; + } + md = Metadata{attr}; + } + } +#endif + return make_shared(attr); +} + +int gkfs::func::check_parent_dir(const std::string& path) { +#if CREATE_CHECK_PARENTS + auto p_comp = dirname(path); + auto md = gkfs::func::metadata(p_comp); + if (!md) { + if (errno == ENOENT) { + LOG(DEBUG, "Parent component does not exist: '{}'", p_comp); + } else { + LOG(ERROR, "Failed to get metadata for parent component '{}': {}", path, strerror(errno)); + } + return -1; + } + if (!S_ISDIR(md->mode())) { + LOG(DEBUG, "Parent component is not a directory: '{}'", p_comp); + errno = ENOTDIR; + return -1; + } +#endif // CREATE_CHECK_PARENTS + return 0; +} + +int gkfs::func::open(const std::string& path, mode_t mode, int flags) { if (flags & O_PATH) { LOG(ERROR, "`O_PATH` flag is not supported"); @@ -64,7 +91,7 @@ int gkfs_open(const std::string& path, mode_t mode, int flags) { } bool exists = true; - auto md = gkfs_metadata(path); + auto md = gkfs::func::metadata(path); if (!md) { if (errno == ENOENT) { exists = false; @@ -91,7 +118,7 @@ int gkfs_open(const std::string& path, mode_t mode, int flags) { } // no access check required here. If one is using our FS they have the permissions. 
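// Illustrative, self-contained sketch (hypothetical helper, not the project's API):
// the create/exclusive decision that gkfs::func::open() walks through above,
// boiled down to a pure function. It returns the errno the call would fail with,
// or 0 if the open may proceed (creating the file first when it does not exist).
#include <cerrno>
#include <fcntl.h>

static int decide_open(bool exists, int flags) {
    if (!exists) {
        if (!(flags & O_CREAT))
            return ENOENT;           // nothing to open and no request to create it
        return 0;                    // proceed: create a new regular file
    }
    if ((flags & O_CREAT) && (flags & O_EXCL))
        return EEXIST;               // exclusive creation of an existing path fails
    return 0;                        // proceed: open the existing file
}

// decide_open(/*exists=*/false, O_RDWR)          -> ENOENT
// decide_open(/*exists=*/true, O_CREAT | O_EXCL) -> EEXIST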
- if (gkfs_mk_node(path, mode | S_IFREG)) { + if (gkfs::func::mk_node(path, mode | S_IFREG)) { LOG(ERROR, "Error creating non-existent file: '{}'", strerror(errno)); return -1; } @@ -111,12 +138,12 @@ int gkfs_open(const std::string& path, mode_t mode, int flags) { errno = ELOOP; return -1; } - return gkfs_open(md->target_path(), mode, flags); + return gkfs::func::open(md->target_path(), mode, flags); } #endif if (S_ISDIR(md->mode())) { - return gkfs_opendir(path); + return gkfs::func::opendir(path); } @@ -124,7 +151,7 @@ int gkfs_open(const std::string& path, mode_t mode, int flags) { assert(S_ISREG(md->mode())); if ((flags & O_TRUNC) && ((flags & O_RDWR) || (flags & O_WRONLY))) { - if (gkfs_truncate(path, md->size(), 0)) { + if (gkfs::func::truncate(path, md->size(), 0)) { LOG(ERROR, "Error truncating file"); return -1; } @@ -134,28 +161,7 @@ int gkfs_open(const std::string& path, mode_t mode, int flags) { return CTX->file_map()->add(std::make_shared(path, flags)); } -int check_parent_dir(const std::string& path) { -#if CREATE_CHECK_PARENTS - auto p_comp = dirname(path); - auto md = gkfs_metadata(p_comp); - if (!md) { - if (errno == ENOENT) { - LOG(DEBUG, "Parent component does not exist: '{}'", p_comp); - } else { - LOG(ERROR, "Failed to get metadata for parent component '{}': {}", path, strerror(errno)); - } - return -1; - } - if (!S_ISDIR(md->mode())) { - LOG(DEBUG, "Parent component is not a directory: '{}'", p_comp); - errno = ENOTDIR; - return -1; - } -#endif // CREATE_CHECK_PARENTS - return 0; -} - -int gkfs_mk_node(const std::string& path, mode_t mode) { +int gkfs::func::mk_node(const std::string& path, mode_t mode) { //file type must be set switch (mode & S_IFMT) { @@ -189,8 +195,8 @@ int gkfs_mk_node(const std::string& path, mode_t mode) { * @param path * @return */ -int gkfs_rm_node(const std::string& path) { - auto md = gkfs_metadata(path); +int gkfs::func::rm_node(const std::string& path) { + auto md = gkfs::func::metadata(path); if (!md) { return -1; } @@ -198,8 +204,8 @@ int gkfs_rm_node(const std::string& path) { return rpc_send::rm_node(path, !has_data, md->size()); } -int gkfs_access(const std::string& path, const int mask, bool follow_links) { - auto md = gkfs_metadata(path, follow_links); +int gkfs::func::access(const std::string& path, const int mask, bool follow_links) { + auto md = gkfs::func::metadata(path, follow_links); if (!md) { errno = ENOENT; return -1; @@ -207,8 +213,8 @@ int gkfs_access(const std::string& path, const int mask, bool follow_links) { return 0; } -int gkfs_stat(const string& path, struct stat* buf, bool follow_links) { - auto md = gkfs_metadata(path, follow_links); +int gkfs::func::stat(const string& path, struct stat* buf, bool follow_links) { + auto md = gkfs::func::metadata(path, follow_links); if (!md) { return -1; } @@ -216,28 +222,7 @@ int gkfs_stat(const string& path, struct stat* buf, bool follow_links) { return 0; } -std::shared_ptr gkfs_metadata(const string& path, bool follow_links) { - std::string attr; - auto err = rpc_send::stat(path, attr); - if (err) { - return nullptr; - } -#ifdef HAS_SYMLINKS - if (follow_links) { - Metadata md{attr}; - while (md.is_link()) { - err = rpc_send::stat(md.target_path(), attr); - if (err) { - return nullptr; - } - md = Metadata{attr}; - } - } -#endif - return make_shared(attr); -} - -int gkfs_statfs(struct statfs* buf) { +int gkfs::func::statfs(sys_statfs* buf) { auto blk_stat = rpc_send::chunk_stat(); buf->f_type = 0; buf->f_bsize = blk_stat.chunk_size; @@ -254,7 +239,7 @@ int gkfs_statfs(struct 
statfs* buf) { return 0; } -int gkfs_statvfs(struct statvfs* buf) { +int gkfs::func::statvfs(sys_statvfs* buf) { init_ld_env_if_needed(); auto blk_stat = rpc_send::chunk_stat(); buf->f_bsize = blk_stat.chunk_size; @@ -272,11 +257,11 @@ int gkfs_statvfs(struct statvfs* buf) { return 0; } -off_t gkfs_lseek(unsigned int fd, off_t offset, unsigned int whence) { - return gkfs_lseek(CTX->file_map()->get(fd), offset, whence); +off_t gkfs::func::lseek(unsigned int fd, off_t offset, unsigned int whence) { + return gkfs::func::lseek(CTX->file_map()->get(fd), offset, whence); } -off_t gkfs_lseek(shared_ptr gkfs_fd, off_t offset, unsigned int whence) { +off_t gkfs::func::lseek(shared_ptr gkfs_fd, off_t offset, unsigned int whence) { switch (whence) { case SEEK_SET: gkfs_fd->pos(offset); @@ -312,7 +297,7 @@ off_t gkfs_lseek(shared_ptr gkfs_fd, off_t offset, unsigned int whence return gkfs_fd->pos(); } -int gkfs_truncate(const std::string& path, off_t old_size, off_t new_size) { +int gkfs::func::truncate(const std::string& path, off_t old_size, off_t new_size) { assert(new_size >= 0); assert(new_size <= old_size); @@ -332,7 +317,7 @@ int gkfs_truncate(const std::string& path, off_t old_size, off_t new_size) { return 0; } -int gkfs_truncate(const std::string& path, off_t length) { +int gkfs::func::truncate(const std::string& path, off_t length) { /* TODO CONCURRENCY: * At the moment we first ask the length to the metadata-server in order to * know which data-server have data to be deleted. @@ -347,7 +332,7 @@ int gkfs_truncate(const std::string& path, off_t length) { return -1; } - auto md = gkfs_metadata(path, true); + auto md = gkfs::func::metadata(path, true); if (!md) { return -1; } @@ -357,18 +342,18 @@ int gkfs_truncate(const std::string& path, off_t length) { errno = EINVAL; return -1; } - return gkfs_truncate(path, size, length); + return gkfs::func::truncate(path, size, length); } -int gkfs_dup(const int oldfd) { +int gkfs::func::dup(const int oldfd) { return CTX->file_map()->dup(oldfd); } -int gkfs_dup2(const int oldfd, const int newfd) { +int gkfs::func::dup2(const int oldfd, const int newfd) { return CTX->file_map()->dup2(oldfd, newfd); } -ssize_t gkfs_pwrite(std::shared_ptr file, const char* buf, size_t count, off64_t offset) { +ssize_t gkfs::func::pwrite(std::shared_ptr file, const char* buf, size_t count, off64_t offset) { if (file->type() != FileType::regular) { assert(file->type() == FileType::directory); LOG(WARNING, "Cannot read from directory"); @@ -392,9 +377,9 @@ ssize_t gkfs_pwrite(std::shared_ptr file, const char* buf, size_t coun return ret; // return written size or -1 as error } -ssize_t gkfs_pwrite_ws(int fd, const void* buf, size_t count, off64_t offset) { +ssize_t gkfs::func::pwrite_ws(int fd, const void* buf, size_t count, off64_t offset) { auto file = CTX->file_map()->get(fd); - return gkfs_pwrite(file, reinterpret_cast(buf), count, offset); + return gkfs::func::pwrite(file, reinterpret_cast(buf), count, offset); } /* Write counts bytes starting from current file position @@ -402,12 +387,12 @@ ssize_t gkfs_pwrite_ws(int fd, const void* buf, size_t count, off64_t offset) { * * Same as write syscall. 
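// Illustrative, self-contained sketch (hypothetical types, not the project's
// OpenFile class): roughly the "write is a pwrite at a client-tracked offset,
// then advance the offset" pattern that gkfs::func::write() follows above,
// shown here with plain POSIX pwrite() as the data path.
#include <cstddef>
#include <sys/types.h>
#include <unistd.h>

struct TrackedFile {
    int fd;          // underlying descriptor (POSIX here, RPC-backed in GekkoFS)
    off_t pos = 0;   // position kept by the client, not by the kernel
};

static ssize_t tracked_write(TrackedFile& f, const void* buf, size_t count) {
    ssize_t ret = ::pwrite(f.fd, buf, count, f.pos);  // write at the tracked offset
    if (ret > 0)
        f.pos += ret;                                 // advance by what was written
    return ret;                                       // -1 and errno on failure
}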
*/ -ssize_t gkfs_write(int fd, const void* buf, size_t count) { +ssize_t gkfs::func::write(int fd, const void* buf, size_t count) { auto gkfs_fd = CTX->file_map()->get(fd); auto pos = gkfs_fd->pos(); //retrieve the current offset if (gkfs_fd->get_flag(OpenFile_flags::append)) - gkfs_lseek(gkfs_fd, 0, SEEK_END); - auto ret = gkfs_pwrite(gkfs_fd, reinterpret_cast(buf), count, pos); + gkfs::func::lseek(gkfs_fd, 0, SEEK_END); + auto ret = gkfs::func::pwrite(gkfs_fd, reinterpret_cast(buf), count, pos); // Update offset in file descriptor in the file map if (ret > 0) { gkfs_fd->pos(pos + count); @@ -415,7 +400,7 @@ ssize_t gkfs_write(int fd, const void* buf, size_t count) { return ret; } -ssize_t gkfs_pwritev(int fd, const struct iovec* iov, int iovcnt, off_t offset) { +ssize_t gkfs::func::pwritev(int fd, const struct iovec* iov, int iovcnt, off_t offset) { auto file = CTX->file_map()->get(fd); auto pos = offset; // keep truck of current position @@ -427,7 +412,7 @@ ssize_t gkfs_pwritev(int fd, const struct iovec* iov, int iovcnt, off_t offset) continue; } auto buf = (iov + i)->iov_base; - ret = gkfs_pwrite(file, reinterpret_cast(buf), count, pos); + ret = gkfs::func::pwrite(file, reinterpret_cast(buf), count, pos); if (ret == -1) { break; } @@ -445,11 +430,11 @@ ssize_t gkfs_pwritev(int fd, const struct iovec* iov, int iovcnt, off_t offset) return written; } -ssize_t gkfs_writev(int fd, const struct iovec* iov, int iovcnt) { +ssize_t gkfs::func::writev(int fd, const struct iovec* iov, int iovcnt) { auto gkfs_fd = CTX->file_map()->get(fd); auto pos = gkfs_fd->pos(); // retrieve the current offset - auto ret = gkfs_pwritev(fd, iov, iovcnt, pos); + auto ret = gkfs::func::pwritev(fd, iov, iovcnt, pos); assert(ret != 0); if (ret < 0) { return -1; @@ -458,7 +443,7 @@ ssize_t gkfs_writev(int fd, const struct iovec* iov, int iovcnt) { return ret; } -ssize_t gkfs_pread(std::shared_ptr file, char* buf, size_t count, off64_t offset) { +ssize_t gkfs::func::pread(std::shared_ptr file, char* buf, size_t count, off64_t offset) { if (file->type() != FileType::regular) { assert(file->type() == FileType::directory); LOG(WARNING, "Cannot read from directory"); @@ -478,10 +463,10 @@ ssize_t gkfs_pread(std::shared_ptr file, char* buf, size_t count, off6 return ret; // return read size or -1 as error } -ssize_t gkfs_read(int fd, void* buf, size_t count) { +ssize_t gkfs::func::read(int fd, void* buf, size_t count) { auto gkfs_fd = CTX->file_map()->get(fd); auto pos = gkfs_fd->pos(); //retrieve the current offset - auto ret = gkfs_pread(gkfs_fd, reinterpret_cast(buf), count, pos); + auto ret = gkfs::func::pread(gkfs_fd, reinterpret_cast(buf), count, pos); // Update offset in file descriptor in the file map if (ret > 0) { gkfs_fd->pos(pos + ret); @@ -489,14 +474,14 @@ ssize_t gkfs_read(int fd, void* buf, size_t count) { return ret; } -ssize_t gkfs_pread_ws(int fd, void* buf, size_t count, off64_t offset) { +ssize_t gkfs::func::pread_ws(int fd, void* buf, size_t count, off64_t offset) { auto gkfs_fd = CTX->file_map()->get(fd); - return gkfs_pread(gkfs_fd, reinterpret_cast(buf), count, offset); + return gkfs::func::pread(gkfs_fd, reinterpret_cast(buf), count, offset); } -int gkfs_opendir(const std::string& path) { +int gkfs::func::opendir(const std::string& path) { - auto md = gkfs_metadata(path); + auto md = gkfs::func::metadata(path); if (!md) { return -1; } @@ -511,8 +496,8 @@ int gkfs_opendir(const std::string& path) { return CTX->file_map()->add(open_dir); } -int gkfs_rmdir(const std::string& path) { - auto md = 
gkfs_metadata(path); +int gkfs::func::rmdir(const std::string& path) { + auto md = gkfs::func::metadata(path); if (!md) { LOG(DEBUG, "Path '{}' does not exist: ", path); errno = ENOENT; @@ -533,10 +518,9 @@ int gkfs_rmdir(const std::string& path) { return rpc_send::rm_node(path, true, 0); } - -int getdents(unsigned int fd, - struct linux_dirent* dirp, - unsigned int count) { +int gkfs::func::getdents(unsigned int fd, + struct linux_dirent* dirp, + unsigned int count) { auto open_dir = CTX->file_map()->get_dir(fd); if (open_dir == nullptr) { @@ -587,9 +571,9 @@ int getdents(unsigned int fd, } -int getdents64(unsigned int fd, - struct linux_dirent64* dirp, - unsigned int count) { +int gkfs::func::getdents64(unsigned int fd, + struct linux_dirent64* dirp, + unsigned int count) { auto open_dir = CTX->file_map()->get_dir(fd); if (open_dir == nullptr) { @@ -640,14 +624,14 @@ int getdents64(unsigned int fd, #ifdef HAS_SYMLINKS -int gkfs_mk_symlink(const std::string& path, const std::string& target_path) { +int gkfs::func::mk_symlink(const std::string& path, const std::string& target_path) { init_ld_env_if_needed(); /* The following check is not POSIX compliant. * In POSIX the target is not checked at all. * Here if the target is a directory we raise a NOTSUP error. * So that application know we don't support link to directory. */ - auto target_md = gkfs_metadata(target_path, false); + auto target_md = gkfs::func::metadata(target_path, false); if (target_md != nullptr) { auto trg_mode = target_md->mode(); if (!(S_ISREG(trg_mode) || S_ISLNK(trg_mode))) { @@ -662,7 +646,7 @@ int gkfs_mk_symlink(const std::string& path, const std::string& target_path) { return -1; } - auto link_md = gkfs_metadata(path, false); + auto link_md = gkfs::func::metadata(path, false); if (link_md != nullptr) { LOG(DEBUG, "Link exists: '{}'", path); errno = EEXIST; @@ -672,9 +656,9 @@ int gkfs_mk_symlink(const std::string& path, const std::string& target_path) { return rpc_send::mk_symlink(path, target_path); } -int gkfs_readlink(const std::string& path, char* buf, int bufsize) { +int gkfs::func::readlink(const std::string& path, char* buf, int bufsize) { init_ld_env_if_needed(); - auto md = gkfs_metadata(path, false); + auto md = gkfs::func::metadata(path, false); if (md == nullptr) { LOG(DEBUG, "Named link doesn't exist"); return -1; @@ -697,3 +681,5 @@ int gkfs_readlink(const std::string& path, char* buf, int bufsize) { } #endif + +#pragma clang diagnostic pop \ No newline at end of file diff --git a/src/client/hooks.cpp b/src/client/hooks.cpp index 04bcbdf0c..aa4c6172c 100644 --- a/src/client/hooks.cpp +++ b/src/client/hooks.cpp @@ -51,7 +51,7 @@ int hook_openat(int dirfd, const char* cpath, int flags, mode_t mode) { return -ENOTDIR; case RelativizeStatus::internal: - return with_errno(gkfs_open(resolved, mode, flags)); + return with_errno(gkfs::func::open(resolved, mode, flags)); default: LOG(ERROR, "{}() relativize status unknown: {}", __func__); @@ -85,7 +85,7 @@ int hook_stat(const char* path, struct stat* buf) { std::string rel_path; if (CTX->relativize_path(path, rel_path, false)) { - return with_errno(gkfs_stat(rel_path, buf)); + return with_errno(gkfs::func::stat(rel_path, buf)); } return syscall_no_intercept(SYS_stat, rel_path.c_str(), buf); } @@ -97,7 +97,7 @@ int hook_lstat(const char* path, struct stat* buf) { std::string rel_path; if (CTX->relativize_path(path, rel_path)) { - return with_errno(gkfs_stat(rel_path, buf)); + return with_errno(gkfs::func::stat(rel_path, buf)); } return 
syscall_no_intercept(SYS_lstat, rel_path.c_str(), buf); } @@ -109,7 +109,7 @@ int hook_fstat(unsigned int fd, struct stat* buf) { if (CTX->file_map()->exist(fd)) { auto path = CTX->file_map()->get(fd)->path(); - return with_errno(gkfs_stat(path, buf)); + return with_errno(gkfs::func::stat(path, buf)); } return syscall_no_intercept(SYS_fstat, fd, buf); } @@ -137,7 +137,7 @@ int hook_fstatat(int dirfd, const char* cpath, struct stat* buf, int flags) { return -ENOTDIR; case RelativizeStatus::internal: - return with_errno(gkfs_stat(resolved, buf)); + return with_errno(gkfs::func::stat(resolved, buf)); default: LOG(ERROR, "{}() relativize status unknown: {}", __func__); @@ -151,7 +151,7 @@ int hook_read(unsigned int fd, void* buf, size_t count) { __func__, fd, fmt::ptr(buf), count); if (CTX->file_map()->exist(fd)) { - return with_errno(gkfs_read(fd, buf, count)); + return with_errno(gkfs::func::read(fd, buf, count)); } return syscall_no_intercept(SYS_read, fd, buf, count); } @@ -162,7 +162,7 @@ int hook_pread(unsigned int fd, char* buf, size_t count, loff_t pos) { __func__, fd, fmt::ptr(buf), count, pos); if (CTX->file_map()->exist(fd)) { - return with_errno(gkfs_pread_ws(fd, buf, count, pos)); + return with_errno(gkfs::func::pread_ws(fd, buf, count, pos)); } /* Since kernel 2.6: pread() became pread64(), and pwrite() became pwrite64(). */ return syscall_no_intercept(SYS_pread64, fd, buf, count, pos); @@ -174,7 +174,7 @@ int hook_write(unsigned int fd, const char* buf, size_t count) { __func__, fd, fmt::ptr(buf), count); if (CTX->file_map()->exist(fd)) { - return with_errno(gkfs_write(fd, buf, count)); + return with_errno(gkfs::func::write(fd, buf, count)); } return syscall_no_intercept(SYS_write, fd, buf, count); } @@ -185,7 +185,7 @@ int hook_pwrite(unsigned int fd, const char* buf, size_t count, loff_t pos) { __func__, fd, fmt::ptr(buf), count, pos); if (CTX->file_map()->exist(fd)) { - return with_errno(gkfs_pwrite_ws(fd, buf, count, pos)); + return with_errno(gkfs::func::pwrite_ws(fd, buf, count, pos)); } /* Since kernel 2.6: pread() became pread64(), and pwrite() became pwrite64(). 
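// Illustrative, standalone sketch of what the comment above means in practice:
// on 64-bit Linux the raw syscall behind pread(2)/pwrite(2) is SYS_pread64 /
// SYS_pwrite64, which is why the hooks forward to those numbers. This snippet
// issues the same raw call via syscall(2); the file path is just an example.
#include <fcntl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main() {
    int fd = ::open("/etc/hostname", O_RDONLY);
    if (fd < 0)
        return 1;
    char buf[64];
    // equivalent to pread(fd, buf, sizeof(buf), 0) on 64-bit Linux
    long n = ::syscall(SYS_pread64, fd, buf, sizeof(buf), 0L);
    ::close(fd);
    return n >= 0 ? 0 : 1;
}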
*/ return syscall_no_intercept(SYS_pwrite64, fd, buf, count, pos); @@ -197,7 +197,7 @@ int hook_writev(unsigned long fd, const struct iovec* iov, unsigned long iovcnt) __func__, fd, fmt::ptr(iov), iovcnt); if (CTX->file_map()->exist(fd)) { - return with_errno(gkfs_writev(fd, iov, iovcnt)); + return with_errno(gkfs::func::writev(fd, iov, iovcnt)); } return syscall_no_intercept(SYS_writev, fd, iov, iovcnt); } @@ -240,9 +240,9 @@ int hook_unlinkat(int dirfd, const char* cpath, int flags) { case RelativizeStatus::internal: if (flags & AT_REMOVEDIR) { - return with_errno(gkfs_rmdir(resolved)); + return with_errno(gkfs::func::rmdir(resolved)); } else { - return with_errno(gkfs_rm_node(resolved)); + return with_errno(gkfs::func::rm_node(resolved)); } default: @@ -292,7 +292,7 @@ int hook_access(const char* path, int mask) { std::string rel_path; if (CTX->relativize_path(path, rel_path)) { - auto ret = gkfs_access(rel_path, mask); + auto ret = gkfs::func::access(rel_path, mask); if (ret < 0) { return -errno; } @@ -319,7 +319,7 @@ int hook_faccessat(int dirfd, const char* cpath, int mode) { return -ENOTDIR; case RelativizeStatus::internal: - return with_errno(gkfs_access(resolved, mode)); + return with_errno(gkfs::func::access(resolved, mode)); default: LOG(ERROR, "{}() relativize status unknown: {}", __func__); @@ -333,7 +333,7 @@ off_t hook_lseek(unsigned int fd, off_t offset, unsigned int whence) { __func__, fd, offset, whence); if (CTX->file_map()->exist(fd)) { - auto off_ret = gkfs_lseek(fd, static_cast(offset), whence); + auto off_ret = gkfs::func::lseek(fd, static_cast(offset), whence); if (off_ret > std::numeric_limits::max()) { return -EOVERFLOW; } else if (off_ret < 0) { @@ -352,7 +352,7 @@ int hook_truncate(const char* path, long length) { std::string rel_path; if (CTX->relativize_path(path, rel_path)) { - return with_errno(gkfs_truncate(rel_path, length)); + return with_errno(gkfs::func::truncate(rel_path, length)); } return syscall_no_intercept(SYS_truncate, rel_path.c_str(), length); } @@ -364,7 +364,7 @@ int hook_ftruncate(unsigned int fd, unsigned long length) { if (CTX->file_map()->exist(fd)) { auto path = CTX->file_map()->get(fd)->path(); - return with_errno(gkfs_truncate(path, length)); + return with_errno(gkfs::func::truncate(path, length)); } return syscall_no_intercept(SYS_ftruncate, fd, length); } @@ -375,7 +375,7 @@ int hook_dup(unsigned int fd) { __func__, fd); if (CTX->file_map()->exist(fd)) { - return with_errno(gkfs_dup(fd)); + return with_errno(gkfs::func::dup(fd)); } return syscall_no_intercept(SYS_dup, fd); } @@ -386,7 +386,7 @@ int hook_dup2(unsigned int oldfd, unsigned int newfd) { __func__, oldfd, newfd); if (CTX->file_map()->exist(oldfd)) { - return with_errno(gkfs_dup2(oldfd, newfd)); + return with_errno(gkfs::func::dup2(oldfd, newfd)); } return syscall_no_intercept(SYS_dup2, oldfd, newfd); } @@ -411,7 +411,7 @@ int hook_getdents(unsigned int fd, struct linux_dirent* dirp, unsigned int count __func__, fd, fmt::ptr(dirp), count); if (CTX->file_map()->exist(fd)) { - return with_errno(getdents(fd, dirp, count)); + return with_errno(gkfs::func::getdents(fd, dirp, count)); } return syscall_no_intercept(SYS_getdents, fd, dirp, count); } @@ -423,7 +423,7 @@ int hook_getdents64(unsigned int fd, struct linux_dirent64* dirp, unsigned int c __func__, fd, fmt::ptr(dirp), count); if (CTX->file_map()->exist(fd)) { - return with_errno(getdents64(fd, dirp, count)); + return with_errno(gkfs::func::getdents64(fd, dirp, count)); } return syscall_no_intercept(SYS_getdents64, fd, 
dirp, count); } @@ -447,7 +447,7 @@ int hook_mkdirat(int dirfd, const char* cpath, mode_t mode) { return -ENOTDIR; case RelativizeStatus::internal: - return with_errno(gkfs_mk_node(resolved, mode | S_IFDIR)); + return with_errno(gkfs::func::mk_node(resolved, mode | S_IFDIR)); default: LOG(ERROR, "{}() relativize status unknown: {}", __func__); @@ -503,7 +503,7 @@ int hook_chdir(const char* path) { bool internal = CTX->relativize_path(path, rel_path); if (internal) { //path falls in our namespace - auto md = gkfs_metadata(rel_path); + auto md = gkfs::func::metadata(rel_path); if (md == nullptr) { LOG(ERROR, "{}() path does not exists", __func__); return -ENOENT; @@ -619,11 +619,11 @@ int hook_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg) { case F_DUPFD: LOG(DEBUG, "{}() F_DUPFD on fd {}", __func__, fd); - return with_errno(gkfs_dup(fd)); + return with_errno(gkfs::func::dup(fd)); case F_DUPFD_CLOEXEC: LOG(DEBUG, "{}() F_DUPFD_CLOEXEC on fd {}", __func__, fd); - ret = gkfs_dup(fd); + ret = gkfs::func::dup(fd); if (ret == -1) { return -errno; } @@ -736,7 +736,7 @@ int hook_statfs(const char* path, struct statfs* buf) { std::string rel_path; if (CTX->relativize_path(path, rel_path)) { - return with_errno(gkfs_statfs(buf)); + return with_errno(gkfs::func::statfs(buf)); } return syscall_no_intercept(SYS_statfs, rel_path.c_str(), buf); } @@ -747,7 +747,7 @@ int hook_fstatfs(unsigned int fd, struct statfs* buf) { __func__, fd, fmt::ptr(buf)); if (CTX->file_map()->exist(fd)) { - return with_errno(gkfs_statfs(buf)); + return with_errno(gkfs::func::statfs(buf)); } return syscall_no_intercept(SYS_fstatfs, fd, buf); } diff --git a/src/client/resolve.cpp b/src/client/resolve.cpp index 3d43661bd..b8151b182 100644 --- a/src/client/resolve.cpp +++ b/src/client/resolve.cpp @@ -21,10 +21,10 @@ #include #include #include +#include extern "C" { #include -#include #include } @@ -99,7 +99,7 @@ bool resolve_path(const std::string& path, std::string& resolved, bool resolve_l } } - struct stat st; + struct stat st{}; const std::vector& mnt_components = CTX->mountdir_components(); unsigned int matched_components = 0; // matched number of component in mountdir unsigned int resolved_components = 0; @@ -135,7 +135,7 @@ bool resolve_path(const std::string& path, std::string& resolved, bool resolve_l } if (comp_size == 2 && path.at(start) == '.' && path.at(start + 1) == '.') { // component is '..' we need to rollback resolved path - if (resolved.size() > 0) { + if (!resolved.empty()) { resolved.erase(last_slash_pos); /* TODO Optimization * the previous slash position should be stored. 
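// Illustrative, self-contained sketch (hypothetical, simplified stand-in that
// ignores symlinks and the mountdir check): the '.'/'..' handling performed by
// resolve_path() above. '.' components are dropped and '..' rolls the partially
// resolved path back to its previous slash.
#include <cassert>
#include <string>

static std::string normalize(const std::string& path) {
    std::string resolved;
    for (size_t start = 1, end = 0; end != std::string::npos && start < path.size(); start = end + 1) {
        end = path.find('/', start);
        auto comp = path.substr(start, end - start);    // clamps to the tail when end == npos
        if (comp.empty() || comp == ".")
            continue;                                   // skip '' and '.'
        if (comp == "..") {
            auto pos = resolved.find_last_of('/');      // roll back one component
            resolved.erase(pos == std::string::npos ? 0 : pos);
            continue;
        }
        resolved += '/';
        resolved += comp;
    }
    return resolved.empty() ? "/" : resolved;
}

int main() {
    assert(normalize("/a/./b/../c") == "/a/c");
    assert(normalize("/a/..") == "/");
    return 0;
}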
@@ -206,7 +206,7 @@ bool resolve_path(const std::string& path, std::string& resolved, bool resolve_l return true; } - if (resolved.size() == 0) { + if (resolved.empty()) { resolved.push_back(PSP); } LOG(DEBUG, "external: \"{}\"", resolved); -- GitLab From fc80c695cc8971c2aba3c891665c10bdfaff196b Mon Sep 17 00:00:00 2001 From: Marc Vef Date: Mon, 10 Feb 2020 10:55:12 +0100 Subject: [PATCH 04/25] namespace usage unification for rpc_send --- include/client/rpc/ld_rpc_data_ws.hpp | 6 +- src/client/rpc/hg_rpcs.cpp | 9 +- src/client/rpc/ld_rpc_data_ws.cpp | 731 +++++++++++++------------- src/client/rpc/ld_rpc_management.cpp | 72 ++- src/client/rpc/ld_rpc_metadentry.cpp | 689 ++++++++++++------------ 5 files changed, 724 insertions(+), 783 deletions(-) diff --git a/include/client/rpc/ld_rpc_data_ws.hpp b/include/client/rpc/ld_rpc_data_ws.hpp index cc619670d..e07d0168e 100644 --- a/include/client/rpc/ld_rpc_data_ws.hpp +++ b/include/client/rpc/ld_rpc_data_ws.hpp @@ -18,15 +18,15 @@ namespace rpc_send { - ssize_t write(const std::string& path, const void* buf, bool append_flag, off64_t in_offset, - size_t write_size, int64_t updated_metadentry_size); - struct ChunkStat { unsigned long chunk_size; unsigned long chunk_total; unsigned long chunk_free; }; + ssize_t write(const std::string& path, const void* buf, bool append_flag, off64_t in_offset, + size_t write_size, int64_t updated_metadentry_size); + ssize_t read(const std::string& path, void* buf, off64_t offset, size_t read_size); int trunc_data(const std::string& path, size_t current_size, size_t new_size); diff --git a/src/client/rpc/hg_rpcs.cpp b/src/client/rpc/hg_rpcs.cpp index 6e0264671..a7a01df6d 100644 --- a/src/client/rpc/hg_rpcs.cpp +++ b/src/client/rpc/hg_rpcs.cpp @@ -14,13 +14,10 @@ #include #include -namespace hermes { namespace detail { - //============================================================================== // register request types so that they can be used by users and the engine // -void -register_user_request_types() { +void hermes::detail::register_user_request_types() { (void) registered_requests().add(); (void) registered_requests().add(); (void) registered_requests().add(); @@ -40,6 +37,4 @@ register_user_request_types() { (void) registered_requests().add(); (void) registered_requests().add(); -} - -}} // namespace hermes::detail +} \ No newline at end of file diff --git a/src/client/rpc/ld_rpc_data_ws.cpp b/src/client/rpc/ld_rpc_data_ws.cpp index 776c75019..6c133b3db 100644 --- a/src/client/rpc/ld_rpc_data_ws.cpp +++ b/src/client/rpc/ld_rpc_data_ws.cpp @@ -13,465 +13,452 @@ #include #include -#include -#include #include #include -#include +#include +#include -namespace rpc_send { +#include - using namespace std; +using namespace std; // TODO If we decide to keep this functionality with one segment, the function can be merged mostly. // Code is mostly redundant - /** - * Sends an RPC request to a specific node to pull all chunks that belong to him - */ - ssize_t write(const string& path, const void* buf, const bool append_flag, - const off64_t in_offset, const size_t write_size, - const int64_t updated_metadentry_size) { - - assert(write_size > 0); - - // Calculate chunkid boundaries and numbers so that daemons know in - // which interval to look for chunks - off64_t offset = append_flag ? 
- in_offset : - (updated_metadentry_size - write_size); - - auto chnk_start = chnk_id_for_offset(offset, gkfs_config::rpc::chunksize); - auto chnk_end = chnk_id_for_offset((offset + write_size) - 1, gkfs_config::rpc::chunksize); - - // Collect all chunk ids within count that have the same destination so - // that those are send in one rpc bulk transfer - std::map> target_chnks{}; - // contains the target ids, used to access the target_chnks map. - // First idx is chunk with potential offset - std::vector targets{}; - - // targets for the first and last chunk as they need special treatment - uint64_t chnk_start_target = 0; - uint64_t chnk_end_target = 0; - - for (uint64_t chnk_id = chnk_start; chnk_id <= chnk_end; chnk_id++) { - auto target = CTX->distributor()->locate_data(path, chnk_id); - - if (target_chnks.count(target) == 0) { - target_chnks.insert( - std::make_pair(target, std::vector{chnk_id})); - targets.push_back(target); - } else { - target_chnks[target].push_back(chnk_id); - } +/** + * Sends an RPC request to a specific node to pull all chunks that belong to him + */ +ssize_t rpc_send::write(const string& path, const void* buf, const bool append_flag, + const off64_t in_offset, const size_t write_size, + const int64_t updated_metadentry_size) { + + assert(write_size > 0); + + // Calculate chunkid boundaries and numbers so that daemons know in + // which interval to look for chunks + off64_t offset = append_flag ? in_offset : (updated_metadentry_size - write_size); + + auto chnk_start = chnk_id_for_offset(offset, gkfs_config::rpc::chunksize); + auto chnk_end = chnk_id_for_offset((offset + write_size) - 1, gkfs_config::rpc::chunksize); + + // Collect all chunk ids within count that have the same destination so + // that those are send in one rpc bulk transfer + std::map> target_chnks{}; + // contains the target ids, used to access the target_chnks map. 
+ // First idx is chunk with potential offset + std::vector targets{}; + + // targets for the first and last chunk as they need special treatment + uint64_t chnk_start_target = 0; + uint64_t chnk_end_target = 0; + + for (uint64_t chnk_id = chnk_start; chnk_id <= chnk_end; chnk_id++) { + auto target = CTX->distributor()->locate_data(path, chnk_id); + + if (target_chnks.count(target) == 0) { + target_chnks.insert(std::make_pair(target, std::vector{chnk_id})); + targets.push_back(target); + } else { + target_chnks[target].push_back(chnk_id); + } - // set first and last chnk targets - if (chnk_id == chnk_start) { - chnk_start_target = target; - } + // set first and last chnk targets + if (chnk_id == chnk_start) { + chnk_start_target = target; + } - if (chnk_id == chnk_end) { - chnk_end_target = target; - } + if (chnk_id == chnk_end) { + chnk_end_target = target; } + } - // some helper variables for async RPC - std::vector bufseq{ - hermes::mutable_buffer{const_cast(buf), write_size}, - }; + // some helper variables for async RPC + std::vector bufseq{ + hermes::mutable_buffer{const_cast(buf), write_size}, + }; - // expose user buffers so that they can serve as RDMA data sources - // (these are automatically "unexposed" when the destructor is called) - hermes::exposed_memory local_buffers; + // expose user buffers so that they can serve as RDMA data sources + // (these are automatically "unexposed" when the destructor is called) + hermes::exposed_memory local_buffers; - try { - local_buffers = - ld_network_service->expose(bufseq, hermes::access_mode::read_only); + try { + local_buffers = ld_network_service->expose(bufseq, hermes::access_mode::read_only); - } catch (const std::exception& ex) { - LOG(ERROR, "Failed to expose buffers for RMA"); - errno = EBUSY; - return -1; - } + } catch (const std::exception& ex) { + LOG(ERROR, "Failed to expose buffers for RMA"); + errno = EBUSY; + return -1; + } - std::vector> handles; + std::vector> handles; - // Issue non-blocking RPC requests and wait for the result later - // - // TODO(amiranda): This could be simplified by adding a vector of inputs - // to async_engine::broadcast(). This would allow us to avoid manually - // looping over handles as we do below - for (const auto& target : targets) { + // Issue non-blocking RPC requests and wait for the result later + // + // TODO(amiranda): This could be simplified by adding a vector of inputs + // to async_engine::broadcast(). 
This would allow us to avoid manually + // looping over handles as we do below + for (const auto& target : targets) { - // total chunk_size for target - auto total_chunk_size = target_chnks[target].size() * gkfs_config::rpc::chunksize; + // total chunk_size for target + auto total_chunk_size = target_chnks[target].size() * gkfs_config::rpc::chunksize; - // receiver of first chunk must subtract the offset from first chunk - if (target == chnk_start_target) { - total_chunk_size -= chnk_lpad(offset, gkfs_config::rpc::chunksize); - } + // receiver of first chunk must subtract the offset from first chunk + if (target == chnk_start_target) { + total_chunk_size -= chnk_lpad(offset, gkfs_config::rpc::chunksize); + } - // receiver of last chunk must subtract - if (target == chnk_end_target) { - total_chunk_size -= chnk_rpad(offset + write_size, gkfs_config::rpc::chunksize); - } + // receiver of last chunk must subtract + if (target == chnk_end_target) { + total_chunk_size -= chnk_rpad(offset + write_size, gkfs_config::rpc::chunksize); + } - auto endp = CTX->hosts().at(target); - - try { - - LOG(DEBUG, "Sending RPC ..."); - - gkfs::rpc::write_data::input in( - path, - // first offset in targets is the chunk with - // a potential offset - chnk_lpad(offset, gkfs_config::rpc::chunksize), - target, - CTX->hosts().size(), - // number of chunks handled by that destination - target_chnks[target].size(), - // chunk start id of this write - chnk_start, - // chunk end id of this write - chnk_end, - // total size to write - total_chunk_size, - local_buffers); - - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that - // we can retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. When that happens we can remove the .at(0) :/ - handles.emplace_back( - ld_network_service->post(endp, in)); - - LOG(DEBUG, "host: {}, path: \"{}\", chunks: {}, size: {}, offset: {}", - target, path, in.chunk_n(), total_chunk_size, in.offset()); - - } catch (const std::exception& ex) { - LOG(ERROR, "Unable to send non-blocking rpc for " - "path \"{}\" [peer: {}]", path, target); - errno = EBUSY; - return -1; - } + auto endp = CTX->hosts().at(target); + + try { + + LOG(DEBUG, "Sending RPC ..."); + + gkfs::rpc::write_data::input in( + path, + // first offset in targets is the chunk with + // a potential offset + chnk_lpad(offset, gkfs_config::rpc::chunksize), + target, + CTX->hosts().size(), + // number of chunks handled by that destination + target_chnks[target].size(), + // chunk start id of this write + chnk_start, + // chunk end id of this write + chnk_end, + // total size to write + total_chunk_size, + local_buffers); + + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that + // we can retry for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. 
When that happens we can remove the .at(0) :/ + handles.emplace_back(ld_network_service->post(endp, in)); + + LOG(DEBUG, "host: {}, path: \"{}\", chunks: {}, size: {}, offset: {}", + target, path, in.chunk_n(), total_chunk_size, in.offset()); + + } catch (const std::exception& ex) { + LOG(ERROR, "Unable to send non-blocking rpc for " + "path \"{}\" [peer: {}]", path, target); + errno = EBUSY; + return -1; } + } + + // Wait for RPC responses and then get response and add it to out_size + // which is the written size All potential outputs are served to free + // resources regardless of errors, although an errorcode is set. + bool error = false; + ssize_t out_size = 0; + std::size_t idx = 0; + + for (const auto& h : handles) { + try { + // XXX We might need a timeout here to not wait forever for an + // output that never comes? + auto out = h.get().at(0); - // Wait for RPC responses and then get response and add it to out_size - // which is the written size All potential outputs are served to free - // resources regardless of errors, although an errorcode is set. - bool error = false; - ssize_t out_size = 0; - std::size_t idx = 0; - - for (const auto& h : handles) { - try { - // XXX We might need a timeout here to not wait forever for an - // output that never comes? - auto out = h.get().at(0); - - if (out.err() != 0) { - LOG(ERROR, "Daemon reported error: {}", out.err()); - error = true; - errno = out.err(); - } - - out_size += static_cast(out.io_size()); - - } catch (const std::exception& ex) { - LOG(ERROR, "Failed to get rpc output for path \"{}\" [peer: {}]", - path, targets[idx]); + if (out.err() != 0) { + LOG(ERROR, "Daemon reported error: {}", out.err()); error = true; - errno = EIO; + errno = out.err(); } - ++idx; + out_size += static_cast(out.io_size()); + + } catch (const std::exception& ex) { + LOG(ERROR, "Failed to get rpc output for path \"{}\" [peer: {}]", + path, targets[idx]); + error = true; + errno = EIO; } - return error ? -1 : out_size; + ++idx; } + return error ? -1 : out_size; +} + /** * Sends an RPC request to a specific node to push all chunks that belong to him */ - ssize_t read(const string& path, void* buf, const off64_t offset, const size_t read_size) { - - // Calculate chunkid boundaries and numbers so that daemons know in which - // interval to look for chunks - auto chnk_start = chnk_id_for_offset(offset, gkfs_config::rpc::chunksize); - auto chnk_end = chnk_id_for_offset((offset + read_size - 1), gkfs_config::rpc::chunksize); - - // Collect all chunk ids within count that have the same destination so - // that those are send in one rpc bulk transfer - std::map> target_chnks{}; - // contains the recipient ids, used to access the target_chnks map. 
- // First idx is chunk with potential offset - std::vector targets{}; - - // targets for the first and last chunk as they need special treatment - uint64_t chnk_start_target = 0; - uint64_t chnk_end_target = 0; - - for (uint64_t chnk_id = chnk_start; chnk_id <= chnk_end; chnk_id++) { - auto target = CTX->distributor()->locate_data(path, chnk_id); - - if (target_chnks.count(target) == 0) { - target_chnks.insert( - std::make_pair(target, std::vector{chnk_id})); - targets.push_back(target); - } else { - target_chnks[target].push_back(chnk_id); - } +ssize_t rpc_send::read(const string& path, void* buf, const off64_t offset, const size_t read_size) { + + // Calculate chunkid boundaries and numbers so that daemons know in which + // interval to look for chunks + auto chnk_start = chnk_id_for_offset(offset, gkfs_config::rpc::chunksize); + auto chnk_end = chnk_id_for_offset((offset + read_size - 1), gkfs_config::rpc::chunksize); + + // Collect all chunk ids within count that have the same destination so + // that those are send in one rpc bulk transfer + std::map> target_chnks{}; + // contains the recipient ids, used to access the target_chnks map. + // First idx is chunk with potential offset + std::vector targets{}; + + // targets for the first and last chunk as they need special treatment + uint64_t chnk_start_target = 0; + uint64_t chnk_end_target = 0; + + for (uint64_t chnk_id = chnk_start; chnk_id <= chnk_end; chnk_id++) { + auto target = CTX->distributor()->locate_data(path, chnk_id); + + if (target_chnks.count(target) == 0) { + target_chnks.insert(std::make_pair(target, std::vector{chnk_id})); + targets.push_back(target); + } else { + target_chnks[target].push_back(chnk_id); + } - // set first and last chnk targets - if (chnk_id == chnk_start) { - chnk_start_target = target; - } + // set first and last chnk targets + if (chnk_id == chnk_start) { + chnk_start_target = target; + } - if (chnk_id == chnk_end) { - chnk_end_target = target; - } + if (chnk_id == chnk_end) { + chnk_end_target = target; } + } - // some helper variables for async RPCs - std::vector bufseq{ - hermes::mutable_buffer{buf, read_size}, - }; + // some helper variables for async RPCs + std::vector bufseq{ + hermes::mutable_buffer{buf, read_size}, + }; - // expose user buffers so that they can serve as RDMA data targets - // (these are automatically "unexposed" when the destructor is called) - hermes::exposed_memory local_buffers; + // expose user buffers so that they can serve as RDMA data targets + // (these are automatically "unexposed" when the destructor is called) + hermes::exposed_memory local_buffers; - try { - local_buffers = - ld_network_service->expose(bufseq, hermes::access_mode::write_only); + try { + local_buffers = ld_network_service->expose(bufseq, hermes::access_mode::write_only); - } catch (const std::exception& ex) { - LOG(ERROR, "Failed to expose buffers for RMA"); - errno = EBUSY; - return -1; - } + } catch (const std::exception& ex) { + LOG(ERROR, "Failed to expose buffers for RMA"); + errno = EBUSY; + return -1; + } - std::vector> handles; + std::vector> handles; - // Issue non-blocking RPC requests and wait for the result later - // - // TODO(amiranda): This could be simplified by adding a vector of inputs - // to async_engine::broadcast(). 
This would allow us to avoid manually - // looping over handles as we do below - for (const auto& target : targets) { + // Issue non-blocking RPC requests and wait for the result later + // + // TODO(amiranda): This could be simplified by adding a vector of inputs + // to async_engine::broadcast(). This would allow us to avoid manually + // looping over handles as we do below + for (const auto& target : targets) { - // total chunk_size for target - auto total_chunk_size = target_chnks[target].size() * gkfs_config::rpc::chunksize; + // total chunk_size for target + auto total_chunk_size = target_chnks[target].size() * gkfs_config::rpc::chunksize; - // receiver of first chunk must subtract the offset from first chunk - if (target == chnk_start_target) { - total_chunk_size -= chnk_lpad(offset, gkfs_config::rpc::chunksize); - } + // receiver of first chunk must subtract the offset from first chunk + if (target == chnk_start_target) { + total_chunk_size -= chnk_lpad(offset, gkfs_config::rpc::chunksize); + } - // receiver of last chunk must subtract - if (target == chnk_end_target) { - total_chunk_size -= chnk_rpad(offset + read_size, gkfs_config::rpc::chunksize); - } + // receiver of last chunk must subtract + if (target == chnk_end_target) { + total_chunk_size -= chnk_rpad(offset + read_size, gkfs_config::rpc::chunksize); + } - auto endp = CTX->hosts().at(target); - - try { - - LOG(DEBUG, "Sending RPC ..."); - - gkfs::rpc::read_data::input in( - path, - // first offset in targets is the chunk with - // a potential offset - chnk_lpad(offset, gkfs_config::rpc::chunksize), - target, - CTX->hosts().size(), - // number of chunks handled by that destination - target_chnks[target].size(), - // chunk start id of this write - chnk_start, - // chunk end id of this write - chnk_end, - // total size to write - total_chunk_size, - local_buffers); - - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that - // we can retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. When that happens we can remove the .at(0) :/ - handles.emplace_back( - ld_network_service->post(endp, in)); - - LOG(DEBUG, "host: {}, path: {}, chunks: {}, size: {}, offset: {}", - target, path, in.chunk_n(), total_chunk_size, in.offset()); - - } catch (const std::exception& ex) { - LOG(ERROR, "Unable to send non-blocking rpc for path \"{}\" " - "[peer: {}]", path, target); - errno = EBUSY; - return -1; - } + auto endp = CTX->hosts().at(target); + + try { + + LOG(DEBUG, "Sending RPC ..."); + + gkfs::rpc::read_data::input in( + path, + // first offset in targets is the chunk with + // a potential offset + chnk_lpad(offset, gkfs_config::rpc::chunksize), + target, + CTX->hosts().size(), + // number of chunks handled by that destination + target_chnks[target].size(), + // chunk start id of this write + chnk_start, + // chunk end id of this write + chnk_end, + // total size to write + total_chunk_size, + local_buffers); + + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that + // we can retry for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. 
When that happens we can remove the .at(0) :/ + handles.emplace_back( + ld_network_service->post(endp, in)); + + LOG(DEBUG, "host: {}, path: {}, chunks: {}, size: {}, offset: {}", + target, path, in.chunk_n(), total_chunk_size, in.offset()); + + } catch (const std::exception& ex) { + LOG(ERROR, "Unable to send non-blocking rpc for path \"{}\" " + "[peer: {}]", path, target); + errno = EBUSY; + return -1; } + } - // Wait for RPC responses and then get response and add it to out_size - // which is the read size. All potential outputs are served to free - // resources regardless of errors, although an errorcode is set. - bool error = false; - ssize_t out_size = 0; - std::size_t idx = 0; - - for (const auto& h : handles) { - try { - // XXX We might need a timeout here to not wait forever for an - // output that never comes? - auto out = h.get().at(0); - - if (out.err() != 0) { - LOG(ERROR, "Daemon reported error: {}", out.err()); - error = true; - errno = out.err(); - } - - out_size += static_cast(out.io_size()); - - } catch (const std::exception& ex) { - LOG(ERROR, "Failed to get rpc output for path \"{}\" [peer: {}]", - path, targets[idx]); + // Wait for RPC responses and then get response and add it to out_size + // which is the read size. All potential outputs are served to free + // resources regardless of errors, although an errorcode is set. + bool error = false; + ssize_t out_size = 0; + std::size_t idx = 0; + + for (const auto& h : handles) { + try { + // XXX We might need a timeout here to not wait forever for an + // output that never comes? + auto out = h.get().at(0); + + if (out.err() != 0) { + LOG(ERROR, "Daemon reported error: {}", out.err()); error = true; - errno = EIO; + errno = out.err(); } - ++idx; + out_size += static_cast(out.io_size()); + + } catch (const std::exception& ex) { + LOG(ERROR, "Failed to get rpc output for path \"{}\" [peer: {}]", + path, targets[idx]); + error = true; + errno = EIO; } - return error ? -1 : out_size; + ++idx; } - int trunc_data(const std::string& path, size_t current_size, size_t new_size) { + return error ? 
-1 : out_size; +} - assert(current_size > new_size); - bool error = false; +int rpc_send::trunc_data(const std::string& path, size_t current_size, size_t new_size) { - // Find out which data servers need to delete data chunks in order to - // contact only them - const unsigned int chunk_start = chnk_id_for_offset(new_size, gkfs_config::rpc::chunksize); - const unsigned int chunk_end = - chnk_id_for_offset(current_size - new_size - 1, gkfs_config::rpc::chunksize); + assert(current_size > new_size); + bool error = false; - std::unordered_set hosts; - for (unsigned int chunk_id = chunk_start; chunk_id <= chunk_end; ++chunk_id) { - hosts.insert(CTX->distributor()->locate_data(path, chunk_id)); - } + // Find out which data servers need to delete data chunks in order to + // contact only them + const unsigned int chunk_start = chnk_id_for_offset(new_size, gkfs_config::rpc::chunksize); + const unsigned int chunk_end = chnk_id_for_offset(current_size - new_size - 1, gkfs_config::rpc::chunksize); - std::vector> handles; + std::unordered_set hosts; + for (unsigned int chunk_id = chunk_start; chunk_id <= chunk_end; ++chunk_id) { + hosts.insert(CTX->distributor()->locate_data(path, chunk_id)); + } - for (const auto& host: hosts) { + std::vector> handles; - auto endp = CTX->hosts().at(host); + for (const auto& host: hosts) { - try { - LOG(DEBUG, "Sending RPC ..."); + auto endp = CTX->hosts().at(host); - gkfs::rpc::trunc_data::input in(path, new_size); + try { + LOG(DEBUG, "Sending RPC ..."); - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that - // we can retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. When that happens we can remove the .at(0) :/ - handles.emplace_back( - ld_network_service->post(endp, in)); + gkfs::rpc::trunc_data::input in(path, new_size); - } catch (const std::exception& ex) { - // TODO(amiranda): we should cancel all previously posted requests - // here, unfortunately, Hermes does not support it yet :/ - LOG(ERROR, "Failed to send request to host: {}", host); - errno = EIO; - return -1; - } + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that + // we can retry for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. When that happens we can remove the .at(0) :/ + handles.emplace_back(ld_network_service->post(endp, in)); + } catch (const std::exception& ex) { + // TODO(amiranda): we should cancel all previously posted requests + // here, unfortunately, Hermes does not support it yet :/ + LOG(ERROR, "Failed to send request to host: {}", host); + errno = EIO; + return -1; } - // Wait for RPC responses and then get response - for (const auto& h : handles) { - - try { - // XXX We might need a timeout here to not wait forever for an - // output that never comes? - auto out = h.get().at(0); - - if (out.err() != 0) { - LOG(ERROR, "received error response: {}", out.err()); - error = true; - errno = EIO; - } - } catch (const std::exception& ex) { - LOG(ERROR, "while getting rpc output"); + } + + // Wait for RPC responses and then get response + for (const auto& h : handles) { + + try { + // XXX We might need a timeout here to not wait forever for an + // output that never comes? 
+ auto out = h.get().at(0); + + if (out.err() != 0) { + LOG(ERROR, "received error response: {}", out.err()); error = true; errno = EIO; } + } catch (const std::exception& ex) { + LOG(ERROR, "while getting rpc output"); + error = true; + errno = EIO; } - - return error ? -1 : 0; } - ChunkStat chunk_stat() { + return error ? -1 : 0; +} + +rpc_send::ChunkStat rpc_send::chunk_stat() { - std::vector> handles; + std::vector> handles; - for (const auto& endp : CTX->hosts()) { - try { - LOG(DEBUG, "Sending RPC to host: {}", endp.to_string()); + for (const auto& endp : CTX->hosts()) { + try { + LOG(DEBUG, "Sending RPC to host: {}", endp.to_string()); - gkfs::rpc::chunk_stat::input in(0); + gkfs::rpc::chunk_stat::input in(0); - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that - // we can retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. When that happens we can remove the .at(0) :/ - handles.emplace_back( - ld_network_service->post(endp, in)); + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that + // we can retry for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. When that happens we can remove the .at(0) :/ + handles.emplace_back(ld_network_service->post(endp, in)); - } catch (const std::exception& ex) { - // TODO(amiranda): we should cancel all previously posted requests - // here, unfortunately, Hermes does not support it yet :/ - LOG(ERROR, "Failed to send request to host: {}", endp.to_string()); - throw std::runtime_error("Failed to forward non-blocking rpc request"); - } + } catch (const std::exception& ex) { + // TODO(amiranda): we should cancel all previously posted requests + // here, unfortunately, Hermes does not support it yet :/ + LOG(ERROR, "Failed to send request to host: {}", endp.to_string()); + throw std::runtime_error("Failed to forward non-blocking rpc request"); } + } - unsigned long chunk_size = gkfs_config::rpc::chunksize; - unsigned long chunk_total = 0; - unsigned long chunk_free = 0; + unsigned long chunk_size = gkfs_config::rpc::chunksize; + unsigned long chunk_total = 0; + unsigned long chunk_free = 0; - // wait for RPC responses - for (std::size_t i = 0; i < handles.size(); ++i) { + // wait for RPC responses + for (std::size_t i = 0; i < handles.size(); ++i) { - gkfs::rpc::chunk_stat::output out; + gkfs::rpc::chunk_stat::output out; - try { - // XXX We might need a timeout here to not wait forever for an - // output that never comes? - out = handles[i].get().at(0); + try { + // XXX We might need a timeout here to not wait forever for an + // output that never comes? 
+ out = handles[i].get().at(0); - assert(out.chunk_size() == chunk_size); - chunk_total += out.chunk_total(); - chunk_free += out.chunk_free(); + assert(out.chunk_size() == chunk_size); + chunk_total += out.chunk_total(); + chunk_free += out.chunk_free(); - } catch (const std::exception& ex) { - throw std::runtime_error( - fmt::format("Failed to get rpc output for target host: {}]", i)); - } + } catch (const std::exception& ex) { + throw std::runtime_error( + fmt::format("Failed to get rpc output for target host: {}]", i)); } - - return {chunk_size, chunk_total, chunk_free}; } -} // end namespace rpc_send + return {chunk_size, chunk_total, chunk_free}; +} diff --git a/src/client/rpc/ld_rpc_management.cpp b/src/client/rpc/ld_rpc_management.cpp index d3132e3dd..e8244db80 100644 --- a/src/client/rpc/ld_rpc_management.cpp +++ b/src/client/rpc/ld_rpc_management.cpp @@ -18,47 +18,41 @@ #include +/** +* Gets fs configuration information from the running daemon and transfers it to the memory of the library +* @return +*/ +bool rpc_send::get_fs_config() { + + auto endp = CTX->hosts().at(CTX->local_host_id()); + gkfs::rpc::fs_config::output out; + + try { + LOG(DEBUG, "Retrieving file system configurations from daemon"); + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can retry + // for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. When that happens we can remove the .at(0) :/ + out = ld_network_service->post(endp).get().at(0); + } catch (const std::exception& ex) { + LOG(ERROR, "Retrieving fs configurations from daemon"); + return false; + } -namespace rpc_send { - - /** - * Gets fs configuration information from the running daemon and transfers it to the memory of the library - * @return - */ - bool get_fs_config() { - - auto endp = CTX->hosts().at(CTX->local_host_id()); - gkfs::rpc::fs_config::output out; - - try { - LOG(DEBUG, "Retrieving file system configurations from daemon"); - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can retry - // for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. 
When that happens we can remove the .at(0) :/ - out = ld_network_service->post(endp).get().at(0); - } catch (const std::exception& ex) { - LOG(ERROR, "Retrieving fs configurations from daemon"); - return false; - } - - CTX->mountdir(out.mountdir()); - LOG(INFO, "Mountdir: '{}'", CTX->mountdir()); - - CTX->fs_conf()->rootdir = out.rootdir(); - CTX->fs_conf()->atime_state = out.atime_state(); - CTX->fs_conf()->mtime_state = out.mtime_state(); - CTX->fs_conf()->ctime_state = out.ctime_state(); - CTX->fs_conf()->link_cnt_state = out.link_cnt_state(); - CTX->fs_conf()->blocks_state = out.blocks_state(); - CTX->fs_conf()->uid = out.uid(); - CTX->fs_conf()->gid = out.gid(); - - LOG(DEBUG, "Got response with mountdir {}", out.mountdir()); + CTX->mountdir(out.mountdir()); + LOG(INFO, "Mountdir: '{}'", CTX->mountdir()); - return true; - } + CTX->fs_conf()->rootdir = out.rootdir(); + CTX->fs_conf()->atime_state = out.atime_state(); + CTX->fs_conf()->mtime_state = out.mtime_state(); + CTX->fs_conf()->ctime_state = out.ctime_state(); + CTX->fs_conf()->link_cnt_state = out.link_cnt_state(); + CTX->fs_conf()->blocks_state = out.blocks_state(); + CTX->fs_conf()->uid = out.uid(); + CTX->fs_conf()->gid = out.gid(); + LOG(DEBUG, "Got response with mountdir {}", out.mountdir()); + return true; } diff --git a/src/client/rpc/ld_rpc_metadentry.cpp b/src/client/rpc/ld_rpc_metadentry.cpp index 3336a5fad..b00b7decb 100644 --- a/src/client/rpc/ld_rpc_metadentry.cpp +++ b/src/client/rpc/ld_rpc_metadentry.cpp @@ -22,79 +22,108 @@ #include #include -namespace rpc_send { +using namespace std; + +int rpc_send::mk_node(const std::string& path, const mode_t mode) { + + int err = EUNKNOWN; + auto endp = CTX->hosts().at( + CTX->distributor()->locate_file_metadata(path)); + + try { + LOG(DEBUG, "Sending RPC ..."); + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can + // retry for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. When that happens we can remove the .at(0) :/ + auto out = ld_network_service->post(endp, path, mode).get().at(0); + err = out.err(); + LOG(DEBUG, "Got response success: {}", err); + + if (out.err()) { + errno = out.err(); + return -1; + } - using namespace std; + } catch (const std::exception& ex) { + LOG(ERROR, "while getting rpc output"); + errno = EBUSY; + return -1; + } - int mk_node(const std::string& path, const mode_t mode) { + return err; +} - int err = EUNKNOWN; - auto endp = CTX->hosts().at( - CTX->distributor()->locate_file_metadata(path)); +int rpc_send::stat(const std::string& path, string& attr) { - try { - LOG(DEBUG, "Sending RPC ..."); - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can - // retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. 
When that happens we can remove the .at(0) :/ - auto out = - ld_network_service->post(endp, path, mode).get().at(0); - err = out.err(); - LOG(DEBUG, "Got response success: {}", err); + auto endp = CTX->hosts().at(CTX->distributor()->locate_file_metadata(path)); - if (out.err()) { - errno = out.err(); - return -1; - } + try { + LOG(DEBUG, "Sending RPC ..."); + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can + // retry for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. When that happens we can remove the .at(0) :/ + auto out = ld_network_service->post(endp, path).get().at(0); + LOG(DEBUG, "Got response success: {}", out.err()); - } catch (const std::exception& ex) { - LOG(ERROR, "while getting rpc output"); - errno = EBUSY; + if (out.err() != 0) { + errno = out.err(); return -1; } - return err; + attr = out.db_val(); + return 0; + + } catch (const std::exception& ex) { + LOG(ERROR, "while getting rpc output"); + errno = EBUSY; + return -1; } - int stat(const std::string& path, string& attr) { + return 0; +} - auto endp = CTX->hosts().at( - CTX->distributor()->locate_file_metadata(path)); +int rpc_send::decr_size(const std::string& path, size_t length) { - try { - LOG(DEBUG, "Sending RPC ..."); - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can - // retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. When that happens we can remove the .at(0) :/ - auto out = - ld_network_service->post(endp, path).get().at(0); - LOG(DEBUG, "Got response success: {}", out.err()); + auto endp = CTX->hosts().at(CTX->distributor()->locate_file_metadata(path)); - if (out.err() != 0) { - errno = out.err(); - return -1; - } + try { - attr = out.db_val(); - return 0; + LOG(DEBUG, "Sending RPC ..."); + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can + // retry for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. When that happens we can remove the .at(0) :/ + auto out = ld_network_service->post(endp, path, length).get().at(0); - } catch (const std::exception& ex) { - LOG(ERROR, "while getting rpc output"); - errno = EBUSY; + LOG(DEBUG, "Got response success: {}", out.err()); + + if (out.err() != 0) { + errno = out.err(); return -1; } return 0; + + } catch (const std::exception& ex) { + LOG(ERROR, "while getting rpc output"); + errno = EBUSY; + return -1; } +} - int decr_size(const std::string& path, size_t length) { +int rpc_send::rm_node(const std::string& path, const bool remove_metadentry_only, const ssize_t size) { - auto endp = CTX->hosts().at( - CTX->distributor()->locate_file_metadata(path)); + // if only the metadentry should be removed, send one rpc to the + // metadentry's responsible node to remove the metadata + // else, send an rpc to all hosts and thus broadcast chunk_removal. + if (remove_metadentry_only) { + + auto endp = CTX->hosts().at(CTX->distributor()->locate_file_metadata(path)); try { @@ -104,9 +133,7 @@ namespace rpc_send { // TODO(amiranda): hermes will eventually provide a post(endpoint) // returning one result and a broadcast(endpoint_set) returning a // result_set. 
When that happens we can remove the .at(0) :/ - auto out = - ld_network_service->post( - endp, path, length).get().at(0); + auto out = ld_network_service->post(endp, path).get().at(0); LOG(DEBUG, "Got response success: {}", out.err()); @@ -122,406 +149,344 @@ namespace rpc_send { errno = EBUSY; return -1; } - } - int rm_node(const std::string& path, const bool remove_metadentry_only, const ssize_t size) { + return 0; + } - // if only the metadentry should be removed, send one rpc to the - // metadentry's responsible node to remove the metadata - // else, send an rpc to all hosts and thus broadcast chunk_removal. - if (remove_metadentry_only) { + std::vector> handles; - auto endp = CTX->hosts().at( - CTX->distributor()->locate_file_metadata(path)); + // Small files + if (static_cast(size / gkfs_config::rpc::chunksize) < CTX->hosts().size()) { - try { + auto endp = CTX->hosts().at(CTX->distributor()->locate_file_metadata(path)); - LOG(DEBUG, "Sending RPC ..."); - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can - // retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. When that happens we can remove the .at(0) :/ - auto out = - ld_network_service->post(endp, path).get().at(0); + try { + LOG(DEBUG, "Sending RPC to host: {}", endp.to_string()); + gkfs::rpc::remove::input in(path); + handles.emplace_back(ld_network_service->post(endp, in)); - LOG(DEBUG, "Got response success: {}", out.err()); + uint64_t chnk_start = 0; + uint64_t chnk_end = size / gkfs_config::rpc::chunksize; - if (out.err() != 0) { - errno = out.err(); - return -1; - } + for (uint64_t chnk_id = chnk_start; chnk_id <= chnk_end; chnk_id++) { + const auto target = CTX->hosts().at( + CTX->distributor()->locate_data(path, chnk_id)); - return 0; + LOG(DEBUG, "Sending RPC to host: {}", target.to_string()); - } catch (const std::exception& ex) { - LOG(ERROR, "while getting rpc output"); - errno = EBUSY; - return -1; + handles.emplace_back(ld_network_service->post(target, in)); } - - return 0; + } catch (const std::exception& ex) { + LOG(ERROR, "Failed to send reduced remove requests"); + throw std::runtime_error( + "Failed to forward non-blocking rpc request"); } - - std::vector> handles; - - // Small files - if (static_cast(size / gkfs_config::rpc::chunksize) < CTX->hosts().size()) { - - auto endp = CTX->hosts().at( - CTX->distributor()->locate_file_metadata(path)); - + } else { // "Big" files + for (const auto& endp : CTX->hosts()) { try { LOG(DEBUG, "Sending RPC to host: {}", endp.to_string()); - gkfs::rpc::remove::input in(path); - handles.emplace_back( - ld_network_service->post(endp, in)); - uint64_t chnk_start = 0; - uint64_t chnk_end = size / gkfs_config::rpc::chunksize; + gkfs::rpc::remove::input in(path); - for (uint64_t chnk_id = chnk_start; chnk_id <= chnk_end; chnk_id++) { - const auto target = CTX->hosts().at( - CTX->distributor()->locate_data(path, chnk_id)); + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that + // we can retry for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. 
When that happens we can remove the .at(0) :/ + // + // - LOG(DEBUG, "Sending RPC to host: {}", target.to_string()); + handles.emplace_back(ld_network_service->post(endp, in)); - handles.emplace_back( - ld_network_service->post(target, in)); - } } catch (const std::exception& ex) { - LOG(ERROR, "Failed to send reduced remove requests"); + // TODO(amiranda): we should cancel all previously posted requests + // here, unfortunately, Hermes does not support it yet :/ + LOG(ERROR, "Failed to send request to host: {}", + endp.to_string()); throw std::runtime_error( "Failed to forward non-blocking rpc request"); } - } else { // "Big" files - for (const auto& endp : CTX->hosts()) { - try { - LOG(DEBUG, "Sending RPC to host: {}", endp.to_string()); - - gkfs::rpc::remove::input in(path); - - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that - // we can retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. When that happens we can remove the .at(0) :/ - // - // - - handles.emplace_back( - ld_network_service->post(endp, in)); - - } catch (const std::exception& ex) { - // TODO(amiranda): we should cancel all previously posted requests - // here, unfortunately, Hermes does not support it yet :/ - LOG(ERROR, "Failed to send request to host: {}", - endp.to_string()); - throw std::runtime_error( - "Failed to forward non-blocking rpc request"); - } - } - } - // wait for RPC responses - bool got_error = false; - - for (const auto& h : handles) { - - try { - // XXX We might need a timeout here to not wait forever for an - // output that never comes? - auto out = h.get().at(0); - - if (out.err() != 0) { - LOG(ERROR, "received error response: {}", out.err()); - got_error = true; - errno = out.err(); - } - } catch (const std::exception& ex) { - LOG(ERROR, "while getting rpc output"); - got_error = true; - errno = EBUSY; - } } - - return got_error ? -1 : 0; } + // wait for RPC responses + bool got_error = false; - - int update_metadentry(const string& path, const Metadata& md, const MetadentryUpdateFlags& md_flags) { - - auto endp = CTX->hosts().at( - CTX->distributor()->locate_file_metadata(path)); + for (const auto& h : handles) { try { - - LOG(DEBUG, "Sending RPC ..."); - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can - // retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. When that happens we can remove the .at(0) :/ - auto out = - ld_network_service->post( - endp, - path, - (md_flags.link_count ? md.link_count() : 0), - /* mode */ 0, - /* uid */ 0, - /* gid */ 0, - (md_flags.size ? md.size() : 0), - (md_flags.blocks ? md.blocks() : 0), - (md_flags.atime ? md.atime() : 0), - (md_flags.mtime ? md.mtime() : 0), - (md_flags.ctime ? md.ctime() : 0), - bool_to_merc_bool(md_flags.link_count), - /* mode_flag */ false, - bool_to_merc_bool(md_flags.size), - bool_to_merc_bool(md_flags.blocks), - bool_to_merc_bool(md_flags.atime), - bool_to_merc_bool(md_flags.mtime), - bool_to_merc_bool(md_flags.ctime)).get().at(0); - - LOG(DEBUG, "Got response success: {}", out.err()); + // XXX We might need a timeout here to not wait forever for an + // output that never comes? 
+ auto out = h.get().at(0); if (out.err() != 0) { + LOG(ERROR, "received error response: {}", out.err()); + got_error = true; errno = out.err(); - return -1; } - - return 0; - } catch (const std::exception& ex) { LOG(ERROR, "while getting rpc output"); + got_error = true; errno = EBUSY; - return -1; } } - int update_metadentry_size(const string& path, const size_t size, const off64_t offset, const bool append_flag, - off64_t& ret_size) { + return got_error ? -1 : 0; +} + + +int rpc_send::update_metadentry(const string& path, const Metadata& md, const MetadentryUpdateFlags& md_flags) { + + auto endp = CTX->hosts().at(CTX->distributor()->locate_file_metadata(path)); + + try { + + LOG(DEBUG, "Sending RPC ..."); + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can + // retry for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. When that happens we can remove the .at(0) :/ + auto out = ld_network_service->post( + endp, + path, + (md_flags.link_count ? md.link_count() : 0), + /* mode */ 0, + /* uid */ 0, + /* gid */ 0, + (md_flags.size ? md.size() : 0), + (md_flags.blocks ? md.blocks() : 0), + (md_flags.atime ? md.atime() : 0), + (md_flags.mtime ? md.mtime() : 0), + (md_flags.ctime ? md.ctime() : 0), + bool_to_merc_bool(md_flags.link_count), + /* mode_flag */ false, + bool_to_merc_bool(md_flags.size), + bool_to_merc_bool(md_flags.blocks), + bool_to_merc_bool(md_flags.atime), + bool_to_merc_bool(md_flags.mtime), + bool_to_merc_bool(md_flags.ctime)).get().at(0); + + LOG(DEBUG, "Got response success: {}", out.err()); + + if (out.err() != 0) { + errno = out.err(); + return -1; + } - auto endp = CTX->hosts().at( - CTX->distributor()->locate_file_metadata(path)); + return 0; - try { + } catch (const std::exception& ex) { + LOG(ERROR, "while getting rpc output"); + errno = EBUSY; + return -1; + } +} - LOG(DEBUG, "Sending RPC ..."); - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can - // retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. When that happens we can remove the .at(0) :/ - auto out = - ld_network_service->post( - endp, path, size, offset, - bool_to_merc_bool(append_flag)).get().at(0); +int +rpc_send::update_metadentry_size(const string& path, const size_t size, const off64_t offset, const bool append_flag, + off64_t& ret_size) { - LOG(DEBUG, "Got response success: {}", out.err()); + auto endp = CTX->hosts().at(CTX->distributor()->locate_file_metadata(path)); - if (out.err() != 0) { - errno = out.err(); - return -1; - } + try { - ret_size = out.ret_size(); - return out.err(); + LOG(DEBUG, "Sending RPC ..."); + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can + // retry for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. 
When that happens we can remove the .at(0) :/ + auto out = ld_network_service->post( + endp, path, size, offset, + bool_to_merc_bool(append_flag)).get().at(0); - return 0; + LOG(DEBUG, "Got response success: {}", out.err()); - } catch (const std::exception& ex) { - LOG(ERROR, "while getting rpc output"); - errno = EBUSY; - ret_size = 0; - return EUNKNOWN; + if (out.err() != 0) { + errno = out.err(); + return -1; } + + ret_size = out.ret_size(); + return out.err(); + + return 0; + + } catch (const std::exception& ex) { + LOG(ERROR, "while getting rpc output"); + errno = EBUSY; + ret_size = 0; + return EUNKNOWN; } +} - int get_metadentry_size(const std::string& path, off64_t& ret_size) { +int rpc_send::get_metadentry_size(const std::string& path, off64_t& ret_size) { - auto endp = CTX->hosts().at( - CTX->distributor()->locate_file_metadata(path)); + auto endp = CTX->hosts().at(CTX->distributor()->locate_file_metadata(path)); - try { + try { - LOG(DEBUG, "Sending RPC ..."); - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can - // retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. When that happens we can remove the .at(0) :/ - auto out = - ld_network_service->post( - endp, path).get().at(0); + LOG(DEBUG, "Sending RPC ..."); + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can + // retry for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. When that happens we can remove the .at(0) :/ + auto out = ld_network_service->post(endp, path).get().at(0); - LOG(DEBUG, "Got response success: {}", out.err()); + LOG(DEBUG, "Got response success: {}", out.err()); - ret_size = out.ret_size(); - return out.err(); + ret_size = out.ret_size(); + return out.err(); - } catch (const std::exception& ex) { - LOG(ERROR, "while getting rpc output"); - errno = EBUSY; - ret_size = 0; - return EUNKNOWN; - } + } catch (const std::exception& ex) { + LOG(ERROR, "while getting rpc output"); + errno = EBUSY; + ret_size = 0; + return EUNKNOWN; } +} /** * Sends an RPC request to a specific node to push all chunks that belong to him */ - void get_dirents(OpenDir& open_dir) { - - auto const root_dir = open_dir.path(); - auto const targets = - CTX->distributor()->locate_directory_metadata(root_dir); - - /* preallocate receiving buffer. The actual size is not known yet. - * - * On C++14 make_unique function also zeroes the newly allocated buffer. - * It turns out that this operation is increadibly slow for such a big - * buffer. Moreover we don't need a zeroed buffer here. - */ - auto large_buffer = - std::unique_ptr(new char[gkfs_config::rpc::dirents_buff_size]); - - //XXX there is a rounding error here depending on the number of targets... 
- const std::size_t per_host_buff_size = - gkfs_config::rpc::dirents_buff_size / targets.size(); - - // expose local buffers for RMA from servers - std::vector exposed_buffers; - exposed_buffers.reserve(targets.size()); - - for (std::size_t i = 0; i < targets.size(); ++i) { - try { - exposed_buffers.emplace_back( - ld_network_service->expose( - std::vector{ - hermes::mutable_buffer{ - large_buffer.get() + (i * per_host_buff_size), - per_host_buff_size - } - }, - hermes::access_mode::write_only)); - } catch (const std::exception& ex) { - throw std::runtime_error("Failed to expose buffers for RMA"); - } +void rpc_send::get_dirents(OpenDir& open_dir) { + + auto const root_dir = open_dir.path(); + auto const targets = CTX->distributor()->locate_directory_metadata(root_dir); + + /* preallocate receiving buffer. The actual size is not known yet. + * + * On C++14 make_unique function also zeroes the newly allocated buffer. + * It turns out that this operation is increadibly slow for such a big + * buffer. Moreover we don't need a zeroed buffer here. + */ + auto large_buffer = std::unique_ptr(new char[gkfs_config::rpc::dirents_buff_size]); + + //XXX there is a rounding error here depending on the number of targets... + const std::size_t per_host_buff_size = gkfs_config::rpc::dirents_buff_size / targets.size(); + + // expose local buffers for RMA from servers + std::vector exposed_buffers; + exposed_buffers.reserve(targets.size()); + + for (std::size_t i = 0; i < targets.size(); ++i) { + try { + exposed_buffers.emplace_back(ld_network_service->expose( + std::vector{ + hermes::mutable_buffer{ + large_buffer.get() + (i * per_host_buff_size), + per_host_buff_size + } + }, + hermes::access_mode::write_only)); + } catch (const std::exception& ex) { + throw std::runtime_error("Failed to expose buffers for RMA"); } + } - // send RPCs - std::vector> handles; + // send RPCs + std::vector> handles; - for (std::size_t i = 0; i < targets.size(); ++i) { + for (std::size_t i = 0; i < targets.size(); ++i) { - LOG(DEBUG, "target_host: {}", targets[i]); + LOG(DEBUG, "target_host: {}", targets[i]); - // Setup rpc input parameters for each host - auto endp = CTX->hosts().at(targets[i]); + // Setup rpc input parameters for each host + auto endp = CTX->hosts().at(targets[i]); - gkfs::rpc::get_dirents::input in(root_dir, exposed_buffers[i]); + gkfs::rpc::get_dirents::input in(root_dir, exposed_buffers[i]); - try { + try { - LOG(DEBUG, "Sending RPC to host: {}", targets[i]); - handles.emplace_back( - ld_network_service->post(endp, in)); - } catch (const std::exception& ex) { - LOG(ERROR, "Unable to send non-blocking get_dirents() " - "on {} [peer: {}]", root_dir, targets[i]); - throw std::runtime_error("Failed to post non-blocking RPC request"); - } + LOG(DEBUG, "Sending RPC to host: {}", targets[i]); + handles.emplace_back(ld_network_service->post(endp, in)); + } catch (const std::exception& ex) { + LOG(ERROR, "Unable to send non-blocking get_dirents() " + "on {} [peer: {}]", root_dir, targets[i]); + throw std::runtime_error("Failed to post non-blocking RPC request"); } + } - // wait for RPC responses - for (std::size_t i = 0; i < handles.size(); ++i) { + // wait for RPC responses + for (std::size_t i = 0; i < handles.size(); ++i) { - gkfs::rpc::get_dirents::output out; + gkfs::rpc::get_dirents::output out; - try { - // XXX We might need a timeout here to not wait forever for an - // output that never comes? 
- out = handles[i].get().at(0); - - if (out.err() != 0) { - throw std::runtime_error( - fmt::format("Failed to retrieve dir entries from " - "host '{}'. Error '{}', path '{}'", - targets[i], strerror(out.err()), root_dir)); - } - } catch (const std::exception& ex) { + try { + // XXX We might need a timeout here to not wait forever for an + // output that never comes? + out = handles[i].get().at(0); + + if (out.err() != 0) { throw std::runtime_error( - fmt::format("Failed to get rpc output.. [path: {}, " - "target host: {}]", root_dir, targets[i])); + fmt::format("Failed to retrieve dir entries from " + "host '{}'. Error '{}', path '{}'", + targets[i], strerror(out.err()), root_dir)); } + } catch (const std::exception& ex) { + throw std::runtime_error( + fmt::format("Failed to get rpc output.. [path: {}, " + "target host: {}]", root_dir, targets[i])); + } - // each server wrote information to its pre-defined region in - // large_buffer, recover it by computing the base_address for each - // particular server and adding the appropriate offsets - assert(exposed_buffers[i].count() == 1); - void* base_ptr = exposed_buffers[i].begin()->data(); + // each server wrote information to its pre-defined region in + // large_buffer, recover it by computing the base_address for each + // particular server and adding the appropriate offsets + assert(exposed_buffers[i].count() == 1); + void* base_ptr = exposed_buffers[i].begin()->data(); - bool* bool_ptr = reinterpret_cast(base_ptr); - char* names_ptr = reinterpret_cast(base_ptr) + - (out.dirents_size() * sizeof(bool)); + bool* bool_ptr = reinterpret_cast(base_ptr); + char* names_ptr = reinterpret_cast(base_ptr) + + (out.dirents_size() * sizeof(bool)); - for (std::size_t j = 0; j < out.dirents_size(); j++) { + for (std::size_t j = 0; j < out.dirents_size(); j++) { - FileType ftype = (*bool_ptr) ? - FileType::directory : - FileType::regular; - bool_ptr++; + FileType ftype = (*bool_ptr) ? FileType::directory : FileType::regular; + bool_ptr++; - // Check that we are not outside the recv_buff for this specific host - assert((names_ptr - reinterpret_cast(base_ptr)) > 0); - assert( - static_cast( - names_ptr - reinterpret_cast(base_ptr)) < - per_host_buff_size); + // Check that we are not outside the recv_buff for this specific host + assert((names_ptr - reinterpret_cast(base_ptr)) > 0); + assert(static_cast(names_ptr - reinterpret_cast(base_ptr)) < per_host_buff_size); - auto name = std::string(names_ptr); - names_ptr += name.size() + 1; + auto name = std::string(names_ptr); + names_ptr += name.size() + 1; - open_dir.add(name, ftype); - } + open_dir.add(name, ftype); } } +} #ifdef HAS_SYMLINKS - int mk_symlink(const std::string& path, const std::string& target_path) { - - auto endp = CTX->hosts().at( - CTX->distributor()->locate_file_metadata(path)); +int rpc_send::mk_symlink(const std::string& path, const std::string& target_path) { - try { + auto endp = CTX->hosts().at(CTX->distributor()->locate_file_metadata(path)); - LOG(DEBUG, "Sending RPC ..."); - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can - // retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. 
When that happens we can remove the .at(0) :/ - auto out = - ld_network_service->post( - endp, path, target_path).get().at(0); + try { - LOG(DEBUG, "Got response success: {}", out.err()); + LOG(DEBUG, "Sending RPC ..."); + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can + // retry for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. When that happens we can remove the .at(0) :/ + auto out = ld_network_service->post(endp, path, target_path).get().at(0); - if (out.err() != 0) { - errno = out.err(); - return -1; - } - - return 0; + LOG(DEBUG, "Got response success: {}", out.err()); - } catch (const std::exception& ex) { - LOG(ERROR, "while getting rpc output"); - errno = EBUSY; + if (out.err() != 0) { + errno = out.err(); return -1; } + + return 0; + + } catch (const std::exception& ex) { + LOG(ERROR, "while getting rpc output"); + errno = EBUSY; + return -1; } +} #endif - -} //end namespace rpc_send -- GitLab From 77eadb1c430412b9b08331b9f93814e4d494755b Mon Sep 17 00:00:00 2001 From: Marc Vef Date: Mon, 10 Feb 2020 11:14:24 +0100 Subject: [PATCH 05/25] daemon: metadentry namespace introduced --- include/daemon/ops/metadentry.hpp | 21 ++++--- src/client/gkfs_functions.cpp | 4 +- src/client/rpc/ld_rpc_metadentry.cpp | 3 +- src/daemon/handler/h_metadentry.cpp | 18 +++--- src/daemon/main.cpp | 2 +- src/daemon/ops/metadentry.cpp | 89 ++++++++++++++++------------ 6 files changed, 77 insertions(+), 60 deletions(-) diff --git a/include/daemon/ops/metadentry.hpp b/include/daemon/ops/metadentry.hpp index d9a6cb0ac..dcde355fe 100644 --- a/include/daemon/ops/metadentry.hpp +++ b/include/daemon/ops/metadentry.hpp @@ -18,20 +18,25 @@ #include #include -void create_metadentry(const std::string& path, Metadata& md); +namespace gkfs { + namespace metadentry { -std::string get_metadentry_str(const std::string& path); + Metadata get(const std::string& path); -Metadata get_metadentry(const std::string& path); + std::string get_str(const std::string& path); -void remove_node(const std::string& path); + size_t get_size(const std::string& path); -size_t get_metadentry_size(const std::string& path); + std::vector> get_dirents(const std::string& dir); -void update_metadentry_size(const std::string& path, size_t io_size, off_t offset, bool append); + void create(const std::string& path, Metadata& md); -void update_metadentry(const std::string& path, Metadata& md); + void update(const std::string& path, Metadata& md); -std::vector> get_dirents(const std::string& dir); + void update_size(const std::string& path, size_t io_size, off_t offset, bool append); + + void remove_node(const std::string& path); + } +} #endif //GEKKOFS_METADENTRY_HPP diff --git a/src/client/gkfs_functions.cpp b/src/client/gkfs_functions.cpp index 101e4b841..0c5248a92 100644 --- a/src/client/gkfs_functions.cpp +++ b/src/client/gkfs_functions.cpp @@ -680,6 +680,4 @@ int gkfs::func::readlink(const std::string& path, char* buf, int bufsize) { return path_size; } -#endif - -#pragma clang diagnostic pop \ No newline at end of file +#endif \ No newline at end of file diff --git a/src/client/rpc/ld_rpc_metadentry.cpp b/src/client/rpc/ld_rpc_metadentry.cpp index b00b7decb..582d92f39 100644 --- a/src/client/rpc/ld_rpc_metadentry.cpp +++ b/src/client/rpc/ld_rpc_metadentry.cpp @@ -27,8 +27,7 @@ using namespace std; int rpc_send::mk_node(const std::string& path, const mode_t mode) { int err 
= EUNKNOWN; - auto endp = CTX->hosts().at( - CTX->distributor()->locate_file_metadata(path)); + auto endp = CTX->hosts().at(CTX->distributor()->locate_file_metadata(path)); try { LOG(DEBUG, "Sending RPC ..."); diff --git a/src/daemon/handler/h_metadentry.cpp b/src/daemon/handler/h_metadentry.cpp index 7f7a3a3b8..9b04888b4 100644 --- a/src/daemon/handler/h_metadentry.cpp +++ b/src/daemon/handler/h_metadentry.cpp @@ -33,7 +33,7 @@ static hg_return_t rpc_srv_mk_node(hg_handle_t handle) { Metadata md(in.mode); try { // create metadentry - create_metadentry(in.path, md); + gkfs::metadentry::create(in.path, md); out.err = 0; } catch (const std::exception& e) { GKFS_DATA->spdlogger()->error("{}() Failed to create metadentry: '{}'", __func__, e.what()); @@ -65,7 +65,7 @@ static hg_return_t rpc_srv_stat(hg_handle_t handle) { try { // get the metadata - val = get_metadentry_str(in.path); + val = gkfs::metadentry::get_str(in.path); out.db_val = val.c_str(); out.err = 0; GKFS_DATA->spdlogger()->debug("{}() Sending output mode '{}'", __func__, out.db_val); @@ -137,7 +137,7 @@ static hg_return_t rpc_srv_rm_node(hg_handle_t handle) { try { // Remove metadentry if exists on the node // and remove all chunks for that file - remove_node(in.path); + gkfs::metadentry::remove_node(in.path); out.err = 0; } catch (const NotFoundException& e) { /* The metadentry was not found on this node, @@ -180,7 +180,7 @@ static hg_return_t rpc_srv_update_metadentry(hg_handle_t handle) { // do update try { - Metadata md = get_metadentry(in.path); + Metadata md = gkfs::metadentry::get(in.path); if (in.block_flag == HG_TRUE) md.blocks(in.blocks); if (in.nlink_flag == HG_TRUE) @@ -193,7 +193,7 @@ static hg_return_t rpc_srv_update_metadentry(hg_handle_t handle) { md.mtime(in.mtime); if (in.ctime_flag == HG_TRUE) md.ctime(in.ctime); - update_metadentry(in.path, md); + gkfs::metadentry::update(in.path, md); out.err = 0; } catch (const std::exception& e) { //TODO handle NotFoundException @@ -228,7 +228,7 @@ static hg_return_t rpc_srv_update_metadentry_size(hg_handle_t handle) { in.offset, in.append); try { - update_metadentry_size(in.path, in.size, in.offset, (in.append == HG_TRUE)); + gkfs::metadentry::update_size(in.path, in.size, in.offset, (in.append == HG_TRUE)); out.err = 0; //TODO the actual size of the file could be different after the size update // do to concurrency on size @@ -268,7 +268,7 @@ static hg_return_t rpc_srv_get_metadentry_size(hg_handle_t handle) { // do update try { - out.ret_size = get_metadentry_size(in.path); + out.ret_size = gkfs::metadentry::get_size(in.path); out.err = 0; } catch (const NotFoundException& e) { GKFS_DATA->spdlogger()->debug("{}() Entry not found: '{}'", __func__, in.path); @@ -313,7 +313,7 @@ static hg_return_t rpc_srv_get_dirents(hg_handle_t handle) { auto bulk_size = margo_bulk_get_size(in.bulk_handle); //Get directory entries from local DB - std::vector> entries = get_dirents(in.path); + std::vector> entries = gkfs::metadentry::get_dirents(in.path); out.dirents_size = entries.size(); @@ -396,7 +396,7 @@ static hg_return_t rpc_srv_mk_symlink(hg_handle_t handle) { try { Metadata md = {LINK_MODE, in.target_path}; // create metadentry - create_metadentry(in.path, md); + gkfs::metadentry::create(in.path, md); out.err = 0; } catch (const std::exception& e) { GKFS_DATA->spdlogger()->error("{}() Failed to create metadentry: {}", __func__, e.what()); diff --git a/src/daemon/main.cpp b/src/daemon/main.cpp index ee567293e..1b25c9730 100644 --- a/src/daemon/main.cpp +++ b/src/daemon/main.cpp @@ 
-92,7 +92,7 @@ void init_environment() { // Create metadentry for root directory Metadata root_md{S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO}; try { - create_metadentry("/", root_md); + gkfs::metadentry::create("/", root_md); } catch (const std::exception& e) { throw runtime_error("Failed to write root metadentry to KV store: "s + e.what()); } diff --git a/src/daemon/ops/metadentry.cpp b/src/daemon/ops/metadentry.cpp index 3286f1671..b2842453b 100644 --- a/src/daemon/ops/metadentry.cpp +++ b/src/daemon/ops/metadentry.cpp @@ -18,12 +18,50 @@ using namespace std; +/** + * Returns the metadata of an object at a specific path. The metadata can be of dummy values if configured + * @param path + * @param attr + * @return + */ +Metadata gkfs::metadentry::get(const std::string& path) { + return Metadata(get_str(path)); +} + +/** + * Get metadentry string only for path + * @param path + * @return + */ +std::string gkfs::metadentry::get_str(const std::string& path) { + return GKFS_DATA->mdb()->get(path); +} + +/** + * Gets the size of a metadentry + * @param path + * @param ret_size (return val) + * @return err + */ +size_t gkfs::metadentry::get_size(const string& path) { + return get(path).size(); +} + +/** + * Returns a vector of directory entries for given directory + * @param dir + * @return + */ +std::vector> gkfs::metadentry::get_dirents(const std::string& dir) { + return GKFS_DATA->mdb()->get_dirents(dir); +} + /** * Creates metadata (if required) and dentry at the same time * @param path * @param mode */ -void create_metadentry(const std::string& path, Metadata& md) { +void gkfs::metadentry::create(const std::string& path, Metadata& md) { // update metadata object based on what metadata is needed if (GKFS_DATA->atime_state() || GKFS_DATA->mtime_state() || GKFS_DATA->ctime_state()) { @@ -40,38 +78,13 @@ void create_metadentry(const std::string& path, Metadata& md) { GKFS_DATA->mdb()->put(path, md.serialize()); } -std::string get_metadentry_str(const std::string& path) { - return GKFS_DATA->mdb()->get(path); -} - /** - * Returns the metadata of an object at a specific path. 
The metadata can be of dummy values if configured - * @param path - * @param attr - * @return - */ -Metadata get_metadentry(const std::string& path) { - return Metadata(get_metadentry_str(path)); -} - -/** - * Remove metadentry if exists and try to remove all chunks for path + * Update metadentry by given Metadata object and path * @param path - * @return + * @param md */ -void remove_node(const string& path) { - GKFS_DATA->mdb()->remove(path); // remove metadentry - GKFS_DATA->storage()->destroy_chunk_space(path); // destroys all chunks for the path on this node -} - -/** - * Gets the size of a metadentry - * @param path - * @param ret_size (return val) - * @return err - */ -size_t get_metadentry_size(const string& path) { - return get_metadentry(path).size(); +void gkfs::metadentry::update(const string& path, Metadata& md) { + GKFS_DATA->mdb()->update(path, path, md.serialize()); } /** @@ -80,14 +93,16 @@ size_t get_metadentry_size(const string& path) { * @param io_size * @return the updated size */ -void update_metadentry_size(const string& path, size_t io_size, off64_t offset, bool append) { +void gkfs::metadentry::update_size(const string& path, size_t io_size, off64_t offset, bool append) { GKFS_DATA->mdb()->increase_size(path, io_size + offset, append); } -void update_metadentry(const string& path, Metadata& md) { - GKFS_DATA->mdb()->update(path, path, md.serialize()); +/** + * Remove metadentry if exists and try to remove all chunks for path + * @param path + * @return + */ +void gkfs::metadentry::remove_node(const string& path) { + GKFS_DATA->mdb()->remove(path); // remove metadentry + GKFS_DATA->storage()->destroy_chunk_space(path); // destroys all chunks for the path on this node } - -std::vector> get_dirents(const std::string& dir) { - return GKFS_DATA->mdb()->get_dirents(dir); -} \ No newline at end of file -- GitLab From 4e34ac00127dd4c42653d3c6b94953be714f74d0 Mon Sep 17 00:00:00 2001 From: Marc Vef Date: Mon, 10 Feb 2020 13:01:51 +0100 Subject: [PATCH 06/25] add namespaces: gkfs::path, gkfs::path_util. 
Unify: gkfs::logging, gkfs::env --- include/client/resolve.hpp | 23 +++-- include/global/env_util.hpp | 8 +- include/global/log_util.hpp | 12 ++- include/global/path_util.hpp | 23 +++-- src/client/gkfs_functions.cpp | 6 +- src/client/hooks.cpp | 12 +-- src/client/preload.cpp | 2 +- src/client/preload_context.cpp | 20 ++-- src/client/resolve.cpp | 108 +++++++++++----------- src/daemon/backend/data/chunk_storage.cpp | 6 +- src/daemon/backend/metadata/db.cpp | 8 +- src/daemon/main.cpp | 6 +- src/global/env_util.cpp | 13 +-- src/global/log_util.cpp | 10 +- src/global/path_util.cpp | 67 +++++++------- 15 files changed, 172 insertions(+), 152 deletions(-) diff --git a/include/client/resolve.hpp b/include/client/resolve.hpp index 35532fe23..2cd832dde 100644 --- a/include/client/resolve.hpp +++ b/include/client/resolve.hpp @@ -12,17 +12,26 @@ */ #include +#include -bool resolve_path(const std::string& path, std::string& resolved, bool resolve_last_link = true); +namespace gkfs { + namespace path { -std::string get_sys_cwd(); + unsigned int match_components(const std::string& path, unsigned int& path_components, + const std::vector& components); -void set_sys_cwd(const std::string& path); + bool resolve(const std::string& path, std::string& resolved, bool resolve_last_link = true); -void set_env_cwd(const std::string& path); + std::string get_sys_cwd(); -void unset_env_cwd(); + void set_sys_cwd(const std::string& path); -void init_cwd(); + void set_env_cwd(const std::string& path); -void set_cwd(const std::string& path, bool internal); + void unset_env_cwd(); + + void init_cwd(); + + void set_cwd(const std::string& path, bool internal); + } +} diff --git a/include/global/env_util.hpp b/include/global/env_util.hpp index 993699b20..c8ad464d4 100644 --- a/include/global/env_util.hpp +++ b/include/global/env_util.hpp @@ -17,13 +17,11 @@ #include namespace gkfs { -namespace env { + namespace env { -std::string -get_var(const std::string& name, - const std::string& default_value = ""); + std::string get_var(const std::string& name, const std::string& default_value = ""); -} // namespace env + } // namespace env } // namespace gkfs #endif // GKFS_COMMON_ENV_UTIL_HPP diff --git a/include/global/log_util.hpp b/include/global/log_util.hpp index c2f81f49a..71a67356e 100644 --- a/include/global/log_util.hpp +++ b/include/global/log_util.hpp @@ -16,11 +16,15 @@ #include -spdlog::level::level_enum get_spdlog_level(std::string level_str); +namespace gkfs { + namespace logging { -spdlog::level::level_enum get_spdlog_level(unsigned long level); + spdlog::level::level_enum get_level(std::string level_str); -void setup_loggers(const std::vector& loggers, - spdlog::level::level_enum level, const std::string& path); + spdlog::level::level_enum get_level(unsigned long level); + + void setup(const std::vector& loggers, spdlog::level::level_enum level, const std::string& path); + } +} #endif diff --git a/include/global/path_util.hpp b/include/global/path_util.hpp index bdcc42acf..9b637c416 100644 --- a/include/global/path_util.hpp +++ b/include/global/path_util.hpp @@ -17,20 +17,27 @@ #include #include -constexpr unsigned int PATH_MAX_LEN = 4096; // 4k chars +namespace gkfs { + namespace path_util { -constexpr char PSP = '/'; // PATH SEPARATOR + constexpr unsigned int max_length = 4096; // 4k chars -bool is_relative_path(const std::string& path); + constexpr char separator = '/'; // PATH SEPARATOR -bool is_absolute_path(const std::string& path); + bool is_relative(const std::string& path); -bool has_trailing_slash(const 
std::string& path); + bool is_absolute(const std::string& path); -std::string prepend_path(const std::string& path, const char* raw_path); + bool has_trailing_slash(const std::string& path); -std::string dirname(const std::string& path); + std::string prepend_path(const std::string& path, const char* raw_path); -std::vector split_path(const std::string& path); + std::string absolute_to_relative(const std::string& root_path, const std::string& absolute_path); // unused ATM + + std::string dirname(const std::string& path); + + std::vector split_path(const std::string& path); + } +} #endif //GEKKOFS_PATH_UTIL_HPP diff --git a/src/client/gkfs_functions.cpp b/src/client/gkfs_functions.cpp index 0c5248a92..97c333cf5 100644 --- a/src/client/gkfs_functions.cpp +++ b/src/client/gkfs_functions.cpp @@ -57,7 +57,7 @@ std::shared_ptr gkfs::func::metadata(const string& path, bool follow_l int gkfs::func::check_parent_dir(const std::string& path) { #if CREATE_CHECK_PARENTS - auto p_comp = dirname(path); + auto p_comp = gkfs::path_util::dirname(path); auto md = gkfs::func::metadata(p_comp); if (!md) { if (errno == ENOENT) { @@ -232,7 +232,7 @@ int gkfs::func::statfs(sys_statfs* buf) { buf->f_files = 0; buf->f_ffree = 0; buf->f_fsid = {0, 0}; - buf->f_namelen = PATH_MAX_LEN; + buf->f_namelen = gkfs::path_util::max_length; buf->f_frsize = 0; buf->f_flags = ST_NOATIME | ST_NODIRATIME | ST_NOSUID | ST_NODEV | ST_SYNCHRONOUS; @@ -250,7 +250,7 @@ int gkfs::func::statvfs(sys_statvfs* buf) { buf->f_ffree = 0; buf->f_favail = 0; buf->f_fsid = 0; - buf->f_namemax = PATH_MAX_LEN; + buf->f_namemax = gkfs::path_util::max_length; buf->f_frsize = 0; buf->f_flag = ST_NOATIME | ST_NODIRATIME | ST_NOSUID | ST_NODEV | ST_SYNCHRONOUS; diff --git a/src/client/hooks.cpp b/src/client/hooks.cpp index aa4c6172c..bda184b7f 100644 --- a/src/client/hooks.cpp +++ b/src/client/hooks.cpp @@ -515,13 +515,13 @@ int hook_chdir(const char* path) { //TODO get complete path from relativize_path instead of // removing mountdir and then adding again here rel_path.insert(0, CTX->mountdir()); - if (has_trailing_slash(rel_path)) { + if (gkfs::path_util::has_trailing_slash(rel_path)) { // open_dir is '/' rel_path.pop_back(); } } try { - set_cwd(rel_path, internal); + gkfs::path::set_cwd(rel_path, internal); } catch (const std::system_error& se) { return -(se.code().value()); } @@ -543,12 +543,12 @@ int hook_fchdir(unsigned int fd) { } std::string new_path = CTX->mountdir() + open_dir->path(); - if (has_trailing_slash(new_path)) { + if (gkfs::path_util::has_trailing_slash(new_path)) { // open_dir is '/' new_path.pop_back(); } try { - set_cwd(new_path, true); + gkfs::path::set_cwd(new_path, true); } catch (const std::system_error& se) { return -(se.code().value()); } @@ -559,8 +559,8 @@ int hook_fchdir(unsigned int fd) { std::system_category(), "Failed to change directory (fchdir syscall)"); } - unset_env_cwd(); - CTX->cwd(get_sys_cwd()); + gkfs::path::unset_env_cwd(); + CTX->cwd(gkfs::path::get_sys_cwd()); } return 0; } diff --git a/src/client/preload.cpp b/src/client/preload.cpp index f9e2e8434..daf92168a 100644 --- a/src/client/preload.cpp +++ b/src/client/preload.cpp @@ -151,7 +151,7 @@ void init_preload() { CTX->protect_user_fds(); log_prog_name(); - init_cwd(); + gkfs::path::init_cwd(); LOG(DEBUG, "Current working directory: '{}'", CTX->cwd()); init_ld_env_if_needed(); diff --git a/src/client/preload_context.cpp b/src/client/preload_context.cpp index a7f702353..f0bafa6f1 100644 --- a/src/client/preload_context.cpp +++ 
b/src/client/preload_context.cpp @@ -79,9 +79,9 @@ PreloadContext::init_logging() { } void PreloadContext::mountdir(const std::string& path) { - assert(is_absolute_path(path)); - assert(!has_trailing_slash(path)); - mountdir_components_ = split_path(path); + assert(gkfs::path_util::is_absolute(path)); + assert(!gkfs::path_util::has_trailing_slash(path)); + mountdir_components_ = gkfs::path_util::split_path(path); mountdir_ = path; } @@ -136,11 +136,11 @@ RelativizeStatus PreloadContext::relativize_fd_path(int dirfd, std::string path; - if (raw_path[0] != PSP) { + if (raw_path[0] != gkfs::path_util::separator) { // path is relative if (dirfd == AT_FDCWD) { // path is relative to cwd - path = prepend_path(cwd_, raw_path); + path = gkfs::path_util::prepend_path(cwd_, raw_path); } else { if (!ofm_->exist(dirfd)) { return RelativizeStatus::fd_unknown; @@ -152,14 +152,14 @@ RelativizeStatus PreloadContext::relativize_fd_path(int dirfd, } path = mountdir_; path.append(dir->path()); - path.push_back(PSP); + path.push_back(gkfs::path_util::separator); path.append(raw_path); } } else { path = raw_path; } - if (resolve_path(path, relative_path, resolve_last_link)) { + if (gkfs::path::resolve(path, relative_path, resolve_last_link)) { return RelativizeStatus::internal; } return RelativizeStatus::external; @@ -176,15 +176,15 @@ bool PreloadContext::relativize_path(const char* raw_path, std::string& relative std::string path; - if (raw_path[0] != PSP) { + if (raw_path[0] != gkfs::path_util::separator) { /* Path is not absolute, we need to prepend CWD; * First reserve enough space to minimize memory copy */ - path = prepend_path(cwd_, raw_path); + path = gkfs::path_util::prepend_path(cwd_, raw_path); } else { path = raw_path; } - return resolve_path(path, relative_path, resolve_last_link); + return gkfs::path::resolve(path, relative_path, resolve_last_link); } const std::shared_ptr& PreloadContext::file_map() const { diff --git a/src/client/resolve.cpp b/src/client/resolve.cpp index b8151b182..94a1b8975 100644 --- a/src/client/resolve.cpp +++ b/src/client/resolve.cpp @@ -11,7 +11,7 @@ SPDX-License-Identifier: MIT */ - +#include #include #include #include @@ -28,9 +28,13 @@ extern "C" { #include } -static const std::string excluded_paths[2] = {"sys/", "proc/"}; +using namespace std; + +static const string excluded_paths[2] = {"sys/", "proc/"}; + +namespace p_util = gkfs::path_util; -/* Match components in path +/** Match components in path * * Returns the number of consecutive components at start of `path` * that match the ones in `components` vector. 
@@ -44,20 +48,20 @@ static const std::string excluded_paths[2] = {"sys/", "proc/"}; * tot_comp == 4; * ``` */ -unsigned int path_match_components(const std::string& path, unsigned int& path_components, - const std::vector& components) { +unsigned int gkfs::path::match_components(const string& path, unsigned int& path_components, + const ::vector& components) { unsigned int matched = 0; unsigned int processed_components = 0; - std::string::size_type comp_size = 0; // size of current component - std::string::size_type start = 0; // start index of curr component - std::string::size_type end = 0; // end index of curr component (last processed Path Separator "PSP") + string::size_type comp_size = 0; // size of current component + string::size_type start = 0; // start index of curr component + string::size_type end = 0; // end index of curr component (last processed Path Separator "separator") while (++end < path.size()) { start = end; // Find next component - end = path.find(PSP, start); - if (end == std::string::npos) { + end = path.find(p_util::separator, start); + if (end == string::npos) { end = path.size(); } @@ -72,7 +76,7 @@ unsigned int path_match_components(const std::string& path, unsigned int& path_c return matched; } -/* Resolve path to its canonical representation +/** Resolve path to its canonical representation * * Populate `resolved` with the canonical representation of `path`. * @@ -84,12 +88,12 @@ unsigned int path_match_components(const std::string& path, unsigned int& path_c * returns true if the resolved path fall inside GekkoFS namespace, * and false otherwise. */ -bool resolve_path(const std::string& path, std::string& resolved, bool resolve_last_link) { +bool gkfs::path::resolve(const string& path, string& resolved, bool resolve_last_link) { LOG(DEBUG, "path: \"{}\", resolved: \"{}\", resolve_last_link: {}", path, resolved, resolve_last_link); - assert(is_absolute_path(path)); + assert(p_util::is_absolute(path)); for (auto& excl_path: excluded_paths) { if (path.compare(1, excl_path.length(), excl_path) == 0) { @@ -100,13 +104,13 @@ bool resolve_path(const std::string& path, std::string& resolved, bool resolve_l } struct stat st{}; - const std::vector& mnt_components = CTX->mountdir_components(); + const ::vector& mnt_components = CTX->mountdir_components(); unsigned int matched_components = 0; // matched number of component in mountdir unsigned int resolved_components = 0; - std::string::size_type comp_size = 0; // size of current component - std::string::size_type start = 0; // start index of curr component - std::string::size_type end = 0; // end index of curr component (last processed Path Separator "PSP") - std::string::size_type last_slash_pos = 0; // index of last slash in resolved path + string::size_type comp_size = 0; // size of current component + string::size_type start = 0; // start index of curr component + string::size_type end = 0; // end index of curr component (last processed Path Separator "separator") + string::size_type last_slash_pos = 0; // index of last slash in resolved path resolved.clear(); resolved.reserve(path.size()); @@ -114,13 +118,13 @@ bool resolve_path(const std::string& path, std::string& resolved, bool resolve_l start = end; /* Skip sequence of multiple path-separators. 
*/ - while (start < path.size() && path[start] == PSP) { + while (start < path.size() && path[start] == p_util::separator) { ++start; } // Find next component - end = path.find(PSP, start); - if (end == std::string::npos) { + end = path.find(p_util::separator, start); + if (end == string::npos) { end = path.size(); } comp_size = end - start; @@ -141,7 +145,7 @@ bool resolve_path(const std::string& path, std::string& resolved, bool resolve_l * the previous slash position should be stored. * The following search could be avoided. */ - last_slash_pos = resolved.find_last_of(PSP); + last_slash_pos = resolved.find_last_of(p_util::separator); } if (resolved_components > 0) { if (matched_components == resolved_components) { @@ -153,7 +157,7 @@ bool resolve_path(const std::string& path, std::string& resolved, bool resolve_l } // add `/` to the reresolved path - resolved.push_back(PSP); + resolved.push_back(p_util::separator); last_slash_pos = resolved.size() - 1; resolved.append(path, start, comp_size); @@ -167,30 +171,30 @@ bool resolve_path(const std::string& path, std::string& resolved, bool resolve_l LOG(DEBUG, "path \"{}\" does not exist", resolved); - resolved.append(path, end, std::string::npos); + resolved.append(path, end, string::npos); return false; } if (S_ISLNK(st.st_mode)) { if (!resolve_last_link && end == path.size()) { continue; } - auto link_resolved = std::unique_ptr(new char[PATH_MAX]); + auto link_resolved = ::unique_ptr(new char[PATH_MAX]); if (realpath(resolved.c_str(), link_resolved.get()) == nullptr) { LOG(ERROR, "Failed to get realpath for link \"{}\". " "Error: {}", resolved, ::strerror(errno)); - resolved.append(path, end, std::string::npos); + resolved.append(path, end, string::npos); return false; } // substituute resolved with new link path resolved = link_resolved.get(); - matched_components = path_match_components(resolved, resolved_components, mnt_components); + matched_components = match_components(resolved, resolved_components, mnt_components); // set matched counter to value coherent with the new path - last_slash_pos = resolved.find_last_of(PSP); + last_slash_pos = resolved.find_last_of(p_util::separator); continue; } else if ((!S_ISDIR(st.st_mode)) && (end != path.size())) { - resolved.append(path, end, std::string::npos); + resolved.append(path, end, string::npos); return false; } } else { @@ -207,70 +211,70 @@ bool resolve_path(const std::string& path, std::string& resolved, bool resolve_l } if (resolved.empty()) { - resolved.push_back(PSP); + resolved.push_back(p_util::separator); } LOG(DEBUG, "external: \"{}\"", resolved); return false; } -std::string get_sys_cwd() { - char temp[PATH_MAX_LEN]; - if (long ret = syscall_no_intercept(SYS_getcwd, temp, PATH_MAX_LEN) < 0) { - throw std::system_error(syscall_error_code(ret), - std::system_category(), +string gkfs::path::get_sys_cwd() { + char temp[p_util::max_length]; + if (long ret = syscall_no_intercept(SYS_getcwd, temp, p_util::max_length) < 0) { + throw ::system_error(syscall_error_code(ret), + ::system_category(), "Failed to retrieve current working directory"); } // getcwd could return "(unreachable)" in some cases - if (temp[0] != PSP) { - throw std::runtime_error( + if (temp[0] != p_util::separator) { + throw ::runtime_error( "Current working directory is unreachable"); } return {temp}; } -void set_sys_cwd(const std::string& path) { +void gkfs::path::set_sys_cwd(const string& path) { LOG(DEBUG, "Changing working directory to \"{}\"", path); if (long ret = syscall_no_intercept(SYS_chdir, path.c_str())) { 
LOG(ERROR, "Failed to change working directory: {}", - std::strerror(syscall_error_code(ret))); - throw std::system_error(syscall_error_code(ret), - std::system_category(), + ::strerror(syscall_error_code(ret))); + throw ::system_error(syscall_error_code(ret), + ::system_category(), "Failed to set system current working directory"); } } -void set_env_cwd(const std::string& path) { +void gkfs::path::set_env_cwd(const string& path) { LOG(DEBUG, "Setting {} to \"{}\"", gkfs::env::CWD, path); if (setenv(gkfs::env::CWD, path.c_str(), 1)) { LOG(ERROR, "Failed while setting {}: {}", - gkfs::env::CWD, std::strerror(errno)); - throw std::system_error(errno, - std::system_category(), + gkfs::env::CWD, ::strerror(errno)); + throw ::system_error(errno, + ::system_category(), "Failed to set environment current working directory"); } } -void unset_env_cwd() { +void gkfs::path::unset_env_cwd() { LOG(DEBUG, "Clearing {}()", gkfs::env::CWD); if (unsetenv(gkfs::env::CWD)) { LOG(ERROR, "Failed to clear {}: {}", - gkfs::env::CWD, std::strerror(errno)); + gkfs::env::CWD, ::strerror(errno)); - throw std::system_error(errno, - std::system_category(), + throw ::system_error(errno, + ::system_category(), "Failed to unset environment current working directory"); } } -void init_cwd() { - const char* env_cwd = std::getenv(gkfs::env::CWD); +void gkfs::path::init_cwd() { + const char* env_cwd = ::getenv(gkfs::env::CWD); if (env_cwd != nullptr) { CTX->cwd(env_cwd); } else { @@ -278,7 +282,7 @@ void init_cwd() { } } -void set_cwd(const std::string& path, bool internal) { +void gkfs::path::set_cwd(const string& path, bool internal) { if (internal) { set_sys_cwd(CTX->mountdir()); set_env_cwd(path); diff --git a/src/daemon/backend/data/chunk_storage.cpp b/src/daemon/backend/data/chunk_storage.cpp index 94000b621..095582e72 100644 --- a/src/daemon/backend/data/chunk_storage.cpp +++ b/src/daemon/backend/data/chunk_storage.cpp @@ -26,7 +26,7 @@ namespace bfs = boost::filesystem; using namespace std; string ChunkStorage::absolute(const string& internal_path) const { - assert(is_relative_path(internal_path)); + assert(gkfs::path_util::is_relative(internal_path)); return root_path + '/' + internal_path; } @@ -34,7 +34,7 @@ ChunkStorage::ChunkStorage(const string& path, const size_t chunksize) : root_path(path), chunksize(chunksize) { //TODO check path: absolute, exists, permission to write etc... 
- assert(is_absolute_path(root_path)); + assert(gkfs::path_util::is_absolute(root_path)); /* Initialize logger */ log = spdlog::get(LOGGER_NAME); @@ -44,7 +44,7 @@ ChunkStorage::ChunkStorage(const string& path, const size_t chunksize) : } string ChunkStorage::get_chunks_dir(const string& file_path) { - assert(is_absolute_path(file_path)); + assert(gkfs::path_util::is_absolute(file_path)); string chunk_dir = file_path.substr(1); ::replace(chunk_dir.begin(), chunk_dir.end(), '/', ':'); return chunk_dir; diff --git a/src/daemon/backend/metadata/db.cpp b/src/daemon/backend/metadata/db.cpp index a280be8cc..c1ffdc6a5 100644 --- a/src/daemon/backend/metadata/db.cpp +++ b/src/daemon/backend/metadata/db.cpp @@ -60,8 +60,8 @@ std::string MetadataDB::get(const std::string& key) const { } void MetadataDB::put(const std::string& key, const std::string& val) { - assert(is_absolute_path(key)); - assert(key == "/" || !has_trailing_slash(key)); + assert(gkfs::path_util::is_absolute(key)); + assert(key == "/" || !gkfs::path_util::has_trailing_slash(key)); auto cop = CreateOperand(val); auto s = db->Merge(write_opts, key, cop.serialize()); @@ -133,9 +133,9 @@ void MetadataDB::decrease_size(const std::string& key, size_t size) { */ std::vector> MetadataDB::get_dirents(const std::string& dir) const { auto root_path = dir; - assert(is_absolute_path(root_path)); + assert(gkfs::path_util::is_absolute(root_path)); //add trailing slash if missing - if (!has_trailing_slash(root_path) && root_path.size() != 1) { + if (!gkfs::path_util::has_trailing_slash(root_path) && root_path.size() != 1) { //add trailing slash only if missing and is not the root_folder "/" root_path.push_back('/'); } diff --git a/src/daemon/main.cpp b/src/daemon/main.cpp index 1b25c9730..f19a5d1ab 100644 --- a/src/daemon/main.cpp +++ b/src/daemon/main.cpp @@ -250,13 +250,13 @@ void initialize_loggers() { path = env_path; } - spdlog::level::level_enum level = get_spdlog_level(gkfs_config::logging::daemon_log_level); + spdlog::level::level_enum level = gkfs::logging::get_level(gkfs_config::logging::daemon_log_level); // Try to get log path from env variable std::string env_level_key = DAEMON_ENV_PREFIX; env_level_key += "LOG_LEVEL"; char* env_level = getenv(env_level_key.c_str()); if (env_level != nullptr) { - level = get_spdlog_level(env_level); + level = gkfs::logging::get_level(env_level); } auto logger_names = std::vector{ @@ -265,7 +265,7 @@ void initialize_loggers() { "ChunkStorage", }; - setup_loggers(logger_names, level, path); + gkfs::logging::setup(logger_names, level, path); } int main(int argc, const char* argv[]) { diff --git a/src/global/env_util.cpp b/src/global/env_util.cpp index 14f3e91db..0ba2a8278 100644 --- a/src/global/env_util.cpp +++ b/src/global/env_util.cpp @@ -16,16 +16,9 @@ #include #include -namespace gkfs { -namespace env { - -std::string -get_var(const std::string& name, - const std::string& default_value) { +using namespace std; +string gkfs::env::get_var(const string& name, const string& default_value) { const char* const val = ::secure_getenv(name.c_str()); - return val != nullptr ? std::string(val) : default_value; + return val != nullptr ? 
string(val) : default_value; } - -} // namespace env -} // namespace gkfs diff --git a/src/global/log_util.cpp b/src/global/log_util.cpp index 758e904ab..4196b7fa2 100644 --- a/src/global/log_util.cpp +++ b/src/global/log_util.cpp @@ -20,7 +20,7 @@ using namespace std; -spdlog::level::level_enum get_spdlog_level(string level_str) { +spdlog::level::level_enum gkfs::logging::get_level(string level_str) { char* parse_end; auto level = strtoul(level_str.c_str(), &parse_end, 10); if (parse_end != (level_str.c_str() + level_str.size())) { @@ -43,10 +43,10 @@ spdlog::level::level_enum get_spdlog_level(string level_str) { else throw runtime_error(fmt::format("Error: log level '{}' is invalid. Check help/readme.", level_str)); } else - return get_spdlog_level(level); + return get_level(level); } -spdlog::level::level_enum get_spdlog_level(unsigned long level) { +spdlog::level::level_enum gkfs::logging::get_level(unsigned long level) { switch (level) { case 0: return spdlog::level::off; @@ -65,8 +65,8 @@ spdlog::level::level_enum get_spdlog_level(unsigned long level) { } } -void setup_loggers(const vector& loggers_name, - spdlog::level::level_enum level, const string& path) { +void gkfs::logging::setup(const vector& loggers_name, + spdlog::level::level_enum level, const string& path) { /* Create common sink */ auto file_sink = make_shared(path); diff --git a/src/global/path_util.cpp b/src/global/path_util.cpp index 3e8251f59..4d23b99a1 100644 --- a/src/global/path_util.cpp +++ b/src/global/path_util.cpp @@ -12,35 +12,35 @@ */ #include -//#include + #include #include #include -//#include +using namespace std; -bool is_relative_path(const std::string& path) { +bool gkfs::path_util::is_relative(const string& path) { return (!path.empty()) && - (path.front() != PSP); + (path.front() != separator); } -bool is_absolute_path(const std::string& path) { +bool gkfs::path_util::is_absolute(const string& path) { return (!path.empty()) && - (path.front() == PSP); + (path.front() == separator); } -bool has_trailing_slash(const std::string& path) { - return (!path.empty()) && (path.back() == PSP); +bool gkfs::path_util::has_trailing_slash(const string& path) { + return (!path.empty()) && (path.back() == separator); } -/* Add path prefix to a given C string. +/** Add path prefix to a given C string. * * Returns a string composed by the `prefix_path` * followed by `raw_path`. * * This would return the same of: * ``` - * std::string(raw_path).append(prefix_path); + * string(raw_path).append(prefix_path); * ``` * But it is faster because it avoids to copy the `raw_path` twice. * @@ -52,33 +52,33 @@ bool has_trailing_slash(const std::string& path) { * prepend_path("/tmp/prefix", "./my/path") == "/tmp/prefix/./my/path" * ``` */ -std::string prepend_path(const std::string& prefix_path, const char* raw_path) { +string gkfs::path_util::prepend_path(const string& prefix_path, const char* raw_path) { assert(!has_trailing_slash(prefix_path)); - std::size_t raw_len = std::strlen(raw_path); - std::string res; + ::size_t raw_len = ::strlen(raw_path); + string res; res.reserve(prefix_path.size() + 1 + raw_len); res.append(prefix_path); - res.push_back(PSP); + res.push_back(separator); res.append(raw_path, raw_len); return res; } -/* Split a path into its components +/** Split a path into its components * * Returns a vector of the components of the given path. 
* * Example: * split_path("/first/second/third") == ["first", "second", "third"] */ -std::vector split_path(const std::string& path) { - std::vector tokens; - size_t start = std::string::npos; - size_t end = (path.front() != PSP) ? 0 : 1; - while (end != std::string::npos && end < path.size()) { +::vector gkfs::path_util::split_path(const string& path) { + ::vector tokens; + size_t start = string::npos; + size_t end = (path.front() != separator) ? 0 : 1; + while (end != string::npos && end < path.size()) { start = end; - end = path.find(PSP, start); + end = path.find(separator, start); tokens.push_back(path.substr(start, end - start)); - if (end != std::string::npos) { + if (end != string::npos) { ++end; } } @@ -86,18 +86,18 @@ std::vector split_path(const std::string& path) { } -/* Make an absolute path relative to a root path +/** Make an absolute path relative to a root path * * Convert @absolute_path into a relative one with respect to the given @root_path. * If @absolute_path do not start at the given @root_path an empty string will be returned. * NOTE: Trailing slash will be stripped from the new constructed relative path. */ -std::string path_to_relative(const std::string& root_path, const std::string& absolute_path) { - assert(is_absolute_path(root_path)); - assert(is_absolute_path(absolute_path)); +string gkfs::path_util::absolute_to_relative(const string& root_path, const string& absolute_path) { + assert(is_absolute(root_path)); + assert(is_absolute(absolute_path)); assert(!has_trailing_slash(root_path)); - auto diff_its = std::mismatch(absolute_path.cbegin(), absolute_path.cend(), root_path.cbegin()); + auto diff_its = ::mismatch(absolute_path.cbegin(), absolute_path.cend(), root_path.cbegin()); if (diff_its.second != root_path.cend()) { // complete path doesn't start with root_path return {}; @@ -125,12 +125,17 @@ std::string path_to_relative(const std::string& root_path, const std::string& ab return {rel_it_begin, rel_it_end}; } -std::string dirname(const std::string& path) { - assert(path.size() > 1 || path.front() == PSP); +/** + * returns the directory name for given path + * @param path + * @return + */ +string gkfs::path_util::dirname(const string& path) { + assert(path.size() > 1 || path.front() == separator); assert(path.size() == 1 || !has_trailing_slash(path)); - auto parent_path_size = path.find_last_of(PSP); - assert(parent_path_size != std::string::npos); + auto parent_path_size = path.find_last_of(separator); + assert(parent_path_size != string::npos); if (parent_path_size == 0) { // parent is '/' parent_path_size = 1; -- GitLab From 29d79fff105eb04a71a08b251f1958f5c7af0585 Mon Sep 17 00:00:00 2001 From: Marc Vef Date: Mon, 10 Feb 2020 13:03:37 +0100 Subject: [PATCH 07/25] rename resolve.{cpp,hpp} to path.{cpp,hpp} --- include/client/{resolve.hpp => path.hpp} | 0 src/client/CMakeLists.txt | 4 ++-- src/client/hooks.cpp | 2 +- src/client/{resolve.cpp => path.cpp} | 2 +- src/client/preload.cpp | 2 +- src/client/preload_context.cpp | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) rename include/client/{resolve.hpp => path.hpp} (100%) rename src/client/{resolve.cpp => path.cpp} (99%) diff --git a/include/client/resolve.hpp b/include/client/path.hpp similarity index 100% rename from include/client/resolve.hpp rename to include/client/path.hpp diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt index 9bafe5327..035317f6c 100644 --- a/src/client/CMakeLists.txt +++ b/src/client/CMakeLists.txt @@ -5,10 +5,10 @@ set(PRELOAD_SRC logging.cpp 
open_file_map.cpp open_dir.cpp + path.cpp preload.cpp preload_context.cpp preload_util.cpp - resolve.cpp ../global/path_util.cpp ../global/rpc/rpc_utils.cpp rpc/hg_rpcs.cpp @@ -27,10 +27,10 @@ set(PRELOAD_HEADERS ../../include/client/make_array.hpp ../../include/client/open_file_map.hpp ../../include/client/open_dir.hpp + ../../include/client/path.hpp ../../include/client/preload.hpp ../../include/client/preload_context.hpp ../../include/client/preload_util.hpp - ../../include/client/resolve.hpp ../../include/client/rpc/hg_rpcs.hpp ../../include/client/rpc/ld_rpc_management.hpp ../../include/client/rpc/ld_rpc_metadentry.hpp diff --git a/src/client/hooks.cpp b/src/client/hooks.cpp index bda184b7f..789312bb2 100644 --- a/src/client/hooks.cpp +++ b/src/client/hooks.cpp @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include diff --git a/src/client/resolve.cpp b/src/client/path.cpp similarity index 99% rename from src/client/resolve.cpp rename to src/client/path.cpp index 94a1b8975..391588f35 100644 --- a/src/client/resolve.cpp +++ b/src/client/path.cpp @@ -11,7 +11,7 @@ SPDX-License-Identifier: MIT */ -#include +#include #include #include #include diff --git a/src/client/preload.cpp b/src/client/preload.cpp index daf92168a..1b6a49e81 100644 --- a/src/client/preload.cpp +++ b/src/client/preload.cpp @@ -12,7 +12,7 @@ */ #include -#include +#include #include #include #include diff --git a/src/client/preload_context.cpp b/src/client/preload_context.cpp index f0bafa6f1..a86969021 100644 --- a/src/client/preload_context.cpp +++ b/src/client/preload_context.cpp @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include -- GitLab From f00800494da49370e8219b4c3221a23f268171c9 Mon Sep 17 00:00:00 2001 From: Marc Vef Date: Mon, 10 Feb 2020 13:06:22 +0100 Subject: [PATCH 08/25] Copyright update from 2019 to 2020 --- LICENSE | 4 ++-- include/client/env.hpp | 4 ++-- include/client/gkfs_functions.hpp | 4 ++-- include/client/hooks.hpp | 4 ++-- include/client/intercept.hpp | 4 ++-- include/client/logging.hpp | 4 ++-- include/client/make_array.hpp | 4 ++-- include/client/open_dir.hpp | 4 ++-- include/client/open_file_map.hpp | 4 ++-- include/client/path.hpp | 4 ++-- include/client/preload.hpp | 4 ++-- include/client/preload_context.hpp | 4 ++-- include/client/preload_util.hpp | 4 ++-- include/client/rpc/hg_rpcs.hpp | 4 ++-- include/client/rpc/ld_rpc_data_ws.hpp | 4 ++-- include/client/rpc/ld_rpc_management.hpp | 4 ++-- include/client/rpc/ld_rpc_metadentry.hpp | 4 ++-- include/client/syscalls.hpp | 4 ++-- include/client/syscalls/args.hpp | 4 ++-- include/client/syscalls/decoder.hpp | 4 ++-- include/client/syscalls/detail/syscall_info.h | 4 ++-- include/client/syscalls/errno.hpp | 4 ++-- include/client/syscalls/rets.hpp | 4 ++-- include/client/syscalls/syscall.hpp | 4 ++-- include/config.hpp | 4 ++-- include/daemon/backend/data/chunk_storage.hpp | 4 ++-- include/daemon/backend/exceptions.hpp | 4 ++-- include/daemon/backend/metadata/db.hpp | 4 ++-- include/daemon/backend/metadata/merge.hpp | 4 ++-- include/daemon/classes/fs_data.hpp | 4 ++-- include/daemon/classes/rpc_data.hpp | 4 ++-- include/daemon/env.hpp | 4 ++-- include/daemon/handler/rpc_defs.hpp | 4 ++-- include/daemon/main.hpp | 4 ++-- include/daemon/ops/metadentry.hpp | 4 ++-- include/daemon/util.hpp | 4 ++-- include/global/chunk_calc_util.hpp | 4 ++-- include/global/cmake_configure.hpp.in | 4 ++-- include/global/env_util.hpp | 4 ++-- include/global/global_defs.hpp | 4 ++-- include/global/log_util.hpp | 
4 ++-- include/global/metadata.hpp | 4 ++-- include/global/path_util.hpp | 4 ++-- include/global/rpc/distributor.hpp | 4 ++-- include/global/rpc/rpc_types.hpp | 4 ++-- include/global/rpc/rpc_utils.hpp | 4 ++-- include/version.hpp.in | 12 ++++++++++++ scripts/license/header | 4 ++-- src/client/gkfs_functions.cpp | 4 ++-- src/client/hooks.cpp | 4 ++-- src/client/intercept.cpp | 4 ++-- src/client/logging.cpp | 4 ++-- src/client/open_dir.cpp | 4 ++-- src/client/open_file_map.cpp | 4 ++-- src/client/path.cpp | 4 ++-- src/client/preload.cpp | 4 ++-- src/client/preload_context.cpp | 4 ++-- src/client/preload_util.cpp | 4 ++-- src/client/rpc/hg_rpcs.cpp | 4 ++-- src/client/rpc/ld_rpc_data_ws.cpp | 4 ++-- src/client/rpc/ld_rpc_management.cpp | 4 ++-- src/client/rpc/ld_rpc_metadentry.cpp | 4 ++-- src/client/syscalls/detail/syscall_info.c | 4 ++-- src/daemon/backend/data/chunk_storage.cpp | 4 ++-- src/daemon/backend/metadata/db.cpp | 4 ++-- src/daemon/backend/metadata/merge.cpp | 4 ++-- src/daemon/classes/fs_data.cpp | 4 ++-- src/daemon/classes/rpc_data.cpp | 4 ++-- src/daemon/handler/h_data.cpp | 4 ++-- src/daemon/handler/h_metadentry.cpp | 4 ++-- src/daemon/handler/h_preload.cpp | 4 ++-- src/daemon/main.cpp | 4 ++-- src/daemon/ops/metadentry.cpp | 4 ++-- src/daemon/util.cpp | 4 ++-- src/global/env_util.cpp | 4 ++-- src/global/log_util.cpp | 4 ++-- src/global/metadata.cpp | 4 ++-- src/global/path_util.cpp | 4 ++-- src/global/rpc/distributor.cpp | 4 ++-- src/global/rpc/rpc_utils.cpp | 4 ++-- 80 files changed, 170 insertions(+), 158 deletions(-) diff --git a/LICENSE b/LICENSE index 346228777..7d39d1e3a 100644 --- a/LICENSE +++ b/LICENSE @@ -3,8 +3,8 @@ License-Text: MIT License -Copyright (c) 2018-2019, Barcelona Supercomputing Center (BSC), Spain -Copyright (c) 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany +Copyright (c) 2018-2020, Barcelona Supercomputing Center (BSC), Spain +Copyright (c) 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/include/client/env.hpp b/include/client/env.hpp index 054596cf9..e438ab7f4 100644 --- a/include/client/env.hpp +++ b/include/client/env.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/client/gkfs_functions.hpp b/include/client/gkfs_functions.hpp index 532696867..be78f5e30 100644 --- a/include/client/gkfs_functions.hpp +++ b/include/client/gkfs_functions.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). 
diff --git a/include/client/hooks.hpp b/include/client/hooks.hpp index d1328b5c9..dc0a2179a 100644 --- a/include/client/hooks.hpp +++ b/include/client/hooks.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/client/intercept.hpp b/include/client/intercept.hpp index e392f580b..4748850e5 100644 --- a/include/client/intercept.hpp +++ b/include/client/intercept.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/client/logging.hpp b/include/client/logging.hpp index 3ea36307b..d041db52c 100644 --- a/include/client/logging.hpp +++ b/include/client/logging.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/client/make_array.hpp b/include/client/make_array.hpp index 0c1a84e0a..78868b64a 100644 --- a/include/client/make_array.hpp +++ b/include/client/make_array.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/client/open_dir.hpp b/include/client/open_dir.hpp index fc2fbe81b..4ecb310a6 100644 --- a/include/client/open_dir.hpp +++ b/include/client/open_dir.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/client/open_file_map.hpp b/include/client/open_file_map.hpp index 1e84c49fc..7f9d8e170 100644 --- a/include/client/open_file_map.hpp +++ b/include/client/open_file_map.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). 
diff --git a/include/client/path.hpp b/include/client/path.hpp index 2cd832dde..c351aae03 100644 --- a/include/client/path.hpp +++ b/include/client/path.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/client/preload.hpp b/include/client/preload.hpp index a19b03e2e..63d888c89 100644 --- a/include/client/preload.hpp +++ b/include/client/preload.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/client/preload_context.hpp b/include/client/preload_context.hpp index 4f0e3bc5f..e7c0a98b4 100644 --- a/include/client/preload_context.hpp +++ b/include/client/preload_context.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/client/preload_util.hpp b/include/client/preload_util.hpp index a2d0e1553..9771f4ac3 100644 --- a/include/client/preload_util.hpp +++ b/include/client/preload_util.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/client/rpc/hg_rpcs.hpp b/include/client/rpc/hg_rpcs.hpp index 2b7613e28..b8b7a4e63 100644 --- a/include/client/rpc/hg_rpcs.hpp +++ b/include/client/rpc/hg_rpcs.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). 
diff --git a/include/client/rpc/ld_rpc_data_ws.hpp b/include/client/rpc/ld_rpc_data_ws.hpp index e07d0168e..c164a87dc 100644 --- a/include/client/rpc/ld_rpc_data_ws.hpp +++ b/include/client/rpc/ld_rpc_data_ws.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/client/rpc/ld_rpc_management.hpp b/include/client/rpc/ld_rpc_management.hpp index f03a68a40..5fd6be799 100644 --- a/include/client/rpc/ld_rpc_management.hpp +++ b/include/client/rpc/ld_rpc_management.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/client/rpc/ld_rpc_metadentry.hpp b/include/client/rpc/ld_rpc_metadentry.hpp index 7985c5439..6dffb2c8c 100644 --- a/include/client/rpc/ld_rpc_metadentry.hpp +++ b/include/client/rpc/ld_rpc_metadentry.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/client/syscalls.hpp b/include/client/syscalls.hpp index 74067a489..aa47d56f3 100644 --- a/include/client/syscalls.hpp +++ b/include/client/syscalls.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/client/syscalls/args.hpp b/include/client/syscalls/args.hpp index 3cda44ea2..e9a9e5add 100644 --- a/include/client/syscalls/args.hpp +++ b/include/client/syscalls/args.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). 
diff --git a/include/client/syscalls/decoder.hpp b/include/client/syscalls/decoder.hpp index d7068962d..475136652 100644 --- a/include/client/syscalls/decoder.hpp +++ b/include/client/syscalls/decoder.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/client/syscalls/detail/syscall_info.h b/include/client/syscalls/detail/syscall_info.h index 327cd9dbc..3969a4cd9 100644 --- a/include/client/syscalls/detail/syscall_info.h +++ b/include/client/syscalls/detail/syscall_info.h @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/client/syscalls/errno.hpp b/include/client/syscalls/errno.hpp index f1131866b..4a1ae650e 100644 --- a/include/client/syscalls/errno.hpp +++ b/include/client/syscalls/errno.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/client/syscalls/rets.hpp b/include/client/syscalls/rets.hpp index ed31ee230..c9d0dadba 100644 --- a/include/client/syscalls/rets.hpp +++ b/include/client/syscalls/rets.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/client/syscalls/syscall.hpp b/include/client/syscalls/syscall.hpp index 8bd0b1a66..353249fa8 100644 --- a/include/client/syscalls/syscall.hpp +++ b/include/client/syscalls/syscall.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). 
diff --git a/include/config.hpp b/include/config.hpp index cee9d19a6..c962cf8fc 100644 --- a/include/config.hpp +++ b/include/config.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/daemon/backend/data/chunk_storage.hpp b/include/daemon/backend/data/chunk_storage.hpp index ebaeb88b2..af7bd4232 100644 --- a/include/daemon/backend/data/chunk_storage.hpp +++ b/include/daemon/backend/data/chunk_storage.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/daemon/backend/exceptions.hpp b/include/daemon/backend/exceptions.hpp index c4273629d..36e80cfc1 100644 --- a/include/daemon/backend/exceptions.hpp +++ b/include/daemon/backend/exceptions.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/daemon/backend/metadata/db.hpp b/include/daemon/backend/metadata/db.hpp index cb10886ff..17097948d 100644 --- a/include/daemon/backend/metadata/db.hpp +++ b/include/daemon/backend/metadata/db.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/daemon/backend/metadata/merge.hpp b/include/daemon/backend/metadata/merge.hpp index 8b1a2791d..db47cd504 100644 --- a/include/daemon/backend/metadata/merge.hpp +++ b/include/daemon/backend/metadata/merge.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). 
diff --git a/include/daemon/classes/fs_data.hpp b/include/daemon/classes/fs_data.hpp index d061b2017..3759fc8eb 100644 --- a/include/daemon/classes/fs_data.hpp +++ b/include/daemon/classes/fs_data.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/daemon/classes/rpc_data.hpp b/include/daemon/classes/rpc_data.hpp index 0c7453ecd..1ce278a83 100644 --- a/include/daemon/classes/rpc_data.hpp +++ b/include/daemon/classes/rpc_data.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/daemon/env.hpp b/include/daemon/env.hpp index e1f486203..156184507 100644 --- a/include/daemon/env.hpp +++ b/include/daemon/env.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/daemon/handler/rpc_defs.hpp b/include/daemon/handler/rpc_defs.hpp index ceaeac173..92acb6b78 100644 --- a/include/daemon/handler/rpc_defs.hpp +++ b/include/daemon/handler/rpc_defs.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/daemon/main.hpp b/include/daemon/main.hpp index 4e5a628ed..ee455cbe7 100644 --- a/include/daemon/main.hpp +++ b/include/daemon/main.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). 
diff --git a/include/daemon/ops/metadentry.hpp b/include/daemon/ops/metadentry.hpp index dcde355fe..eea41d52f 100644 --- a/include/daemon/ops/metadentry.hpp +++ b/include/daemon/ops/metadentry.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/daemon/util.hpp b/include/daemon/util.hpp index 4aa142dc7..e2a6b910a 100644 --- a/include/daemon/util.hpp +++ b/include/daemon/util.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/global/chunk_calc_util.hpp b/include/global/chunk_calc_util.hpp index 6121b9b1c..10dbf9cde 100644 --- a/include/global/chunk_calc_util.hpp +++ b/include/global/chunk_calc_util.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/global/cmake_configure.hpp.in b/include/global/cmake_configure.hpp.in index 480c90eef..ac06265a1 100644 --- a/include/global/cmake_configure.hpp.in +++ b/include/global/cmake_configure.hpp.in @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/global/env_util.hpp b/include/global/env_util.hpp index c8ad464d4..3c4263eb0 100644 --- a/include/global/env_util.hpp +++ b/include/global/env_util.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). 
diff --git a/include/global/global_defs.hpp b/include/global/global_defs.hpp index 771733c9a..8c9e22a66 100644 --- a/include/global/global_defs.hpp +++ b/include/global/global_defs.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/global/log_util.hpp b/include/global/log_util.hpp index 71a67356e..8ccfac68d 100644 --- a/include/global/log_util.hpp +++ b/include/global/log_util.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/global/metadata.hpp b/include/global/metadata.hpp index 4d1f2ae99..2936c48c9 100644 --- a/include/global/metadata.hpp +++ b/include/global/metadata.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/global/path_util.hpp b/include/global/path_util.hpp index 9b637c416..6729376c4 100644 --- a/include/global/path_util.hpp +++ b/include/global/path_util.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/global/rpc/distributor.hpp b/include/global/rpc/distributor.hpp index 2bced7ae7..79ac371ae 100644 --- a/include/global/rpc/distributor.hpp +++ b/include/global/rpc/distributor.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). 
diff --git a/include/global/rpc/rpc_types.hpp b/include/global/rpc/rpc_types.hpp index 41505cce5..5fbc68375 100644 --- a/include/global/rpc/rpc_types.hpp +++ b/include/global/rpc/rpc_types.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/global/rpc/rpc_utils.hpp b/include/global/rpc/rpc_utils.hpp index 14f487829..f1cfa3f57 100644 --- a/include/global/rpc/rpc_utils.hpp +++ b/include/global/rpc/rpc_utils.hpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/include/version.hpp.in b/include/version.hpp.in index ebeb161c1..736479937 100644 --- a/include/version.hpp.in +++ b/include/version.hpp.in @@ -1,3 +1,15 @@ +/* + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany + + This software was partially supported by the + EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). + + This software was partially supported by the + ADA-FS project under the SPPEXA project funded by the DFG. + + SPDX-License-Identifier: MIT +*/ #ifndef __GKFS_VERSION_HPP #define __GKFS_VERSION_HPP diff --git a/scripts/license/header b/scripts/license/header index 8d3c73be6..fe3b01643 100644 --- a/scripts/license/header +++ b/scripts/license/header @@ -1,5 +1,5 @@ -Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain -Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany +Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain +Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/client/gkfs_functions.cpp b/src/client/gkfs_functions.cpp index 97c333cf5..28521505b 100644 --- a/src/client/gkfs_functions.cpp +++ b/src/client/gkfs_functions.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). 
diff --git a/src/client/hooks.cpp b/src/client/hooks.cpp index 789312bb2..1b8e9b947 100644 --- a/src/client/hooks.cpp +++ b/src/client/hooks.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/client/intercept.cpp b/src/client/intercept.cpp index 0d1d13ab2..1c71e21aa 100644 --- a/src/client/intercept.cpp +++ b/src/client/intercept.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/client/logging.cpp b/src/client/logging.cpp index 8bbeb9312..b88c1601c 100644 --- a/src/client/logging.cpp +++ b/src/client/logging.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/client/open_dir.cpp b/src/client/open_dir.cpp index 3b1cf0862..92638de74 100644 --- a/src/client/open_dir.cpp +++ b/src/client/open_dir.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/client/open_file_map.cpp b/src/client/open_file_map.cpp index 6bb1ff878..5e9eae85f 100644 --- a/src/client/open_file_map.cpp +++ b/src/client/open_file_map.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/client/path.cpp b/src/client/path.cpp index 391588f35..55185b468 100644 --- a/src/client/path.cpp +++ b/src/client/path.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). 
diff --git a/src/client/preload.cpp b/src/client/preload.cpp index 1b6a49e81..a96ebca3b 100644 --- a/src/client/preload.cpp +++ b/src/client/preload.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/client/preload_context.cpp b/src/client/preload_context.cpp index a86969021..869e5f69f 100644 --- a/src/client/preload_context.cpp +++ b/src/client/preload_context.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/client/preload_util.cpp b/src/client/preload_util.cpp index 7796f782c..5085e9a04 100644 --- a/src/client/preload_util.cpp +++ b/src/client/preload_util.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/client/rpc/hg_rpcs.cpp b/src/client/rpc/hg_rpcs.cpp index a7a01df6d..b9a05bae2 100644 --- a/src/client/rpc/hg_rpcs.cpp +++ b/src/client/rpc/hg_rpcs.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/client/rpc/ld_rpc_data_ws.cpp b/src/client/rpc/ld_rpc_data_ws.cpp index 6c133b3db..9c99c62d4 100644 --- a/src/client/rpc/ld_rpc_data_ws.cpp +++ b/src/client/rpc/ld_rpc_data_ws.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). 
diff --git a/src/client/rpc/ld_rpc_management.cpp b/src/client/rpc/ld_rpc_management.cpp index e8244db80..b7b8edd7a 100644 --- a/src/client/rpc/ld_rpc_management.cpp +++ b/src/client/rpc/ld_rpc_management.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/client/rpc/ld_rpc_metadentry.cpp b/src/client/rpc/ld_rpc_metadentry.cpp index 582d92f39..e3cf8f044 100644 --- a/src/client/rpc/ld_rpc_metadentry.cpp +++ b/src/client/rpc/ld_rpc_metadentry.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/client/syscalls/detail/syscall_info.c b/src/client/syscalls/detail/syscall_info.c index a71d945bd..b4449704b 100644 --- a/src/client/syscalls/detail/syscall_info.c +++ b/src/client/syscalls/detail/syscall_info.c @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/daemon/backend/data/chunk_storage.cpp b/src/daemon/backend/data/chunk_storage.cpp index 095582e72..0637ef21b 100644 --- a/src/daemon/backend/data/chunk_storage.cpp +++ b/src/daemon/backend/data/chunk_storage.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/daemon/backend/metadata/db.cpp b/src/daemon/backend/metadata/db.cpp index c1ffdc6a5..adf776594 100644 --- a/src/daemon/backend/metadata/db.cpp +++ b/src/daemon/backend/metadata/db.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). 
diff --git a/src/daemon/backend/metadata/merge.cpp b/src/daemon/backend/metadata/merge.cpp index 861104cdb..e279f36b1 100644 --- a/src/daemon/backend/metadata/merge.cpp +++ b/src/daemon/backend/metadata/merge.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/daemon/classes/fs_data.cpp b/src/daemon/classes/fs_data.cpp index f34bf2b58..1cd689177 100644 --- a/src/daemon/classes/fs_data.cpp +++ b/src/daemon/classes/fs_data.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/daemon/classes/rpc_data.cpp b/src/daemon/classes/rpc_data.cpp index b04b4c2b5..41076f127 100644 --- a/src/daemon/classes/rpc_data.cpp +++ b/src/daemon/classes/rpc_data.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/daemon/handler/h_data.cpp b/src/daemon/handler/h_data.cpp index 8b76d99ff..f697b97ae 100644 --- a/src/daemon/handler/h_data.cpp +++ b/src/daemon/handler/h_data.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/daemon/handler/h_metadentry.cpp b/src/daemon/handler/h_metadentry.cpp index 9b04888b4..c2ee6c346 100644 --- a/src/daemon/handler/h_metadentry.cpp +++ b/src/daemon/handler/h_metadentry.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). 
diff --git a/src/daemon/handler/h_preload.cpp b/src/daemon/handler/h_preload.cpp index b41066464..d807410d8 100644 --- a/src/daemon/handler/h_preload.cpp +++ b/src/daemon/handler/h_preload.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/daemon/main.cpp b/src/daemon/main.cpp index f19a5d1ab..10544bdc9 100644 --- a/src/daemon/main.cpp +++ b/src/daemon/main.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/daemon/ops/metadentry.cpp b/src/daemon/ops/metadentry.cpp index b2842453b..9ef31a333 100644 --- a/src/daemon/ops/metadentry.cpp +++ b/src/daemon/ops/metadentry.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/daemon/util.cpp b/src/daemon/util.cpp index c79fd97f5..7c6d6bcc8 100644 --- a/src/daemon/util.cpp +++ b/src/daemon/util.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/global/env_util.cpp b/src/global/env_util.cpp index 0ba2a8278..258b7d52e 100644 --- a/src/global/env_util.cpp +++ b/src/global/env_util.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/global/log_util.cpp b/src/global/log_util.cpp index 4196b7fa2..7f8b766de 100644 --- a/src/global/log_util.cpp +++ b/src/global/log_util.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). 
diff --git a/src/global/metadata.cpp b/src/global/metadata.cpp index 7ae72ded9..3eae4dd79 100644 --- a/src/global/metadata.cpp +++ b/src/global/metadata.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/global/path_util.cpp b/src/global/path_util.cpp index 4d23b99a1..56ecfbd99 100644 --- a/src/global/path_util.cpp +++ b/src/global/path_util.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/global/rpc/distributor.cpp b/src/global/rpc/distributor.cpp index 2bce921a9..d8ea2803a 100644 --- a/src/global/rpc/distributor.cpp +++ b/src/global/rpc/distributor.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). diff --git a/src/global/rpc/rpc_utils.cpp b/src/global/rpc/rpc_utils.cpp index c632bfe1b..dd06d3a4d 100644 --- a/src/global/rpc/rpc_utils.cpp +++ b/src/global/rpc/rpc_utils.cpp @@ -1,6 +1,6 @@ /* - Copyright 2018-2019, Barcelona Supercomputing Center (BSC), Spain - Copyright 2015-2019, Johannes Gutenberg Universitaet Mainz, Germany + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). -- GitLab From 2fd6c14b2e0cd59b9b3ca7a1ca834e342cf74984 Mon Sep 17 00:00:00 2001 From: Marc Vef Date: Tue, 18 Feb 2020 14:56:10 +0100 Subject: [PATCH 09/25] Renaming gkfs_config namespace to gkfs::config --- include/config.hpp | 98 ++++++++++++++-------------- src/client/gkfs_functions.cpp | 2 +- src/client/preload_context.cpp | 4 +- src/client/preload_util.cpp | 4 +- src/client/rpc/ld_rpc_data_ws.cpp | 30 ++++----- src/client/rpc/ld_rpc_metadentry.cpp | 8 +-- src/daemon/backend/metadata/db.cpp | 2 +- src/daemon/handler/h_data.cpp | 32 ++++----- src/daemon/main.cpp | 26 ++++---- src/global/metadata.cpp | 20 +++--- 10 files changed, 114 insertions(+), 112 deletions(-) diff --git a/include/config.hpp b/include/config.hpp index c962cf8fc..a10b444ea 100644 --- a/include/config.hpp +++ b/include/config.hpp @@ -20,53 +20,55 @@ #define CLIENT_ENV_PREFIX "LIBGKFS_" #define DAEMON_ENV_PREFIX "GKFS_" -namespace gkfs_config { - - constexpr auto hostfile_path = "./gkfs_hosts.txt"; - - namespace io { - /* - * Zero buffer before read. This is relevant if sparse files are used. - * If buffer is not zeroed, sparse regions contain invalid data. 
- */ - constexpr auto zero_buffer_before_read = false; - } - - namespace logging { - constexpr auto client_log_path = "/tmp/gkfs_client.log"; - constexpr auto daemon_log_path = "/tmp/gkfs_daemon.log"; - - constexpr auto client_log_level = "info,errors,critical,mercury"; - constexpr auto daemon_log_level = 4; //info - } - - namespace metadata { - - // which metadata should be considered apart from size and mode - constexpr auto use_atime = false; - constexpr auto use_ctime = false; - constexpr auto use_mtime = false; - constexpr auto use_link_cnt = false; - constexpr auto use_blocks = false; - } - - namespace rpc { - constexpr auto chunksize = 524288; // in bytes (e.g., 524288 == 512KB) - //size of preallocated buffer to hold directory entries in rpc call - constexpr auto dirents_buff_size = (8 * 1024 * 1024); // 8 mega - /* - * Indicates the number of concurrent progress to drive I/O operations of chunk files to and from local file systems - * The value is directly mapped to created Argobots xstreams, controlled in a single pool with ABT_snoozer scheduler - */ - constexpr auto daemon_io_xstreams = 8; - // Number of threads used for RPC handlers at the daemon - constexpr auto daemon_handler_xstreams = 8; - } - - namespace rocksdb { - // Write-ahead logging of rocksdb - constexpr auto use_write_ahead_log = false; - } -} +namespace gkfs { +namespace config { + +constexpr auto hostfile_path = "./gkfs_hosts.txt"; + +namespace io { +/* + * Zero buffer before read. This is relevant if sparse files are used. + * If buffer is not zeroed, sparse regions contain invalid data. + */ +constexpr auto zero_buffer_before_read = false; +} // namespace io + +namespace logging { +constexpr auto client_log_path = "/tmp/gkfs_client.log"; +constexpr auto daemon_log_path = "/tmp/gkfs_daemon.log"; + +constexpr auto client_log_level = "info,errors,critical,hermes"; +constexpr auto daemon_log_level = 4; //info +} // namespace logging + +namespace metadata { +// which metadata should be considered apart from size and mode +constexpr auto use_atime = false; +constexpr auto use_ctime = false; +constexpr auto use_mtime = false; +constexpr auto use_link_cnt = false; +constexpr auto use_blocks = false; +} // namespace metadata + +namespace rpc { +constexpr auto chunksize = 524288; // in bytes (e.g., 524288 == 512KB) +//size of preallocated buffer to hold directory entries in rpc call +constexpr auto dirents_buff_size = (8 * 1024 * 1024); // 8 mega +/* + * Indicates the number of concurrent progress to drive I/O operations of chunk files to and from local file systems + * The value is directly mapped to created Argobots xstreams, controlled in a single pool with ABT_snoozer scheduler + */ +constexpr auto daemon_io_xstreams = 8; +// Number of threads used for RPC handlers at the daemon +constexpr auto daemon_handler_xstreams = 8; +} // namespace rpc + +namespace rocksdb { +// Write-ahead logging of rocksdb +constexpr auto use_write_ahead_log = false; +} // namespace rocksdb + +} // namespace gkfs +} // namespace config #endif //GEKKOFS_CONFIG_HPP diff --git a/src/client/gkfs_functions.cpp b/src/client/gkfs_functions.cpp index 28521505b..43837d12a 100644 --- a/src/client/gkfs_functions.cpp +++ b/src/client/gkfs_functions.cpp @@ -452,7 +452,7 @@ ssize_t gkfs::func::pread(std::shared_ptr file, char* buf, size_t coun } // Zeroing buffer before read is only relevant for sparse files. Otherwise sparse regions contain invalid data. 
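For reference, the hunk above replaces the flat gkfs_config namespace with nested gkfs::config namespaces, so call sites now read, for example, gkfs::config::rpc::chunksize or gkfs::config::io::zero_buffer_before_read. The standalone sketch below is not part of the patch; it mirrors a reduced copy of that layout with the values shown above, and the request size and the ceiling division in main() are purely illustrative and do not come from the project.

    #include <cstdint>
    #include <iostream>

    namespace gkfs {
    namespace config {
    namespace io {
    constexpr auto zero_buffer_before_read = false;
    } // namespace io
    namespace rpc {
    constexpr auto chunksize = 524288; // bytes (512 KiB), copied from the hunk above
    } // namespace rpc
    } // namespace config
    } // namespace gkfs

    int main() {
        // illustrative request size, not taken from the project
        constexpr std::uint64_t request = 2 * 1024 * 1024; // 2 MiB
        // ceiling division: how many whole chunks such a request spans
        constexpr auto nchunks =
                (request + gkfs::config::rpc::chunksize - 1) / gkfs::config::rpc::chunksize;
        std::cout << "a " << request << " byte request spans " << nchunks
                  << " chunks of " << gkfs::config::rpc::chunksize << " bytes\n";
        std::cout << "zero buffer before read: " << std::boolalpha
                  << gkfs::config::io::zero_buffer_before_read << '\n';
    }

Because the settings are constexpr, derived quantities like the one above can be evaluated at compile time by any translation unit that includes config.hpp.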
- if (gkfs_config::io::zero_buffer_before_read) { + if (gkfs::config::io::zero_buffer_before_read) { memset(buf, 0, sizeof(char) * count); } auto ret = rpc_send::read(file->path(), buf, offset, count); diff --git a/src/client/preload_context.cpp b/src/client/preload_context.cpp index 869e5f69f..66923da8a 100644 --- a/src/client/preload_context.cpp +++ b/src/client/preload_context.cpp @@ -50,10 +50,10 @@ void PreloadContext::init_logging() { const std::string log_opts = - gkfs::env::get_var(gkfs::env::LOG, gkfs_config::logging::client_log_level); + gkfs::env::get_var(gkfs::env::LOG, gkfs::config::logging::client_log_level); const std::string log_output = - gkfs::env::get_var(gkfs::env::LOG_OUTPUT, gkfs_config::logging::client_log_path); + gkfs::env::get_var(gkfs::env::LOG_OUTPUT, gkfs::config::logging::client_log_path); #ifdef GKFS_DEBUG_BUILD // atoi returns 0 if no int conversion can be performed, which works diff --git a/src/client/preload_util.cpp b/src/client/preload_util.cpp index 5085e9a04..c1725a10f 100644 --- a/src/client/preload_util.cpp +++ b/src/client/preload_util.cpp @@ -49,7 +49,7 @@ int gkfs::client::metadata_to_stat(const std::string& path, const Metadata& md, attr.st_uid = CTX->fs_conf()->uid; attr.st_gid = CTX->fs_conf()->gid; attr.st_rdev = 0; - attr.st_blksize = gkfs_config::rpc::chunksize; + attr.st_blksize = gkfs::config::rpc::chunksize; attr.st_blocks = 0; memset(&attr.st_atim, 0, sizeof(timespec)); @@ -149,7 +149,7 @@ hermes::endpoint lookup_endpoint(const std::string& uri, void gkfs::client::load_hosts() { string hostfile; - hostfile = gkfs::env::get_var(gkfs::env::HOSTS_FILE, gkfs_config::hostfile_path); + hostfile = gkfs::env::get_var(gkfs::env::HOSTS_FILE, gkfs::config::hostfile_path); vector> hosts; try { diff --git a/src/client/rpc/ld_rpc_data_ws.cpp b/src/client/rpc/ld_rpc_data_ws.cpp index 9c99c62d4..907f95c76 100644 --- a/src/client/rpc/ld_rpc_data_ws.cpp +++ b/src/client/rpc/ld_rpc_data_ws.cpp @@ -39,8 +39,8 @@ ssize_t rpc_send::write(const string& path, const void* buf, const bool append_f // which interval to look for chunks off64_t offset = append_flag ? 
in_offset : (updated_metadentry_size - write_size); - auto chnk_start = chnk_id_for_offset(offset, gkfs_config::rpc::chunksize); - auto chnk_end = chnk_id_for_offset((offset + write_size) - 1, gkfs_config::rpc::chunksize); + auto chnk_start = chnk_id_for_offset(offset, gkfs::config::rpc::chunksize); + auto chnk_end = chnk_id_for_offset((offset + write_size) - 1, gkfs::config::rpc::chunksize); // Collect all chunk ids within count that have the same destination so // that those are send in one rpc bulk transfer @@ -101,16 +101,16 @@ ssize_t rpc_send::write(const string& path, const void* buf, const bool append_f for (const auto& target : targets) { // total chunk_size for target - auto total_chunk_size = target_chnks[target].size() * gkfs_config::rpc::chunksize; + auto total_chunk_size = target_chnks[target].size() * gkfs::config::rpc::chunksize; // receiver of first chunk must subtract the offset from first chunk if (target == chnk_start_target) { - total_chunk_size -= chnk_lpad(offset, gkfs_config::rpc::chunksize); + total_chunk_size -= chnk_lpad(offset, gkfs::config::rpc::chunksize); } // receiver of last chunk must subtract if (target == chnk_end_target) { - total_chunk_size -= chnk_rpad(offset + write_size, gkfs_config::rpc::chunksize); + total_chunk_size -= chnk_rpad(offset + write_size, gkfs::config::rpc::chunksize); } auto endp = CTX->hosts().at(target); @@ -123,7 +123,7 @@ ssize_t rpc_send::write(const string& path, const void* buf, const bool append_f path, // first offset in targets is the chunk with // a potential offset - chnk_lpad(offset, gkfs_config::rpc::chunksize), + chnk_lpad(offset, gkfs::config::rpc::chunksize), target, CTX->hosts().size(), // number of chunks handled by that destination @@ -195,8 +195,8 @@ ssize_t rpc_send::read(const string& path, void* buf, const off64_t offset, cons // Calculate chunkid boundaries and numbers so that daemons know in which // interval to look for chunks - auto chnk_start = chnk_id_for_offset(offset, gkfs_config::rpc::chunksize); - auto chnk_end = chnk_id_for_offset((offset + read_size - 1), gkfs_config::rpc::chunksize); + auto chnk_start = chnk_id_for_offset(offset, gkfs::config::rpc::chunksize); + auto chnk_end = chnk_id_for_offset((offset + read_size - 1), gkfs::config::rpc::chunksize); // Collect all chunk ids within count that have the same destination so // that those are send in one rpc bulk transfer @@ -257,16 +257,16 @@ ssize_t rpc_send::read(const string& path, void* buf, const off64_t offset, cons for (const auto& target : targets) { // total chunk_size for target - auto total_chunk_size = target_chnks[target].size() * gkfs_config::rpc::chunksize; + auto total_chunk_size = target_chnks[target].size() * gkfs::config::rpc::chunksize; // receiver of first chunk must subtract the offset from first chunk if (target == chnk_start_target) { - total_chunk_size -= chnk_lpad(offset, gkfs_config::rpc::chunksize); + total_chunk_size -= chnk_lpad(offset, gkfs::config::rpc::chunksize); } // receiver of last chunk must subtract if (target == chnk_end_target) { - total_chunk_size -= chnk_rpad(offset + read_size, gkfs_config::rpc::chunksize); + total_chunk_size -= chnk_rpad(offset + read_size, gkfs::config::rpc::chunksize); } auto endp = CTX->hosts().at(target); @@ -279,7 +279,7 @@ ssize_t rpc_send::read(const string& path, void* buf, const off64_t offset, cons path, // first offset in targets is the chunk with // a potential offset - chnk_lpad(offset, gkfs_config::rpc::chunksize), + chnk_lpad(offset, gkfs::config::rpc::chunksize), target, 
CTX->hosts().size(), // number of chunks handled by that destination @@ -352,8 +352,8 @@ int rpc_send::trunc_data(const std::string& path, size_t current_size, size_t ne // Find out which data servers need to delete data chunks in order to // contact only them - const unsigned int chunk_start = chnk_id_for_offset(new_size, gkfs_config::rpc::chunksize); - const unsigned int chunk_end = chnk_id_for_offset(current_size - new_size - 1, gkfs_config::rpc::chunksize); + const unsigned int chunk_start = chnk_id_for_offset(new_size, gkfs::config::rpc::chunksize); + const unsigned int chunk_end = chnk_id_for_offset(current_size - new_size - 1, gkfs::config::rpc::chunksize); std::unordered_set hosts; for (unsigned int chunk_id = chunk_start; chunk_id <= chunk_end; ++chunk_id) { @@ -436,7 +436,7 @@ rpc_send::ChunkStat rpc_send::chunk_stat() { } } - unsigned long chunk_size = gkfs_config::rpc::chunksize; + unsigned long chunk_size = gkfs::config::rpc::chunksize; unsigned long chunk_total = 0; unsigned long chunk_free = 0; diff --git a/src/client/rpc/ld_rpc_metadentry.cpp b/src/client/rpc/ld_rpc_metadentry.cpp index e3cf8f044..e08be4ba9 100644 --- a/src/client/rpc/ld_rpc_metadentry.cpp +++ b/src/client/rpc/ld_rpc_metadentry.cpp @@ -155,7 +155,7 @@ int rpc_send::rm_node(const std::string& path, const bool remove_metadentry_only std::vector> handles; // Small files - if (static_cast(size / gkfs_config::rpc::chunksize) < CTX->hosts().size()) { + if (static_cast(size / gkfs::config::rpc::chunksize) < CTX->hosts().size()) { auto endp = CTX->hosts().at(CTX->distributor()->locate_file_metadata(path)); @@ -165,7 +165,7 @@ int rpc_send::rm_node(const std::string& path, const bool remove_metadentry_only handles.emplace_back(ld_network_service->post(endp, in)); uint64_t chnk_start = 0; - uint64_t chnk_end = size / gkfs_config::rpc::chunksize; + uint64_t chnk_end = size / gkfs::config::rpc::chunksize; for (uint64_t chnk_id = chnk_start; chnk_id <= chnk_end; chnk_id++) { const auto target = CTX->hosts().at( @@ -360,10 +360,10 @@ void rpc_send::get_dirents(OpenDir& open_dir) { * It turns out that this operation is increadibly slow for such a big * buffer. Moreover we don't need a zeroed buffer here. */ - auto large_buffer = std::unique_ptr(new char[gkfs_config::rpc::dirents_buff_size]); + auto large_buffer = std::unique_ptr(new char[gkfs::config::rpc::dirents_buff_size]); //XXX there is a rounding error here depending on the number of targets... 
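The write, read and trunc paths above all derive their per-target transfer sizes from the same three helpers declared in include/global/chunk_calc_util.hpp, which this patch only touches for the license header. The sketch below is a self-contained illustration of that arithmetic; the helper bodies are assumptions that reproduce how the call sites above use them (chunk index for an offset, left padding of the first chunk, right padding of the last chunk) and are not the project's actual implementation, and the offset and write size in main() are made-up values.

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    constexpr std::uint64_t chunksize = 524288; // gkfs::config::rpc::chunksize

    // chunk index that contains a given byte offset
    std::uint64_t chnk_id_for_offset(std::uint64_t offset, std::uint64_t cs) {
        return offset / cs;
    }
    // bytes skipped at the front of the first chunk
    std::uint64_t chnk_lpad(std::uint64_t offset, std::uint64_t cs) {
        return offset % cs;
    }
    // bytes left unused at the tail of the last chunk (0 if already aligned)
    std::uint64_t chnk_rpad(std::uint64_t end_offset, std::uint64_t cs) {
        return (cs - (end_offset % cs)) % cs;
    }

    int main() {
        const std::uint64_t offset = 100000;     // illustrative values
        const std::uint64_t write_size = 1200000;

        auto chnk_start = chnk_id_for_offset(offset, chunksize);
        auto chnk_end = chnk_id_for_offset(offset + write_size - 1, chunksize);
        auto nchunks = chnk_end - chnk_start + 1;

        // total payload = whole chunks minus the unused head and tail paddings
        auto total = nchunks * chunksize
                     - chnk_lpad(offset, chunksize)
                     - chnk_rpad(offset + write_size, chunksize);
        assert(total == write_size);
        std::cout << "chunks " << chnk_start << ".." << chnk_end
                  << ", payload " << total << " bytes\n";
    }

In the RPC code this total is additionally split per target: consecutive chunk ids are spread across daemons by the distributor, so only the daemon owning the first chunk subtracts the left padding and only the one owning the last chunk subtracts the right padding, exactly as the chnk_start_target and chnk_end_target branches above do.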
- const std::size_t per_host_buff_size = gkfs_config::rpc::dirents_buff_size / targets.size(); + const std::size_t per_host_buff_size = gkfs::config::rpc::dirents_buff_size / targets.size(); // expose local buffers for RMA from servers std::vector exposed_buffers; diff --git a/src/daemon/backend/metadata/db.cpp b/src/daemon/backend/metadata/db.cpp index adf776594..48f0d3415 100644 --- a/src/daemon/backend/metadata/db.cpp +++ b/src/daemon/backend/metadata/db.cpp @@ -31,7 +31,7 @@ MetadataDB::MetadataDB(const std::string& path) : path(path) { options.create_if_missing = true; options.merge_operator.reset(new MetadataMergeOperator); MetadataDB::optimize_rocksdb_options(options); - write_opts.disableWAL = !(gkfs_config::rocksdb::use_write_ahead_log); + write_opts.disableWAL = !(gkfs::config::rocksdb::use_write_ahead_log); rdb::DB* rdb_ptr; auto s = rocksdb::DB::Open(options, path, &rdb_ptr); if (!s.ok()) { diff --git a/src/daemon/handler/h_data.cpp b/src/daemon/handler/h_data.cpp index f697b97ae..539ae4d0f 100644 --- a/src/daemon/handler/h_data.cpp +++ b/src/daemon/handler/h_data.cpp @@ -186,7 +186,7 @@ static hg_return_t rpc_srv_write_data(hg_handle_t handle) { * Last chunk can also happen if only one chunk is written. This is covered by 2 and 3. */ // temporary variables - auto transfer_size = (bulk_size <= gkfs_config::rpc::chunksize) ? bulk_size : gkfs_config::rpc::chunksize; + auto transfer_size = (bulk_size <= gkfs::config::rpc::chunksize) ? bulk_size : gkfs::config::rpc::chunksize; uint64_t origin_offset; uint64_t local_offset; // task structures for async writing @@ -205,9 +205,9 @@ static hg_return_t rpc_srv_write_data(hg_handle_t handle) { // offset case. Only relevant in the first iteration of the loop and if the chunk hashes to this host if (chnk_id_file == in.chunk_start && in.offset > 0) { // if only 1 destination and 1 chunk (small write) the transfer_size == bulk_size - auto offset_transfer_size = (in.offset + bulk_size <= gkfs_config::rpc::chunksize) ? bulk_size - : static_cast( - gkfs_config::rpc::chunksize - in.offset); + auto offset_transfer_size = (in.offset + bulk_size <= gkfs::config::rpc::chunksize) ? bulk_size + : static_cast( + gkfs::config::rpc::chunksize - in.offset); ret = margo_bulk_transfer(mid, HG_BULK_PULL, hgi->addr, in.bulk_handle, 0, bulk_handle, 0, offset_transfer_size); if (ret != HG_SUCCESS) { @@ -225,10 +225,10 @@ static hg_return_t rpc_srv_write_data(hg_handle_t handle) { local_offset = in.total_chunk_size - chnk_size_left_host; // origin offset of a chunk is dependent on a given offset in a write operation if (in.offset > 0) - origin_offset = (gkfs_config::rpc::chunksize - in.offset) + - ((chnk_id_file - in.chunk_start) - 1) * gkfs_config::rpc::chunksize; + origin_offset = (gkfs::config::rpc::chunksize - in.offset) + + ((chnk_id_file - in.chunk_start) - 1) * gkfs::config::rpc::chunksize; else - origin_offset = (chnk_id_file - in.chunk_start) * gkfs_config::rpc::chunksize; + origin_offset = (chnk_id_file - in.chunk_start) * gkfs::config::rpc::chunksize; // last chunk might have different transfer_size if (chnk_id_curr == in.chunk_n - 1) transfer_size = chnk_size_left_host; @@ -386,7 +386,7 @@ static hg_return_t rpc_srv_read_data(hg_handle_t handle) { // temporary traveling pointer auto chnk_ptr = static_cast(bulk_buf); // temporary variables - auto transfer_size = (bulk_size <= gkfs_config::rpc::chunksize) ? bulk_size : gkfs_config::rpc::chunksize; + auto transfer_size = (bulk_size <= gkfs::config::rpc::chunksize) ? 
bulk_size : gkfs::config::rpc::chunksize; // tasks structures vector abt_tasks(in.chunk_n); vector task_eventuals(in.chunk_n); @@ -403,9 +403,9 @@ static hg_return_t rpc_srv_read_data(hg_handle_t handle) { // Only relevant in the first iteration of the loop and if the chunk hashes to this host if (chnk_id_file == in.chunk_start && in.offset > 0) { // if only 1 destination and 1 chunk (small read) the transfer_size == bulk_size - auto offset_transfer_size = (in.offset + bulk_size <= gkfs_config::rpc::chunksize) ? bulk_size - : static_cast( - gkfs_config::rpc::chunksize - in.offset); + auto offset_transfer_size = (in.offset + bulk_size <= gkfs::config::rpc::chunksize) ? bulk_size + : static_cast( + gkfs::config::rpc::chunksize - in.offset); // Setting later transfer offsets local_offsets[chnk_id_curr] = 0; origin_offsets[chnk_id_curr] = 0; @@ -419,10 +419,10 @@ static hg_return_t rpc_srv_read_data(hg_handle_t handle) { // origin offset of a chunk is dependent on a given offset in a write operation if (in.offset > 0) origin_offsets[chnk_id_curr] = - (gkfs_config::rpc::chunksize - in.offset) + - ((chnk_id_file - in.chunk_start) - 1) * gkfs_config::rpc::chunksize; + (gkfs::config::rpc::chunksize - in.offset) + + ((chnk_id_file - in.chunk_start) - 1) * gkfs::config::rpc::chunksize; else - origin_offsets[chnk_id_curr] = (chnk_id_file - in.chunk_start) * gkfs_config::rpc::chunksize; + origin_offsets[chnk_id_curr] = (chnk_id_file - in.chunk_start) * gkfs::config::rpc::chunksize; // last chunk might have different transfer_size if (chnk_id_curr == in.chunk_n - 1) transfer_size = chnk_size_left_host; @@ -524,10 +524,10 @@ static hg_return_t rpc_srv_trunc_data(hg_handle_t handle) { } GKFS_DATA->spdlogger()->debug("{}() path: '{}', length: {}", __func__, in.path, in.length); - unsigned int chunk_start = chnk_id_for_offset(in.length, gkfs_config::rpc::chunksize); + unsigned int chunk_start = chnk_id_for_offset(in.length, gkfs::config::rpc::chunksize); // If we trunc in the the middle of a chunk, do not delete that chunk - auto left_pad = chnk_lpad(in.length, gkfs_config::rpc::chunksize); + auto left_pad = chnk_lpad(in.length, gkfs::config::rpc::chunksize); if (left_pad != 0) { GKFS_DATA->storage()->truncate_chunk(in.path, chunk_start, left_pad); ++chunk_start; diff --git a/src/daemon/main.cpp b/src/daemon/main.cpp index 10544bdc9..12cabe633 100644 --- a/src/daemon/main.cpp +++ b/src/daemon/main.cpp @@ -58,7 +58,7 @@ void init_environment() { GKFS_DATA->spdlogger()->debug("{}() Initializing storage backend: '{}'", __func__, chunk_storage_path); bfs::create_directories(chunk_storage_path); try { - GKFS_DATA->storage(std::make_shared(chunk_storage_path, gkfs_config::rpc::chunksize)); + GKFS_DATA->storage(std::make_shared(chunk_storage_path, gkfs::config::rpc::chunksize)); } catch (const std::exception& e) { GKFS_DATA->spdlogger()->error("{}() Failed to initialize storage backend: {}", __func__, e.what()); throw; @@ -84,11 +84,11 @@ void init_environment() { } // TODO set metadata configurations. 
these have to go into a user configurable file that is parsed here - GKFS_DATA->atime_state(gkfs_config::metadata::use_atime); - GKFS_DATA->mtime_state(gkfs_config::metadata::use_mtime); - GKFS_DATA->ctime_state(gkfs_config::metadata::use_ctime); - GKFS_DATA->link_cnt_state(gkfs_config::metadata::use_link_cnt); - GKFS_DATA->blocks_state(gkfs_config::metadata::use_blocks); + GKFS_DATA->atime_state(gkfs::config::metadata::use_atime); + GKFS_DATA->mtime_state(gkfs::config::metadata::use_mtime); + GKFS_DATA->ctime_state(gkfs::config::metadata::use_ctime); + GKFS_DATA->link_cnt_state(gkfs::config::metadata::use_link_cnt); + GKFS_DATA->blocks_state(gkfs::config::metadata::use_blocks); // Create metadentry for root directory Metadata root_md{S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO}; try { @@ -135,8 +135,8 @@ void destroy_enviroment() { } void init_io_tasklet_pool() { - assert(gkfs_config::rpc::daemon_io_xstreams >= 0); - unsigned int xstreams_num = gkfs_config::rpc::daemon_io_xstreams; + assert(gkfs::config::rpc::daemon_io_xstreams >= 0); + unsigned int xstreams_num = gkfs::config::rpc::daemon_io_xstreams; //retrieve the pool of the just created scheduler ABT_pool pool; @@ -177,7 +177,7 @@ void init_rpc_server(const string & protocol_port) { MARGO_SERVER_MODE, &hg_options, HG_TRUE, - gkfs_config::rpc::daemon_handler_xstreams); + gkfs::config::rpc::daemon_handler_xstreams); if (mid == MARGO_INSTANCE_NULL) { throw runtime_error("Failed to initialize the Margo RPC server"); } @@ -241,7 +241,7 @@ void shutdown_handler(int dummy) { } void initialize_loggers() { - std::string path = gkfs_config::logging::daemon_log_path; + std::string path = gkfs::config::logging::daemon_log_path; // Try to get log path from env variable std::string env_path_key = DAEMON_ENV_PREFIX; env_path_key += "DAEMON_LOG_PATH"; @@ -250,7 +250,7 @@ void initialize_loggers() { path = env_path; } - spdlog::level::level_enum level = gkfs::logging::get_level(gkfs_config::logging::daemon_log_level); + spdlog::level::level_enum level = gkfs::logging::get_level(gkfs::config::logging::daemon_log_level); // Try to get log path from env variable std::string env_level_key = DAEMON_ENV_PREFIX; env_level_key += "LOG_LEVEL"; @@ -308,7 +308,7 @@ int main(int argc, const char* argv[]) { #else cout << "Create check parents: OFF" << endl; #endif - cout << "Chunk size: " << gkfs_config::rpc::chunksize << " bytes" << endl; + cout << "Chunk size: " << gkfs::config::rpc::chunksize << " bytes" << endl; return 0; } @@ -337,7 +337,7 @@ int main(int argc, const char* argv[]) { hosts_file = vm["hosts-file"].as(); } else { hosts_file = - gkfs::env::get_var(gkfs::env::HOSTS_FILE, gkfs_config::hostfile_path); + gkfs::env::get_var(gkfs::env::HOSTS_FILE, gkfs::config::hostfile_path); } GKFS_DATA->hosts_file(hosts_file); diff --git a/src/global/metadata.cpp b/src/global/metadata.cpp index 3eae4dd79..b3cb40c4d 100644 --- a/src/global/metadata.cpp +++ b/src/global/metadata.cpp @@ -76,31 +76,31 @@ Metadata::Metadata(const std::string& binary_str) { ptr += read; // The order is important. don't change. 
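The metadata.cpp hunks above only rename the flag namespace, but they make the on-disk contract easy to miss: a metadentry value is the mandatory mode and size followed by whichever optional fields are enabled in gkfs::config::metadata, all joined by the MSP separator and always emitted and parsed in the same order, which is why the comment above warns that the order must not change. The sketch below only illustrates that contract; MSP's real value and the exact field set and types are defined in the project headers, so the '|' separator, the single optional field and the helper names here are stand-ins.

    #include <cstdint>
    #include <iostream>
    #include <sstream>
    #include <string>

    constexpr char MSP = '|';         // stand-in for the project's separator
    constexpr bool use_atime = false; // stand-in for gkfs::config::metadata::use_atime

    std::string serialize(unsigned mode, std::int64_t size, std::int64_t atime) {
        std::ostringstream s;
        s << mode << MSP << size;     // mandatory fields, fixed order
        if (use_atime)
            s << MSP << atime;        // optional field, gated by the same flag on both ends
        return s.str();
    }

    void deserialize(const std::string& binary_str) {
        std::istringstream s(binary_str);
        std::string field;
        std::getline(s, field, MSP);
        auto mode = std::stoul(field);
        std::getline(s, field, MSP);
        auto size = std::stol(field);
        std::int64_t atime = 0;
        if (use_atime && std::getline(s, field, MSP))
            atime = std::stol(field);
        std::cout << "mode=" << mode << " size=" << size << " atime=" << atime << '\n';
    }

    int main() {
        deserialize(serialize(0755, 4096, 0));
    }

The practical consequence is that the daemon and any tool reading the database must be built with the same gkfs::config::metadata flags, otherwise the optional fields are attributed to the wrong positions.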
- if (gkfs_config::metadata::use_atime) { + if (gkfs::config::metadata::use_atime) { assert(*ptr == MSP); atime_ = static_cast(std::stol(++ptr, &read)); assert(read > 0); ptr += read; } - if (gkfs_config::metadata::use_mtime) { + if (gkfs::config::metadata::use_mtime) { assert(*ptr == MSP); mtime_ = static_cast(std::stol(++ptr, &read)); assert(read > 0); ptr += read; } - if (gkfs_config::metadata::use_ctime) { + if (gkfs::config::metadata::use_ctime) { assert(*ptr == MSP); ctime_ = static_cast(std::stol(++ptr, &read)); assert(read > 0); ptr += read; } - if (gkfs_config::metadata::use_link_cnt) { + if (gkfs::config::metadata::use_link_cnt) { assert(*ptr == MSP); link_count_ = static_cast(std::stoul(++ptr, &read)); assert(read > 0); ptr += read; } - if (gkfs_config::metadata::use_blocks) { // last one will not encounter a delimiter anymore + if (gkfs::config::metadata::use_blocks) { // last one will not encounter a delimiter anymore assert(*ptr == MSP); blocks_ = static_cast(std::stoul(++ptr, &read)); assert(read > 0); @@ -126,23 +126,23 @@ std::string Metadata::serialize() const { s += fmt::format_int(mode_).c_str(); // add mandatory mode s += MSP; s += fmt::format_int(size_).c_str(); // add mandatory size - if (gkfs_config::metadata::use_atime) { + if (gkfs::config::metadata::use_atime) { s += MSP; s += fmt::format_int(atime_).c_str(); } - if (gkfs_config::metadata::use_mtime) { + if (gkfs::config::metadata::use_mtime) { s += MSP; s += fmt::format_int(mtime_).c_str(); } - if (gkfs_config::metadata::use_ctime) { + if (gkfs::config::metadata::use_ctime) { s += MSP; s += fmt::format_int(ctime_).c_str(); } - if (gkfs_config::metadata::use_link_cnt) { + if (gkfs::config::metadata::use_link_cnt) { s += MSP; s += fmt::format_int(link_count_).c_str(); } - if (gkfs_config::metadata::use_blocks) { + if (gkfs::config::metadata::use_blocks) { s += MSP; s += fmt::format_int(blocks_).c_str(); } -- GitLab From 3083d1abae603becf88352cdeb6ffff40d4fd224 Mon Sep 17 00:00:00 2001 From: Marc Vef Date: Tue, 18 Feb 2020 15:11:17 +0100 Subject: [PATCH 10/25] Renaming gkfs::path_util namespace to gkfs::path; indent fixes --- include/client/path.hpp | 25 +++++----- include/global/path_util.hpp | 25 +++++----- src/client/gkfs_functions.cpp | 6 +-- src/client/hooks.cpp | 4 +- src/client/path.cpp | 58 ++++++++++++----------- src/client/preload_context.cpp | 16 +++---- src/daemon/backend/data/chunk_storage.cpp | 6 +-- src/daemon/backend/metadata/db.cpp | 8 ++-- src/global/path_util.cpp | 20 +++++--- 9 files changed, 90 insertions(+), 78 deletions(-) diff --git a/include/client/path.hpp b/include/client/path.hpp index c351aae03..ad603d4c1 100644 --- a/include/client/path.hpp +++ b/include/client/path.hpp @@ -15,23 +15,24 @@ #include namespace gkfs { - namespace path { +namespace path { - unsigned int match_components(const std::string& path, unsigned int& path_components, - const std::vector& components); +unsigned int match_components(const std::string& path, unsigned int& path_components, + const std::vector& components); - bool resolve(const std::string& path, std::string& resolved, bool resolve_last_link = true); +bool resolve(const std::string& path, std::string& resolved, bool resolve_last_link = true); - std::string get_sys_cwd(); +std::string get_sys_cwd(); - void set_sys_cwd(const std::string& path); +void set_sys_cwd(const std::string& path); - void set_env_cwd(const std::string& path); +void set_env_cwd(const std::string& path); - void unset_env_cwd(); +void unset_env_cwd(); - void init_cwd(); +void 
init_cwd(); - void set_cwd(const std::string& path, bool internal); - } -} +void set_cwd(const std::string& path, bool internal); + +} // namespace path +} // namespace gkfs diff --git a/include/global/path_util.hpp b/include/global/path_util.hpp index 6729376c4..61a4387c7 100644 --- a/include/global/path_util.hpp +++ b/include/global/path_util.hpp @@ -18,26 +18,27 @@ #include namespace gkfs { - namespace path_util { +namespace path { - constexpr unsigned int max_length = 4096; // 4k chars +constexpr unsigned int max_length = 4096; // 4k chars - constexpr char separator = '/'; // PATH SEPARATOR +constexpr char separator = '/'; // PATH SEPARATOR - bool is_relative(const std::string& path); +bool is_relative(const std::string& path); - bool is_absolute(const std::string& path); +bool is_absolute(const std::string& path); - bool has_trailing_slash(const std::string& path); +bool has_trailing_slash(const std::string& path); - std::string prepend_path(const std::string& path, const char* raw_path); +std::string prepend_path(const std::string& path, const char* raw_path); - std::string absolute_to_relative(const std::string& root_path, const std::string& absolute_path); // unused ATM +std::string absolute_to_relative(const std::string& root_path, const std::string& absolute_path); // unused ATM - std::string dirname(const std::string& path); +std::string dirname(const std::string& path); - std::vector split_path(const std::string& path); - } -} +std::vector split_path(const std::string& path); + +} // namespace gkfs +} // namespace path #endif //GEKKOFS_PATH_UTIL_HPP diff --git a/src/client/gkfs_functions.cpp b/src/client/gkfs_functions.cpp index 43837d12a..5f45285c9 100644 --- a/src/client/gkfs_functions.cpp +++ b/src/client/gkfs_functions.cpp @@ -57,7 +57,7 @@ std::shared_ptr gkfs::func::metadata(const string& path, bool follow_l int gkfs::func::check_parent_dir(const std::string& path) { #if CREATE_CHECK_PARENTS - auto p_comp = gkfs::path_util::dirname(path); + auto p_comp = path::dirname(path); auto md = gkfs::func::metadata(p_comp); if (!md) { if (errno == ENOENT) { @@ -232,7 +232,7 @@ int gkfs::func::statfs(sys_statfs* buf) { buf->f_files = 0; buf->f_ffree = 0; buf->f_fsid = {0, 0}; - buf->f_namelen = gkfs::path_util::max_length; + buf->f_namelen = path::max_length; buf->f_frsize = 0; buf->f_flags = ST_NOATIME | ST_NODIRATIME | ST_NOSUID | ST_NODEV | ST_SYNCHRONOUS; @@ -250,7 +250,7 @@ int gkfs::func::statvfs(sys_statvfs* buf) { buf->f_ffree = 0; buf->f_favail = 0; buf->f_fsid = 0; - buf->f_namemax = gkfs::path_util::max_length; + buf->f_namemax = path::max_length; buf->f_frsize = 0; buf->f_flag = ST_NOATIME | ST_NODIRATIME | ST_NOSUID | ST_NODEV | ST_SYNCHRONOUS; diff --git a/src/client/hooks.cpp b/src/client/hooks.cpp index 1b8e9b947..68a586fd6 100644 --- a/src/client/hooks.cpp +++ b/src/client/hooks.cpp @@ -515,7 +515,7 @@ int hook_chdir(const char* path) { //TODO get complete path from relativize_path instead of // removing mountdir and then adding again here rel_path.insert(0, CTX->mountdir()); - if (gkfs::path_util::has_trailing_slash(rel_path)) { + if (gkfs::path::has_trailing_slash(rel_path)) { // open_dir is '/' rel_path.pop_back(); } @@ -543,7 +543,7 @@ int hook_fchdir(unsigned int fd) { } std::string new_path = CTX->mountdir() + open_dir->path(); - if (gkfs::path_util::has_trailing_slash(new_path)) { + if (gkfs::path::has_trailing_slash(new_path)) { // open_dir is '/' new_path.pop_back(); } diff --git a/src/client/path.cpp b/src/client/path.cpp index 55185b468..c1aeffe3d 100644 
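The gkfs_functions.cpp and hooks.cpp hunks above only re-spell the namespace of the path helpers they call. For readers unfamiliar with those helpers, the sketch below shows the behaviour the call sites rely on; the bodies are assumptions consistent with that usage (dirname of a file path yields its parent, a trailing separator marks a directory path) and are not the implementations from src/global/path_util.cpp, which this patch shows only partially.

    #include <cassert>
    #include <string>

    namespace gkfs {
    namespace path {

    constexpr char separator = '/';

    bool has_trailing_slash(const std::string& p) {
        return !p.empty() && p.back() == separator;
    }

    // parent directory of an absolute path ("/x" yields "/")
    std::string dirname(const std::string& p) {
        auto pos = p.find_last_of(separator);
        return (pos == 0 || pos == std::string::npos) ? std::string{"/"}
                                                      : p.substr(0, pos);
    }

    } // namespace path
    } // namespace gkfs

    int main() {
        assert(gkfs::path::has_trailing_slash("/tmp/gkfs/"));
        assert(!gkfs::path::has_trailing_slash("/tmp/gkfs"));
        assert(gkfs::path::dirname("/tmp/gkfs/file") == "/tmp/gkfs");
        assert(gkfs::path::dirname("/file") == "/");
    }

This is why hook_chdir and hook_fchdir above pop a trailing slash before storing the new working directory, and why check_parent_dir in gkfs_functions.cpp can stat the parent of a path it is about to create.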
--- a/src/client/path.cpp
+++ b/src/client/path.cpp
@@ -30,9 +30,10 @@ extern "C" {
 using namespace std;
-static const string excluded_paths[2] = {"sys/", "proc/"};
+namespace gkfs {
+namespace path {
-namespace p_util = gkfs::path_util;
+static const string excluded_paths[2] = {"sys/", "proc/"};
 /** Match components in path
 *
@@ -42,14 +43,14 @@ namespace p_util = gkfs::path_util;
 * `path_components` will be set to the total number of components found in `path`
 *
 * Example:
 * ```
 * unsigned int tot_comp;
 * path_match_components("/matched/head/with/tail", &tot_comp, ["matched", "head", "no"]) == 2;
 * tot_comp == 4;
 * ```
 */
-unsigned int gkfs::path::match_components(const string& path, unsigned int& path_components,
- const ::vector& components) {
+unsigned int match_components(const string& path, unsigned int& path_components,
+ const ::vector& components) {
 unsigned int matched = 0;
 unsigned int processed_components = 0;
 string::size_type comp_size = 0; // size of current component
@@ -60,7 +61,7 @@ unsigned int gkfs::path::match_components(const string& path, unsigned int& path
 start = end;
 // Find next component
- end = path.find(p_util::separator, start);
+ end = path.find(path::separator, start);
 if (end == string::npos) {
 end = path.size();
 }
@@ -88,12 +89,12 @@ unsigned int gkfs::path::match_components(const string& path, unsigned int& path
 * returns true if the resolved path fall inside GekkoFS namespace,
 * and false otherwise.
 */
-bool gkfs::path::resolve(const string& path, string& resolved, bool resolve_last_link) {
+bool resolve(const string& path, string& resolved, bool resolve_last_link) {
 LOG(DEBUG, "path: \"{}\", resolved: \"{}\", resolve_last_link: {}",
 path, resolved, resolve_last_link);
- assert(p_util::is_absolute(path));
+ assert(path::is_absolute(path));
 for (auto& excl_path: excluded_paths) {
 if (path.compare(1, excl_path.length(), excl_path) == 0) {
@@ -118,12 +119,12 @@ bool gkfs::path::resolve(const string& path, string& resolved, bool resolve_last
 start = end;
 /* Skip sequence of multiple path-separators. */
- while (start < path.size() && path[start] == p_util::separator) {
+ while (start < path.size() && path[start] == path::separator) {
 ++start;
 }
 // Find next component
- end = path.find(p_util::separator, start);
+ end = path.find(path::separator, start);
 if (end == string::npos) {
 end = path.size();
 }
@@ -145,7 +146,7 @@ bool gkfs::path::resolve(const string& path, string& resolved, bool resolve_last
 * the previous slash position should be stored.
 * The following search could be avoided.
*/ - last_slash_pos = resolved.find_last_of(p_util::separator); + last_slash_pos = resolved.find_last_of(path::separator); } if (resolved_components > 0) { if (matched_components == resolved_components) { @@ -157,7 +158,7 @@ bool gkfs::path::resolve(const string& path, string& resolved, bool resolve_last } // add `/` to the reresolved path - resolved.push_back(p_util::separator); + resolved.push_back(path::separator); last_slash_pos = resolved.size() - 1; resolved.append(path, start, comp_size); @@ -191,7 +192,7 @@ bool gkfs::path::resolve(const string& path, string& resolved, bool resolve_last resolved = link_resolved.get(); matched_components = match_components(resolved, resolved_components, mnt_components); // set matched counter to value coherent with the new path - last_slash_pos = resolved.find_last_of(p_util::separator); + last_slash_pos = resolved.find_last_of(path::separator); continue; } else if ((!S_ISDIR(st.st_mode)) && (end != path.size())) { resolved.append(path, end, string::npos); @@ -211,28 +212,28 @@ bool gkfs::path::resolve(const string& path, string& resolved, bool resolve_last } if (resolved.empty()) { - resolved.push_back(p_util::separator); + resolved.push_back(path::separator); } LOG(DEBUG, "external: \"{}\"", resolved); return false; } -string gkfs::path::get_sys_cwd() { - char temp[p_util::max_length]; - if (long ret = syscall_no_intercept(SYS_getcwd, temp, p_util::max_length) < 0) { +string get_sys_cwd() { + char temp[path::max_length]; + if (long ret = syscall_no_intercept(SYS_getcwd, temp, path::max_length) < 0) { throw ::system_error(syscall_error_code(ret), ::system_category(), - "Failed to retrieve current working directory"); + "Failed to retrieve current working directory"); } // getcwd could return "(unreachable)" in some cases - if (temp[0] != p_util::separator) { + if (temp[0] != path::separator) { throw ::runtime_error( "Current working directory is unreachable"); } return {temp}; } -void gkfs::path::set_sys_cwd(const string& path) { +void set_sys_cwd(const string& path) { LOG(DEBUG, "Changing working directory to \"{}\"", path); @@ -241,11 +242,11 @@ void gkfs::path::set_sys_cwd(const string& path) { ::strerror(syscall_error_code(ret))); throw ::system_error(syscall_error_code(ret), ::system_category(), - "Failed to set system current working directory"); + "Failed to set system current working directory"); } } -void gkfs::path::set_env_cwd(const string& path) { +void set_env_cwd(const string& path) { LOG(DEBUG, "Setting {} to \"{}\"", gkfs::env::CWD, path); @@ -254,11 +255,11 @@ void gkfs::path::set_env_cwd(const string& path) { gkfs::env::CWD, ::strerror(errno)); throw ::system_error(errno, ::system_category(), - "Failed to set environment current working directory"); + "Failed to set environment current working directory"); } } -void gkfs::path::unset_env_cwd() { +void unset_env_cwd() { LOG(DEBUG, "Clearing {}()", gkfs::env::CWD); @@ -269,11 +270,11 @@ void gkfs::path::unset_env_cwd() { throw ::system_error(errno, ::system_category(), - "Failed to unset environment current working directory"); + "Failed to unset environment current working directory"); } } -void gkfs::path::init_cwd() { +void init_cwd() { const char* env_cwd = ::getenv(gkfs::env::CWD); if (env_cwd != nullptr) { CTX->cwd(env_cwd); @@ -282,7 +283,7 @@ void gkfs::path::init_cwd() { } } -void gkfs::path::set_cwd(const string& path, bool internal) { +void set_cwd(const string& path, bool internal) { if (internal) { set_sys_cwd(CTX->mountdir()); set_env_cwd(path); @@ -292,3 +293,6 @@ 
void gkfs::path::set_cwd(const string& path, bool internal) { } CTX->cwd(path); } + +} // namespace path +} // namespace gkfs \ No newline at end of file diff --git a/src/client/preload_context.cpp b/src/client/preload_context.cpp index 66923da8a..040ab5db7 100644 --- a/src/client/preload_context.cpp +++ b/src/client/preload_context.cpp @@ -79,9 +79,9 @@ PreloadContext::init_logging() { } void PreloadContext::mountdir(const std::string& path) { - assert(gkfs::path_util::is_absolute(path)); - assert(!gkfs::path_util::has_trailing_slash(path)); - mountdir_components_ = gkfs::path_util::split_path(path); + assert(gkfs::path::is_absolute(path)); + assert(!gkfs::path::has_trailing_slash(path)); + mountdir_components_ = gkfs::path::split_path(path); mountdir_ = path; } @@ -136,11 +136,11 @@ RelativizeStatus PreloadContext::relativize_fd_path(int dirfd, std::string path; - if (raw_path[0] != gkfs::path_util::separator) { + if (raw_path[0] != gkfs::path::separator) { // path is relative if (dirfd == AT_FDCWD) { // path is relative to cwd - path = gkfs::path_util::prepend_path(cwd_, raw_path); + path = gkfs::path::prepend_path(cwd_, raw_path); } else { if (!ofm_->exist(dirfd)) { return RelativizeStatus::fd_unknown; @@ -152,7 +152,7 @@ RelativizeStatus PreloadContext::relativize_fd_path(int dirfd, } path = mountdir_; path.append(dir->path()); - path.push_back(gkfs::path_util::separator); + path.push_back(gkfs::path::separator); path.append(raw_path); } } else { @@ -176,11 +176,11 @@ bool PreloadContext::relativize_path(const char* raw_path, std::string& relative std::string path; - if (raw_path[0] != gkfs::path_util::separator) { + if (raw_path[0] != gkfs::path::separator) { /* Path is not absolute, we need to prepend CWD; * First reserve enough space to minimize memory copy */ - path = gkfs::path_util::prepend_path(cwd_, raw_path); + path = gkfs::path::prepend_path(cwd_, raw_path); } else { path = raw_path; } diff --git a/src/daemon/backend/data/chunk_storage.cpp b/src/daemon/backend/data/chunk_storage.cpp index 0637ef21b..3f194bd20 100644 --- a/src/daemon/backend/data/chunk_storage.cpp +++ b/src/daemon/backend/data/chunk_storage.cpp @@ -26,7 +26,7 @@ namespace bfs = boost::filesystem; using namespace std; string ChunkStorage::absolute(const string& internal_path) const { - assert(gkfs::path_util::is_relative(internal_path)); + assert(gkfs::path::is_relative(internal_path)); return root_path + '/' + internal_path; } @@ -34,7 +34,7 @@ ChunkStorage::ChunkStorage(const string& path, const size_t chunksize) : root_path(path), chunksize(chunksize) { //TODO check path: absolute, exists, permission to write etc... 
- assert(gkfs::path_util::is_absolute(root_path)); + assert(gkfs::path::is_absolute(root_path)); /* Initialize logger */ log = spdlog::get(LOGGER_NAME); @@ -44,7 +44,7 @@ ChunkStorage::ChunkStorage(const string& path, const size_t chunksize) : } string ChunkStorage::get_chunks_dir(const string& file_path) { - assert(gkfs::path_util::is_absolute(file_path)); + assert(gkfs::path::is_absolute(file_path)); string chunk_dir = file_path.substr(1); ::replace(chunk_dir.begin(), chunk_dir.end(), '/', ':'); return chunk_dir; diff --git a/src/daemon/backend/metadata/db.cpp b/src/daemon/backend/metadata/db.cpp index 48f0d3415..592e6028e 100644 --- a/src/daemon/backend/metadata/db.cpp +++ b/src/daemon/backend/metadata/db.cpp @@ -60,8 +60,8 @@ std::string MetadataDB::get(const std::string& key) const { } void MetadataDB::put(const std::string& key, const std::string& val) { - assert(gkfs::path_util::is_absolute(key)); - assert(key == "/" || !gkfs::path_util::has_trailing_slash(key)); + assert(gkfs::path::is_absolute(key)); + assert(key == "/" || !gkfs::path::has_trailing_slash(key)); auto cop = CreateOperand(val); auto s = db->Merge(write_opts, key, cop.serialize()); @@ -133,9 +133,9 @@ void MetadataDB::decrease_size(const std::string& key, size_t size) { */ std::vector> MetadataDB::get_dirents(const std::string& dir) const { auto root_path = dir; - assert(gkfs::path_util::is_absolute(root_path)); + assert(gkfs::path::is_absolute(root_path)); //add trailing slash if missing - if (!gkfs::path_util::has_trailing_slash(root_path) && root_path.size() != 1) { + if (!gkfs::path::has_trailing_slash(root_path) && root_path.size() != 1) { //add trailing slash only if missing and is not the root_folder "/" root_path.push_back('/'); } diff --git a/src/global/path_util.cpp b/src/global/path_util.cpp index 56ecfbd99..671556702 100644 --- a/src/global/path_util.cpp +++ b/src/global/path_util.cpp @@ -19,17 +19,20 @@ using namespace std; -bool gkfs::path_util::is_relative(const string& path) { +namespace gkfs { +namespace path { + +bool is_relative(const string& path) { return (!path.empty()) && (path.front() != separator); } -bool gkfs::path_util::is_absolute(const string& path) { +bool is_absolute(const string& path) { return (!path.empty()) && (path.front() == separator); } -bool gkfs::path_util::has_trailing_slash(const string& path) { +bool has_trailing_slash(const string& path) { return (!path.empty()) && (path.back() == separator); } @@ -52,7 +55,7 @@ bool gkfs::path_util::has_trailing_slash(const string& path) { * prepend_path("/tmp/prefix", "./my/path") == "/tmp/prefix/./my/path" * ``` */ -string gkfs::path_util::prepend_path(const string& prefix_path, const char* raw_path) { +string prepend_path(const string& prefix_path, const char* raw_path) { assert(!has_trailing_slash(prefix_path)); ::size_t raw_len = ::strlen(raw_path); string res; @@ -70,7 +73,7 @@ string gkfs::path_util::prepend_path(const string& prefix_path, const char* raw_ * Example: * split_path("/first/second/third") == ["first", "second", "third"] */ -::vector gkfs::path_util::split_path(const string& path) { +::vector split_path(const string& path) { ::vector tokens; size_t start = string::npos; size_t end = (path.front() != separator) ? 0 : 1; @@ -92,7 +95,7 @@ string gkfs::path_util::prepend_path(const string& prefix_path, const char* raw_ * If @absolute_path do not start at the given @root_path an empty string will be returned. * NOTE: Trailing slash will be stripped from the new constructed relative path. 
*/ -string gkfs::path_util::absolute_to_relative(const string& root_path, const string& absolute_path) { +string absolute_to_relative(const string& root_path, const string& absolute_path) { assert(is_absolute(root_path)); assert(is_absolute(absolute_path)); assert(!has_trailing_slash(root_path)); @@ -130,7 +133,7 @@ string gkfs::path_util::absolute_to_relative(const string& root_path, const stri * @param path * @return */ -string gkfs::path_util::dirname(const string& path) { +string dirname(const string& path) { assert(path.size() > 1 || path.front() == separator); assert(path.size() == 1 || !has_trailing_slash(path)); @@ -142,3 +145,6 @@ string gkfs::path_util::dirname(const string& path) { } return path.substr(0, parent_path_size); } + +} // namespace +} // namespace gkfs \ No newline at end of file -- GitLab From a9f317e853f125c05976dd8169e86c4f2e49345d Mon Sep 17 00:00:00 2001 From: Marc Vef Date: Wed, 19 Feb 2020 16:51:06 +0100 Subject: [PATCH 11/25] rpc_send to gkfs::rpc namespace. renaming sending functions to avoid name clash with rpc types --- include/client/rpc/hg_rpcs.hpp | 13 ++-- include/client/rpc/ld_rpc_data_ws.hpp | 28 ++++---- include/client/rpc/ld_rpc_management.hpp | 8 ++- include/client/rpc/ld_rpc_metadentry.hpp | 26 ++++---- src/client/gkfs_functions.cpp | 36 +++++----- src/client/preload.cpp | 2 +- src/client/rpc/ld_rpc_data_ws.cpp | 18 +++-- src/client/rpc/ld_rpc_management.cpp | 8 ++- src/client/rpc/ld_rpc_metadentry.cpp | 83 +++++++++++++----------- 9 files changed, 123 insertions(+), 99 deletions(-) diff --git a/include/client/rpc/hg_rpcs.hpp b/include/client/rpc/hg_rpcs.hpp index b8b7a4e63..499d209cd 100644 --- a/include/client/rpc/hg_rpcs.hpp +++ b/include/client/rpc/hg_rpcs.hpp @@ -34,19 +34,22 @@ #include #include -namespace hermes { namespace detail { +namespace hermes { +namespace detail { -struct hg_void_t { }; +struct hg_void_t { +}; - static HG_INLINE hg_return_t - hg_proc_void_t(hg_proc_t proc, void* data) { +static HG_INLINE hg_return_t +hg_proc_void_t(hg_proc_t proc, void* data) { (void) proc; (void) data; return HG_SUCCESS; } -}} // namespace hermes::detail +} +} // namespace hermes::detail namespace gkfs { namespace rpc { diff --git a/include/client/rpc/ld_rpc_data_ws.hpp b/include/client/rpc/ld_rpc_data_ws.hpp index c164a87dc..51a7ec6cf 100644 --- a/include/client/rpc/ld_rpc_data_ws.hpp +++ b/include/client/rpc/ld_rpc_data_ws.hpp @@ -15,25 +15,25 @@ #ifndef GEKKOFS_PRELOAD_C_DATA_WS_HPP #define GEKKOFS_PRELOAD_C_DATA_WS_HPP +namespace gkfs { +namespace rpc { -namespace rpc_send { +struct ChunkStat { + unsigned long chunk_size; + unsigned long chunk_total; + unsigned long chunk_free; +}; - struct ChunkStat { - unsigned long chunk_size; - unsigned long chunk_total; - unsigned long chunk_free; - }; +ssize_t forward_write(const std::string& path, const void* buf, bool append_flag, off64_t in_offset, + size_t write_size, int64_t updated_metadentry_size); - ssize_t write(const std::string& path, const void* buf, bool append_flag, off64_t in_offset, - size_t write_size, int64_t updated_metadentry_size); +ssize_t forward_read(const std::string& path, void* buf, off64_t offset, size_t read_size); - ssize_t read(const std::string& path, void* buf, off64_t offset, size_t read_size); +int forward_truncate(const std::string& path, size_t current_size, size_t new_size); - int trunc_data(const std::string& path, size_t current_size, size_t new_size); - - ChunkStat chunk_stat(); - -} +ChunkStat forward_get_chunk_stat(); +} // namespace rpc +} // namespace gkfs 
#endif //GEKKOFS_PRELOAD_C_DATA_WS_HPP diff --git a/include/client/rpc/ld_rpc_management.hpp b/include/client/rpc/ld_rpc_management.hpp index 5fd6be799..592ff5db9 100644 --- a/include/client/rpc/ld_rpc_management.hpp +++ b/include/client/rpc/ld_rpc_management.hpp @@ -15,10 +15,12 @@ #ifndef GEKKOFS_MARGO_RPC_MANAGMENT_HPP #define GEKKOFS_MARGO_RPC_MANAGMENT_HPP -namespace rpc_send { +namespace gkfs { +namespace rpc { - bool get_fs_config(); +bool forward_get_fs_config(); -} // end namespace rpc_send +} // namespace rpc +} // namespace gkfs #endif //GEKKOFS_MARGO_RPC_NANAGMENT_HPP diff --git a/include/client/rpc/ld_rpc_metadentry.hpp b/include/client/rpc/ld_rpc_metadentry.hpp index 6dffb2c8c..09f64ed80 100644 --- a/include/client/rpc/ld_rpc_metadentry.hpp +++ b/include/client/rpc/ld_rpc_metadentry.hpp @@ -24,32 +24,34 @@ class OpenDir; class Metadata; -namespace rpc_send { +namespace gkfs { +namespace rpc { - int mk_node(const std::string& path, mode_t mode); +int forward_create(const std::string& path, mode_t mode); - int stat(const std::string& path, std::string& attr); +int forward_stat(const std::string& path, std::string& attr); - int rm_node(const std::string& path, bool remove_metadentry_only, ssize_t size); +int forward_remove(const std::string& path, bool remove_metadentry_only, ssize_t size); - int decr_size(const std::string& path, size_t length); +int forward_decr_size(const std::string& path, size_t length); - int update_metadentry(const std::string& path, const Metadata& md, const MetadentryUpdateFlags& md_flags); +int forward_update_metadentry(const std::string& path, const Metadata& md, const MetadentryUpdateFlags& md_flags); - int update_metadentry_size(const std::string& path, size_t size, off64_t offset, bool append_flag, - off64_t& ret_size); +int forward_update_metadentry_size(const std::string& path, size_t size, off64_t offset, bool append_flag, + off64_t& ret_size); - int get_metadentry_size(const std::string& path, off64_t& ret_size); +int forward_get_metadentry_size(const std::string& path, off64_t& ret_size); - void get_dirents(OpenDir& open_dir); +void forward_get_dirents(OpenDir& open_dir); #ifdef HAS_SYMLINKS - int mk_symlink(const std::string& path, const std::string& target_path); +int forward_mk_symlink(const std::string& path, const std::string& target_path); #endif -} // end namespace rpc_send +} // namespace rpc +} // namespace gkfs #endif //GEKKOFS_PRELOAD_C_METADENTRY_HPP diff --git a/src/client/gkfs_functions.cpp b/src/client/gkfs_functions.cpp index 5f45285c9..edff6f7e2 100644 --- a/src/client/gkfs_functions.cpp +++ b/src/client/gkfs_functions.cpp @@ -36,7 +36,7 @@ using namespace std; std::shared_ptr gkfs::func::metadata(const string& path, bool follow_links) { std::string attr; - auto err = rpc_send::stat(path, attr); + auto err = gkfs::rpc::forward_stat(path, attr); if (err) { return nullptr; } @@ -44,7 +44,7 @@ std::shared_ptr gkfs::func::metadata(const string& path, bool follow_l if (follow_links) { Metadata md{attr}; while (md.is_link()) { - err = rpc_send::stat(md.target_path(), attr); + err = gkfs::rpc::forward_stat(md.target_path(), attr); if (err) { return nullptr; } @@ -187,7 +187,7 @@ int gkfs::func::mk_node(const std::string& path, mode_t mode) { if (check_parent_dir(path)) { return -1; } - return rpc_send::mk_node(path, mode); + return gkfs::rpc::forward_create(path, mode); } /** @@ -201,7 +201,7 @@ int gkfs::func::rm_node(const std::string& path) { return -1; } bool has_data = S_ISREG(md->mode()) && (md->size() != 0); - return 
rpc_send::rm_node(path, !has_data, md->size()); + return gkfs::rpc::forward_remove(path, !has_data, md->size()); } int gkfs::func::access(const std::string& path, const int mask, bool follow_links) { @@ -223,7 +223,7 @@ int gkfs::func::stat(const string& path, struct stat* buf, bool follow_links) { } int gkfs::func::statfs(sys_statfs* buf) { - auto blk_stat = rpc_send::chunk_stat(); + auto blk_stat = gkfs::rpc::forward_get_chunk_stat(); buf->f_type = 0; buf->f_bsize = blk_stat.chunk_size; buf->f_blocks = blk_stat.chunk_total; @@ -241,7 +241,7 @@ int gkfs::func::statfs(sys_statfs* buf) { int gkfs::func::statvfs(sys_statvfs* buf) { init_ld_env_if_needed(); - auto blk_stat = rpc_send::chunk_stat(); + auto blk_stat = gkfs::rpc::forward_get_chunk_stat(); buf->f_bsize = blk_stat.chunk_size; buf->f_blocks = blk_stat.chunk_total; buf->f_bfree = blk_stat.chunk_free; @@ -271,7 +271,7 @@ off_t gkfs::func::lseek(shared_ptr gkfs_fd, off_t offset, unsigned int break; case SEEK_END: { off64_t file_size; - auto err = rpc_send::get_metadentry_size(gkfs_fd->path(), file_size); + auto err = gkfs::rpc::forward_get_metadentry_size(gkfs_fd->path(), file_size); if (err < 0) { errno = err; // Negative numbers are explicitly for error codes return -1; @@ -305,12 +305,12 @@ int gkfs::func::truncate(const std::string& path, off_t old_size, off_t new_size return 0; } - if (rpc_send::decr_size(path, new_size)) { + if (gkfs::rpc::forward_decr_size(path, new_size)) { LOG(DEBUG, "Failed to decrease size"); return -1; } - if (rpc_send::trunc_data(path, old_size, new_size)) { + if (gkfs::rpc::forward_truncate(path, old_size, new_size)) { LOG(DEBUG, "Failed to truncate data"); return -1; } @@ -365,14 +365,14 @@ ssize_t gkfs::func::pwrite(std::shared_ptr file, const char* buf, size ssize_t ret = 0; long updated_size = 0; - ret = rpc_send::update_metadentry_size(*path, count, offset, append_flag, updated_size); + ret = gkfs::rpc::forward_update_metadentry_size(*path, count, offset, append_flag, updated_size); if (ret != 0) { LOG(ERROR, "update_metadentry_size() failed with ret {}", ret); return ret; // ERR } - ret = rpc_send::write(*path, buf, append_flag, offset, count, updated_size); + ret = gkfs::rpc::forward_write(*path, buf, append_flag, offset, count, updated_size); if (ret < 0) { - LOG(WARNING, "rpc_send::write() failed with ret {}", ret); + LOG(WARNING, "gkfs::rpc::forward_write() failed with ret {}", ret); } return ret; // return written size or -1 as error } @@ -455,9 +455,9 @@ ssize_t gkfs::func::pread(std::shared_ptr file, char* buf, size_t coun if (gkfs::config::io::zero_buffer_before_read) { memset(buf, 0, sizeof(char) * count); } - auto ret = rpc_send::read(file->path(), buf, offset, count); + auto ret = gkfs::rpc::forward_read(file->path(), buf, offset, count); if (ret < 0) { - LOG(WARNING, "rpc_send::read() failed with ret {}", ret); + LOG(WARNING, "gkfs::rpc::forward_read() failed with ret {}", ret); } // XXX check that we don't try to read past end of the file return ret; // return read size or -1 as error @@ -492,7 +492,7 @@ int gkfs::func::opendir(const std::string& path) { } auto open_dir = std::make_shared(path); - rpc_send::get_dirents(*open_dir); + gkfs::rpc::forward_get_dirents(*open_dir); return CTX->file_map()->add(open_dir); } @@ -510,12 +510,12 @@ int gkfs::func::rmdir(const std::string& path) { } auto open_dir = std::make_shared(path); - rpc_send::get_dirents(*open_dir); + gkfs::rpc::forward_get_dirents(*open_dir); if (open_dir->size() != 0) { errno = ENOTEMPTY; return -1; } - return 
rpc_send::rm_node(path, true, 0); + return gkfs::rpc::forward_remove(path, true, 0); } int gkfs::func::getdents(unsigned int fd, @@ -653,7 +653,7 @@ int gkfs::func::mk_symlink(const std::string& path, const std::string& target_pa return -1; } - return rpc_send::mk_symlink(path, target_path); + return gkfs::rpc::forward_mk_symlink(path, target_path); } int gkfs::func::readlink(const std::string& path, char* buf, int bufsize) { diff --git a/src/client/preload.cpp b/src/client/preload.cpp index a96ebca3b..94b7b43f0 100644 --- a/src/client/preload.cpp +++ b/src/client/preload.cpp @@ -101,7 +101,7 @@ void init_ld_environment_() { LOG(INFO, "Retrieving file system configuration..."); - if (!rpc_send::get_fs_config()) { + if (!gkfs::rpc::forward_get_fs_config()) { exit_error_msg(EXIT_FAILURE, "Unable to fetch file system configurations from daemon process through RPC."); } diff --git a/src/client/rpc/ld_rpc_data_ws.cpp b/src/client/rpc/ld_rpc_data_ws.cpp index 907f95c76..f3a8f5a48 100644 --- a/src/client/rpc/ld_rpc_data_ws.cpp +++ b/src/client/rpc/ld_rpc_data_ws.cpp @@ -23,15 +23,18 @@ using namespace std; +namespace gkfs { +namespace rpc { + // TODO If we decide to keep this functionality with one segment, the function can be merged mostly. // Code is mostly redundant /** * Sends an RPC request to a specific node to pull all chunks that belong to him */ -ssize_t rpc_send::write(const string& path, const void* buf, const bool append_flag, - const off64_t in_offset, const size_t write_size, - const int64_t updated_metadentry_size) { +ssize_t forward_write(const string& path, const void* buf, const bool append_flag, + const off64_t in_offset, const size_t write_size, + const int64_t updated_metadentry_size) { assert(write_size > 0); @@ -191,7 +194,7 @@ ssize_t rpc_send::write(const string& path, const void* buf, const bool append_f /** * Sends an RPC request to a specific node to push all chunks that belong to him */ -ssize_t rpc_send::read(const string& path, void* buf, const off64_t offset, const size_t read_size) { +ssize_t forward_read(const string& path, void* buf, const off64_t offset, const size_t read_size) { // Calculate chunkid boundaries and numbers so that daemons know in which // interval to look for chunks @@ -345,7 +348,7 @@ ssize_t rpc_send::read(const string& path, void* buf, const off64_t offset, cons return error ? -1 : out_size; } -int rpc_send::trunc_data(const std::string& path, size_t current_size, size_t new_size) { +int forward_truncate(const std::string& path, size_t current_size, size_t new_size) { assert(current_size > new_size); bool error = false; @@ -411,7 +414,7 @@ int rpc_send::trunc_data(const std::string& path, size_t current_size, size_t ne return error ? 
-1 : 0; } -rpc_send::ChunkStat rpc_send::chunk_stat() { +ChunkStat forward_get_chunk_stat() { std::vector> handles; @@ -462,3 +465,6 @@ rpc_send::ChunkStat rpc_send::chunk_stat() { return {chunk_size, chunk_total, chunk_free}; } + +} // namespace rpc +} // namespace gkfs \ No newline at end of file diff --git a/src/client/rpc/ld_rpc_management.cpp b/src/client/rpc/ld_rpc_management.cpp index b7b8edd7a..73fadf230 100644 --- a/src/client/rpc/ld_rpc_management.cpp +++ b/src/client/rpc/ld_rpc_management.cpp @@ -18,11 +18,14 @@ #include +namespace gkfs { +namespace rpc { + /** * Gets fs configuration information from the running daemon and transfers it to the memory of the library * @return */ -bool rpc_send::get_fs_config() { +bool forward_get_fs_config() { auto endp = CTX->hosts().at(CTX->local_host_id()); gkfs::rpc::fs_config::output out; @@ -56,3 +59,6 @@ bool rpc_send::get_fs_config() { return true; } + +} // namespace rpc +} // namespace gkfs diff --git a/src/client/rpc/ld_rpc_metadentry.cpp b/src/client/rpc/ld_rpc_metadentry.cpp index e08be4ba9..5db66bc19 100644 --- a/src/client/rpc/ld_rpc_metadentry.cpp +++ b/src/client/rpc/ld_rpc_metadentry.cpp @@ -24,7 +24,10 @@ using namespace std; -int rpc_send::mk_node(const std::string& path, const mode_t mode) { +namespace gkfs { +namespace rpc { + +int forward_create(const std::string& path, const mode_t mode) { int err = EUNKNOWN; auto endp = CTX->hosts().at(CTX->distributor()->locate_file_metadata(path)); @@ -54,7 +57,7 @@ int rpc_send::mk_node(const std::string& path, const mode_t mode) { return err; } -int rpc_send::stat(const std::string& path, string& attr) { +int forward_stat(const std::string& path, string& attr) { auto endp = CTX->hosts().at(CTX->distributor()->locate_file_metadata(path)); @@ -85,37 +88,7 @@ int rpc_send::stat(const std::string& path, string& attr) { return 0; } -int rpc_send::decr_size(const std::string& path, size_t length) { - - auto endp = CTX->hosts().at(CTX->distributor()->locate_file_metadata(path)); - - try { - - LOG(DEBUG, "Sending RPC ..."); - // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can - // retry for RPC_TRIES (see old commits with margo) - // TODO(amiranda): hermes will eventually provide a post(endpoint) - // returning one result and a broadcast(endpoint_set) returning a - // result_set. When that happens we can remove the .at(0) :/ - auto out = ld_network_service->post(endp, path, length).get().at(0); - - LOG(DEBUG, "Got response success: {}", out.err()); - - if (out.err() != 0) { - errno = out.err(); - return -1; - } - - return 0; - - } catch (const std::exception& ex) { - LOG(ERROR, "while getting rpc output"); - errno = EBUSY; - return -1; - } -} - -int rpc_send::rm_node(const std::string& path, const bool remove_metadentry_only, const ssize_t size) { +int forward_remove(const std::string& path, const bool remove_metadentry_only, const ssize_t size) { // if only the metadentry should be removed, send one rpc to the // metadentry's responsible node to remove the metadata @@ -232,8 +205,37 @@ int rpc_send::rm_node(const std::string& path, const bool remove_metadentry_only return got_error ? 
-1 : 0; } +int forward_decr_size(const std::string& path, size_t length) { -int rpc_send::update_metadentry(const string& path, const Metadata& md, const MetadentryUpdateFlags& md_flags) { + auto endp = CTX->hosts().at(CTX->distributor()->locate_file_metadata(path)); + + try { + + LOG(DEBUG, "Sending RPC ..."); + // TODO(amiranda): add a post() with RPC_TIMEOUT to hermes so that we can + // retry for RPC_TRIES (see old commits with margo) + // TODO(amiranda): hermes will eventually provide a post(endpoint) + // returning one result and a broadcast(endpoint_set) returning a + // result_set. When that happens we can remove the .at(0) :/ + auto out = ld_network_service->post(endp, path, length).get().at(0); + + LOG(DEBUG, "Got response success: {}", out.err()); + + if (out.err() != 0) { + errno = out.err(); + return -1; + } + + return 0; + + } catch (const std::exception& ex) { + LOG(ERROR, "while getting rpc output"); + errno = EBUSY; + return -1; + } +} + +int forward_update_metadentry(const string& path, const Metadata& md, const MetadentryUpdateFlags& md_flags) { auto endp = CTX->hosts().at(CTX->distributor()->locate_file_metadata(path)); @@ -282,8 +284,8 @@ int rpc_send::update_metadentry(const string& path, const Metadata& md, const Me } int -rpc_send::update_metadentry_size(const string& path, const size_t size, const off64_t offset, const bool append_flag, - off64_t& ret_size) { +forward_update_metadentry_size(const string& path, const size_t size, const off64_t offset, const bool append_flag, + off64_t& ret_size) { auto endp = CTX->hosts().at(CTX->distributor()->locate_file_metadata(path)); @@ -319,7 +321,7 @@ rpc_send::update_metadentry_size(const string& path, const size_t size, const of } } -int rpc_send::get_metadentry_size(const std::string& path, off64_t& ret_size) { +int forward_get_metadentry_size(const std::string& path, off64_t& ret_size) { auto endp = CTX->hosts().at(CTX->distributor()->locate_file_metadata(path)); @@ -349,7 +351,7 @@ int rpc_send::get_metadentry_size(const std::string& path, off64_t& ret_size) { /** * Sends an RPC request to a specific node to push all chunks that belong to him */ -void rpc_send::get_dirents(OpenDir& open_dir) { +void forward_get_dirents(OpenDir& open_dir) { auto const root_dir = open_dir.path(); auto const targets = CTX->distributor()->locate_directory_metadata(root_dir); @@ -458,7 +460,7 @@ void rpc_send::get_dirents(OpenDir& open_dir) { #ifdef HAS_SYMLINKS -int rpc_send::mk_symlink(const std::string& path, const std::string& target_path) { +int forward_mk_symlink(const std::string& path, const std::string& target_path) { auto endp = CTX->hosts().at(CTX->distributor()->locate_file_metadata(path)); @@ -489,3 +491,6 @@ int rpc_send::mk_symlink(const std::string& path, const std::string& target_path } #endif + +} // namespace rpc +} // namespace gkfs \ No newline at end of file -- GitLab From 9aae92e15080bd975dae93ecdb737dbff4d4d85a Mon Sep 17 00:00:00 2001 From: Marc Vef Date: Wed, 19 Feb 2020 17:16:43 +0100 Subject: [PATCH 12/25] Renaming rpc files and handler functions --- .../{ld_rpc_data_ws.hpp => forward_data.hpp} | 6 +++--- ..._management.hpp => forward_management.hpp} | 6 +++--- ...pc_metadentry.hpp => forward_metadata.hpp} | 6 +++--- .../client/rpc/{hg_rpcs.hpp => rpc_types.hpp} | 6 +++--- include/daemon/handler/rpc_defs.hpp | 20 +++++++++---------- src/client/CMakeLists.txt | 16 +++++++-------- src/client/gkfs_functions.cpp | 4 ++-- src/client/preload.cpp | 2 +- .../{ld_rpc_data_ws.cpp => forward_data.cpp} | 4 ++-- 
..._management.cpp => forward_management.cpp} | 4 ++-- ...pc_metadentry.cpp => forward_metadata.cpp} | 4 ++-- src/client/rpc/{hg_rpcs.cpp => rpc_types.cpp} | 2 +- src/daemon/CMakeLists.txt | 6 +++--- .../handler/{h_data.cpp => srv_data.cpp} | 16 +++++++-------- .../{h_preload.cpp => srv_management.cpp} | 4 ++-- .../{h_metadentry.cpp => srv_metadata.cpp} | 8 ++++---- src/daemon/main.cpp | 14 ++++++------- 17 files changed, 64 insertions(+), 64 deletions(-) rename include/client/rpc/{ld_rpc_data_ws.hpp => forward_data.hpp} (89%) rename include/client/rpc/{ld_rpc_management.hpp => forward_management.hpp} (80%) rename include/client/rpc/{ld_rpc_metadentry.hpp => forward_metadata.hpp} (91%) rename include/client/rpc/{hg_rpcs.hpp => rpc_types.hpp} (99%) rename src/client/rpc/{ld_rpc_data_ws.cpp => forward_data.cpp} (99%) rename src/client/rpc/{ld_rpc_management.cpp => forward_management.cpp} (96%) rename src/client/rpc/{ld_rpc_metadentry.cpp => forward_metadata.cpp} (99%) rename src/client/rpc/{hg_rpcs.cpp => rpc_types.cpp} (97%) rename src/daemon/handler/{h_data.cpp => srv_data.cpp} (98%) rename src/daemon/handler/{h_preload.cpp => srv_management.cpp} (93%) rename src/daemon/handler/{h_metadentry.cpp => srv_metadata.cpp} (98%) diff --git a/include/client/rpc/ld_rpc_data_ws.hpp b/include/client/rpc/forward_data.hpp similarity index 89% rename from include/client/rpc/ld_rpc_data_ws.hpp rename to include/client/rpc/forward_data.hpp index 51a7ec6cf..a82e73cf9 100644 --- a/include/client/rpc/ld_rpc_data_ws.hpp +++ b/include/client/rpc/forward_data.hpp @@ -12,8 +12,8 @@ */ -#ifndef GEKKOFS_PRELOAD_C_DATA_WS_HPP -#define GEKKOFS_PRELOAD_C_DATA_WS_HPP +#ifndef GEKKOFS_CLIENT_FORWARD_DATA_HPP +#define GEKKOFS_CLIENT_FORWARD_DATA_HPP namespace gkfs { namespace rpc { @@ -36,4 +36,4 @@ ChunkStat forward_get_chunk_stat(); } // namespace rpc } // namespace gkfs -#endif //GEKKOFS_PRELOAD_C_DATA_WS_HPP +#endif //GEKKOFS_CLIENT_FORWARD_DATA_HPP diff --git a/include/client/rpc/ld_rpc_management.hpp b/include/client/rpc/forward_management.hpp similarity index 80% rename from include/client/rpc/ld_rpc_management.hpp rename to include/client/rpc/forward_management.hpp index 592ff5db9..a25bed442 100644 --- a/include/client/rpc/ld_rpc_management.hpp +++ b/include/client/rpc/forward_management.hpp @@ -12,8 +12,8 @@ */ -#ifndef GEKKOFS_MARGO_RPC_MANAGMENT_HPP -#define GEKKOFS_MARGO_RPC_MANAGMENT_HPP +#ifndef GEKKOFS_CLIENT_FORWARD_MNGMNT_HPP +#define GEKKOFS_CLIENT_FORWARD_MNGMNT_HPP namespace gkfs { namespace rpc { @@ -23,4 +23,4 @@ bool forward_get_fs_config(); } // namespace rpc } // namespace gkfs -#endif //GEKKOFS_MARGO_RPC_NANAGMENT_HPP +#endif //GEKKOFS_CLIENT_FORWARD_MNGMNT_HPP diff --git a/include/client/rpc/ld_rpc_metadentry.hpp b/include/client/rpc/forward_metadata.hpp similarity index 91% rename from include/client/rpc/ld_rpc_metadentry.hpp rename to include/client/rpc/forward_metadata.hpp index 09f64ed80..75bbd7d86 100644 --- a/include/client/rpc/ld_rpc_metadentry.hpp +++ b/include/client/rpc/forward_metadata.hpp @@ -12,8 +12,8 @@ */ -#ifndef GEKKOFS_PRELOAD_C_METADENTRY_HPP -#define GEKKOFS_PRELOAD_C_METADENTRY_HPP +#ifndef GEKKOFS_CLIENT_FORWARD_METADATA_HPP +#define GEKKOFS_CLIENT_FORWARD_METADATA_HPP #include @@ -54,4 +54,4 @@ int forward_mk_symlink(const std::string& path, const std::string& target_path); } // namespace rpc } // namespace gkfs -#endif //GEKKOFS_PRELOAD_C_METADENTRY_HPP +#endif //GEKKOFS_CLIENT_FORWARD_METADATA_HPP diff --git a/include/client/rpc/hg_rpcs.hpp 
b/include/client/rpc/rpc_types.hpp similarity index 99% rename from include/client/rpc/hg_rpcs.hpp rename to include/client/rpc/rpc_types.hpp index 499d209cd..e19b1cc3a 100644 --- a/include/client/rpc/hg_rpcs.hpp +++ b/include/client/rpc/rpc_types.hpp @@ -11,8 +11,8 @@ SPDX-License-Identifier: MIT */ -#ifndef GKFS_RPCS_HPP -#define GKFS_RPCS_HPP +#ifndef GKFS_RPCS_TYPES_HPP +#define GKFS_RPCS_TYPES_HPP // C includes #include @@ -2142,4 +2142,4 @@ struct chunk_stat { } // namespace gkfs -#endif // GKFS_RPCS_HPP +#endif // GKFS_RPCS_TYPES_HPP diff --git a/include/daemon/handler/rpc_defs.hpp b/include/daemon/handler/rpc_defs.hpp index 92acb6b78..e230969eb 100644 --- a/include/daemon/handler/rpc_defs.hpp +++ b/include/daemon/handler/rpc_defs.hpp @@ -12,8 +12,8 @@ */ -#ifndef LFS_RPC_DEFS_HPP -#define LFS_RPC_DEFS_HPP +#ifndef GKFS_DAEMON_RPC_DEFS_HPP +#define GKFS_DAEMON_RPC_DEFS_HPP extern "C" { #include @@ -21,15 +21,15 @@ extern "C" { /* visible API for RPC operations */ -DECLARE_MARGO_RPC_HANDLER(rpc_srv_fs_config) +DECLARE_MARGO_RPC_HANDLER(rpc_srv_get_fs_config) -DECLARE_MARGO_RPC_HANDLER(rpc_srv_mk_node) +DECLARE_MARGO_RPC_HANDLER(rpc_srv_create) DECLARE_MARGO_RPC_HANDLER(rpc_srv_stat) DECLARE_MARGO_RPC_HANDLER(rpc_srv_decr_size) -DECLARE_MARGO_RPC_HANDLER(rpc_srv_rm_node) +DECLARE_MARGO_RPC_HANDLER(rpc_srv_remove) DECLARE_MARGO_RPC_HANDLER(rpc_srv_update_metadentry) @@ -47,12 +47,12 @@ DECLARE_MARGO_RPC_HANDLER(rpc_srv_mk_symlink) // data -DECLARE_MARGO_RPC_HANDLER(rpc_srv_read_data) +DECLARE_MARGO_RPC_HANDLER(rpc_srv_read) -DECLARE_MARGO_RPC_HANDLER(rpc_srv_write_data) +DECLARE_MARGO_RPC_HANDLER(rpc_srv_write) -DECLARE_MARGO_RPC_HANDLER(rpc_srv_trunc_data) +DECLARE_MARGO_RPC_HANDLER(rpc_srv_truncate) -DECLARE_MARGO_RPC_HANDLER(rpc_srv_chunk_stat) +DECLARE_MARGO_RPC_HANDLER(rpc_srv_get_chunk_stat) -#endif //LFS_RPC_DEFS_HPP +#endif //GKFS_DAEMON_RPC_DEFS_HPP diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt index 035317f6c..12498247b 100644 --- a/src/client/CMakeLists.txt +++ b/src/client/CMakeLists.txt @@ -11,10 +11,10 @@ set(PRELOAD_SRC preload_util.cpp ../global/path_util.cpp ../global/rpc/rpc_utils.cpp - rpc/hg_rpcs.cpp - rpc/ld_rpc_data_ws.cpp - rpc/ld_rpc_management.cpp - rpc/ld_rpc_metadentry.cpp + rpc/rpc_types.cpp + rpc/forward_data.cpp + rpc/forward_management.cpp + rpc/forward_metadata.cpp syscalls/detail/syscall_info.c ) set(PRELOAD_HEADERS @@ -31,10 +31,10 @@ set(PRELOAD_HEADERS ../../include/client/preload.hpp ../../include/client/preload_context.hpp ../../include/client/preload_util.hpp - ../../include/client/rpc/hg_rpcs.hpp - ../../include/client/rpc/ld_rpc_management.hpp - ../../include/client/rpc/ld_rpc_metadentry.hpp - ../../include/client/rpc/ld_rpc_data_ws.hpp + ../../include/client/rpc/rpc_types.hpp + ../../include/client/rpc/forward_management.hpp + ../../include/client/rpc/forward_metadata.hpp + ../../include/client/rpc/forward_data.hpp ../../include/client/syscalls/args.hpp ../../include/client/syscalls/decoder.hpp ../../include/client/syscalls/errno.hpp diff --git a/src/client/gkfs_functions.cpp b/src/client/gkfs_functions.cpp index edff6f7e2..0fd1d66b0 100644 --- a/src/client/gkfs_functions.cpp +++ b/src/client/gkfs_functions.cpp @@ -17,8 +17,8 @@ #include #include #include -#include -#include +#include +#include #include #include diff --git a/src/client/preload.cpp b/src/client/preload.cpp index 94b7b43f0..3b392ffee 100644 --- a/src/client/preload.cpp +++ b/src/client/preload.cpp @@ -14,7 +14,7 @@ #include #include #include -#include 
+#include #include #include diff --git a/src/client/rpc/ld_rpc_data_ws.cpp b/src/client/rpc/forward_data.cpp similarity index 99% rename from src/client/rpc/ld_rpc_data_ws.cpp rename to src/client/rpc/forward_data.cpp index f3a8f5a48..dbe80cc30 100644 --- a/src/client/rpc/ld_rpc_data_ws.cpp +++ b/src/client/rpc/forward_data.cpp @@ -12,8 +12,8 @@ */ #include -#include -#include +#include +#include #include #include diff --git a/src/client/rpc/ld_rpc_management.cpp b/src/client/rpc/forward_management.cpp similarity index 96% rename from src/client/rpc/ld_rpc_management.cpp rename to src/client/rpc/forward_management.cpp index 73fadf230..1f19fb449 100644 --- a/src/client/rpc/ld_rpc_management.cpp +++ b/src/client/rpc/forward_management.cpp @@ -11,10 +11,10 @@ SPDX-License-Identifier: MIT */ -#include +#include #include #include -#include +#include #include diff --git a/src/client/rpc/ld_rpc_metadentry.cpp b/src/client/rpc/forward_metadata.cpp similarity index 99% rename from src/client/rpc/ld_rpc_metadentry.cpp rename to src/client/rpc/forward_metadata.cpp index 5db66bc19..62f0019ef 100644 --- a/src/client/rpc/ld_rpc_metadentry.cpp +++ b/src/client/rpc/forward_metadata.cpp @@ -11,12 +11,12 @@ SPDX-License-Identifier: MIT */ -#include +#include #include #include #include #include -#include +#include #include #include diff --git a/src/client/rpc/hg_rpcs.cpp b/src/client/rpc/rpc_types.cpp similarity index 97% rename from src/client/rpc/hg_rpcs.cpp rename to src/client/rpc/rpc_types.cpp index b9a05bae2..7aaffaae1 100644 --- a/src/client/rpc/hg_rpcs.cpp +++ b/src/client/rpc/rpc_types.cpp @@ -12,7 +12,7 @@ */ #include -#include +#include //============================================================================== // register request types so that they can be used by users and the engine diff --git a/src/daemon/CMakeLists.txt b/src/daemon/CMakeLists.txt index da70e0ec2..a16cc2707 100644 --- a/src/daemon/CMakeLists.txt +++ b/src/daemon/CMakeLists.txt @@ -9,9 +9,9 @@ set(DAEMON_SRC ops/metadentry.cpp classes/fs_data.cpp classes/rpc_data.cpp - handler/h_metadentry.cpp - handler/h_data.cpp - handler/h_preload.cpp + handler/srv_metadata.cpp + handler/srv_data.cpp + handler/srv_management.cpp ) set(DAEMON_HEADERS ../../include/config.hpp diff --git a/src/daemon/handler/h_data.cpp b/src/daemon/handler/srv_data.cpp similarity index 98% rename from src/daemon/handler/h_data.cpp rename to src/daemon/handler/srv_data.cpp index 539ae4d0f..fce4a8840 100644 --- a/src/daemon/handler/h_data.cpp +++ b/src/daemon/handler/srv_data.cpp @@ -121,7 +121,7 @@ void cancel_abt_io(vector* abt_tasks, vector* abt_eventu } -static hg_return_t rpc_srv_write_data(hg_handle_t handle) { +static hg_return_t rpc_srv_write(hg_handle_t handle) { /* * 1. Setup */ @@ -324,9 +324,9 @@ static hg_return_t rpc_srv_write_data(hg_handle_t handle) { return ret; } -DEFINE_MARGO_RPC_HANDLER(rpc_srv_write_data) +DEFINE_MARGO_RPC_HANDLER(rpc_srv_write) -static hg_return_t rpc_srv_read_data(hg_handle_t handle) { +static hg_return_t rpc_srv_read(hg_handle_t handle) { /* * 1. 
Setup */ @@ -511,9 +511,9 @@ static hg_return_t rpc_srv_read_data(hg_handle_t handle) { return ret; } -DEFINE_MARGO_RPC_HANDLER(rpc_srv_read_data) +DEFINE_MARGO_RPC_HANDLER(rpc_srv_read) -static hg_return_t rpc_srv_trunc_data(hg_handle_t handle) { +static hg_return_t rpc_srv_truncate(hg_handle_t handle) { rpc_trunc_in_t in{}; rpc_err_out_t out{}; @@ -546,9 +546,9 @@ static hg_return_t rpc_srv_trunc_data(hg_handle_t handle) { return HG_SUCCESS; } -DEFINE_MARGO_RPC_HANDLER(rpc_srv_trunc_data) +DEFINE_MARGO_RPC_HANDLER(rpc_srv_truncate) -static hg_return_t rpc_srv_chunk_stat(hg_handle_t handle) { +static hg_return_t rpc_srv_get_chunk_stat(hg_handle_t handle) { GKFS_DATA->spdlogger()->trace("{}() called", __func__); rpc_chunk_stat_out_t out{}; @@ -568,6 +568,6 @@ static hg_return_t rpc_srv_chunk_stat(hg_handle_t handle) { return hret; } -DEFINE_MARGO_RPC_HANDLER(rpc_srv_chunk_stat) +DEFINE_MARGO_RPC_HANDLER(rpc_srv_get_chunk_stat) diff --git a/src/daemon/handler/h_preload.cpp b/src/daemon/handler/srv_management.cpp similarity index 93% rename from src/daemon/handler/h_preload.cpp rename to src/daemon/handler/srv_management.cpp index d807410d8..0d9fbe7b5 100644 --- a/src/daemon/handler/h_preload.cpp +++ b/src/daemon/handler/srv_management.cpp @@ -20,7 +20,7 @@ using namespace std; -static hg_return_t rpc_srv_fs_config(hg_handle_t handle) { +static hg_return_t rpc_srv_get_fs_config(hg_handle_t handle) { rpc_config_out_t out{}; GKFS_DATA->spdlogger()->debug("{}() Got config RPC", __func__); @@ -47,4 +47,4 @@ static hg_return_t rpc_srv_fs_config(hg_handle_t handle) { return HG_SUCCESS; } -DEFINE_MARGO_RPC_HANDLER(rpc_srv_fs_config) \ No newline at end of file +DEFINE_MARGO_RPC_HANDLER(rpc_srv_get_fs_config) \ No newline at end of file diff --git a/src/daemon/handler/h_metadentry.cpp b/src/daemon/handler/srv_metadata.cpp similarity index 98% rename from src/daemon/handler/h_metadentry.cpp rename to src/daemon/handler/srv_metadata.cpp index c2ee6c346..cafde927c 100644 --- a/src/daemon/handler/h_metadentry.cpp +++ b/src/daemon/handler/srv_metadata.cpp @@ -21,7 +21,7 @@ using namespace std; -static hg_return_t rpc_srv_mk_node(hg_handle_t handle) { +static hg_return_t rpc_srv_create(hg_handle_t handle) { rpc_mk_node_in_t in; rpc_err_out_t out; @@ -51,7 +51,7 @@ static hg_return_t rpc_srv_mk_node(hg_handle_t handle) { return HG_SUCCESS; } -DEFINE_MARGO_RPC_HANDLER(rpc_srv_mk_node) +DEFINE_MARGO_RPC_HANDLER(rpc_srv_create) static hg_return_t rpc_srv_stat(hg_handle_t handle) { rpc_path_only_in_t in{}; @@ -124,7 +124,7 @@ static hg_return_t rpc_srv_decr_size(hg_handle_t handle) { DEFINE_MARGO_RPC_HANDLER(rpc_srv_decr_size) -static hg_return_t rpc_srv_rm_node(hg_handle_t handle) { +static hg_return_t rpc_srv_remove(hg_handle_t handle) { rpc_rm_node_in_t in{}; rpc_err_out_t out{}; @@ -164,7 +164,7 @@ static hg_return_t rpc_srv_rm_node(hg_handle_t handle) { return HG_SUCCESS; } -DEFINE_MARGO_RPC_HANDLER(rpc_srv_rm_node) +DEFINE_MARGO_RPC_HANDLER(rpc_srv_remove) static hg_return_t rpc_srv_update_metadentry(hg_handle_t handle) { diff --git a/src/daemon/main.cpp b/src/daemon/main.cpp index 12cabe633..048f3b8a4 100644 --- a/src/daemon/main.cpp +++ b/src/daemon/main.cpp @@ -213,11 +213,11 @@ void init_rpc_server(const string & protocol_port) { * @param hg_class */ void register_server_rpcs(margo_instance_id mid) { - MARGO_REGISTER(mid, gkfs::hg_tag::fs_config, void, rpc_config_out_t, rpc_srv_fs_config); - MARGO_REGISTER(mid, gkfs::hg_tag::create, rpc_mk_node_in_t, rpc_err_out_t, rpc_srv_mk_node); + 
MARGO_REGISTER(mid, gkfs::hg_tag::fs_config, void, rpc_config_out_t, rpc_srv_get_fs_config); + MARGO_REGISTER(mid, gkfs::hg_tag::create, rpc_mk_node_in_t, rpc_err_out_t, rpc_srv_create); MARGO_REGISTER(mid, gkfs::hg_tag::stat, rpc_path_only_in_t, rpc_stat_out_t, rpc_srv_stat); MARGO_REGISTER(mid, gkfs::hg_tag::decr_size, rpc_trunc_in_t, rpc_err_out_t, rpc_srv_decr_size); - MARGO_REGISTER(mid, gkfs::hg_tag::remove, rpc_rm_node_in_t, rpc_err_out_t, rpc_srv_rm_node); + MARGO_REGISTER(mid, gkfs::hg_tag::remove, rpc_rm_node_in_t, rpc_err_out_t, rpc_srv_remove); MARGO_REGISTER(mid, gkfs::hg_tag::update_metadentry, rpc_update_metadentry_in_t, rpc_err_out_t, rpc_srv_update_metadentry); MARGO_REGISTER(mid, gkfs::hg_tag::get_metadentry_size, rpc_path_only_in_t, rpc_get_metadentry_size_out_t, @@ -229,10 +229,10 @@ void register_server_rpcs(margo_instance_id mid) { #ifdef HAS_SYMLINKS MARGO_REGISTER(mid, gkfs::hg_tag::mk_symlink, rpc_mk_symlink_in_t, rpc_err_out_t, rpc_srv_mk_symlink); #endif - MARGO_REGISTER(mid, gkfs::hg_tag::write_data, rpc_write_data_in_t, rpc_data_out_t, rpc_srv_write_data); - MARGO_REGISTER(mid, gkfs::hg_tag::read_data, rpc_read_data_in_t, rpc_data_out_t, rpc_srv_read_data); - MARGO_REGISTER(mid, gkfs::hg_tag::trunc_data, rpc_trunc_in_t, rpc_err_out_t, rpc_srv_trunc_data); - MARGO_REGISTER(mid, gkfs::hg_tag::chunk_stat, rpc_chunk_stat_in_t, rpc_chunk_stat_out_t, rpc_srv_chunk_stat); + MARGO_REGISTER(mid, gkfs::hg_tag::write_data, rpc_write_data_in_t, rpc_data_out_t, rpc_srv_write); + MARGO_REGISTER(mid, gkfs::hg_tag::read_data, rpc_read_data_in_t, rpc_data_out_t, rpc_srv_read); + MARGO_REGISTER(mid, gkfs::hg_tag::trunc_data, rpc_trunc_in_t, rpc_err_out_t, rpc_srv_truncate); + MARGO_REGISTER(mid, gkfs::hg_tag::chunk_stat, rpc_chunk_stat_in_t, rpc_chunk_stat_out_t, rpc_srv_get_chunk_stat); } void shutdown_handler(int dummy) { -- GitLab From a2a16583f0c4c28a771a7d67524fd8bed2464c3c Mon Sep 17 00:00:00 2001 From: Marc Vef Date: Wed, 19 Feb 2020 17:28:22 +0100 Subject: [PATCH 13/25] Rename main.cpp/hpp to daemon.cpp/hpp --- include/daemon/classes/fs_data.hpp | 2 +- include/daemon/classes/rpc_data.hpp | 2 +- include/daemon/{main.hpp => daemon.hpp} | 6 +++--- include/daemon/ops/metadentry.hpp | 2 +- src/daemon/CMakeLists.txt | 4 ++-- src/daemon/{main.cpp => daemon.cpp} | 4 ++-- src/daemon/handler/srv_data.cpp | 2 +- src/daemon/handler/srv_management.cpp | 2 +- src/daemon/util.cpp | 2 +- 9 files changed, 13 insertions(+), 13 deletions(-) rename include/daemon/{main.hpp => daemon.hpp} (91%) rename src/daemon/{main.cpp => daemon.cpp} (99%) diff --git a/include/daemon/classes/fs_data.hpp b/include/daemon/classes/fs_data.hpp index 3759fc8eb..078246161 100644 --- a/include/daemon/classes/fs_data.hpp +++ b/include/daemon/classes/fs_data.hpp @@ -15,7 +15,7 @@ #ifndef LFS_FS_DATA_H #define LFS_FS_DATA_H -#include +#include /* Forward declarations */ class MetadataDB; diff --git a/include/daemon/classes/rpc_data.hpp b/include/daemon/classes/rpc_data.hpp index 1ce278a83..ce1917794 100644 --- a/include/daemon/classes/rpc_data.hpp +++ b/include/daemon/classes/rpc_data.hpp @@ -15,7 +15,7 @@ #ifndef LFS_RPC_DATA_HPP #define LFS_RPC_DATA_HPP -#include +#include class RPCData { diff --git a/include/daemon/main.hpp b/include/daemon/daemon.hpp similarity index 91% rename from include/daemon/main.hpp rename to include/daemon/daemon.hpp index ee455cbe7..8fc4cfb9d 100644 --- a/include/daemon/main.hpp +++ b/include/daemon/daemon.hpp @@ -11,8 +11,8 @@ SPDX-License-Identifier: MIT */ -#ifndef 
GKFS_DAEMON_MAIN_HPP -#define GKFS_DAEMON_MAIN_HPP +#ifndef GKFS_DAEMON_DAEMON_HPP +#define GKFS_DAEMON_DAEMON_HPP // std libs #include @@ -43,4 +43,4 @@ void init_rpc_server(const std::string& protocol); void register_server_rpcs(margo_instance_id mid); -#endif // GKFS_DAEMON_MAIN_HPP +#endif // GKFS_DAEMON_DAEMON_HPP diff --git a/include/daemon/ops/metadentry.hpp b/include/daemon/ops/metadentry.hpp index eea41d52f..0900d07ac 100644 --- a/include/daemon/ops/metadentry.hpp +++ b/include/daemon/ops/metadentry.hpp @@ -15,7 +15,7 @@ #ifndef GEKKOFS_METADENTRY_HPP #define GEKKOFS_METADENTRY_HPP -#include +#include #include namespace gkfs { diff --git a/src/daemon/CMakeLists.txt b/src/daemon/CMakeLists.txt index a16cc2707..72d0a7b0e 100644 --- a/src/daemon/CMakeLists.txt +++ b/src/daemon/CMakeLists.txt @@ -4,7 +4,7 @@ add_subdirectory(backend/data) set(DAEMON_SRC ../global/rpc/rpc_utils.cpp ../global/path_util.cpp - main.cpp + daemon.cpp util.cpp ops/metadentry.cpp classes/fs_data.cpp @@ -21,7 +21,7 @@ set(DAEMON_HEADERS ../../include/global/rpc/rpc_types.hpp ../../include/global/rpc/rpc_utils.hpp ../../include/global/path_util.hpp - ../../include/daemon/main.hpp + ../../include/daemon/daemon.hpp ../../include/daemon/util.hpp ../../include/daemon/ops/metadentry.hpp ../../include/daemon/classes/fs_data.hpp diff --git a/src/daemon/main.cpp b/src/daemon/daemon.cpp similarity index 99% rename from src/daemon/main.cpp rename to src/daemon/daemon.cpp index 048f3b8a4..923b87494 100644 --- a/src/daemon/main.cpp +++ b/src/daemon/daemon.cpp @@ -12,8 +12,8 @@ */ -#include -#include "version.hpp" +#include +#include #include #include #include diff --git a/src/daemon/handler/srv_data.cpp b/src/daemon/handler/srv_data.cpp index fce4a8840..afaa0fd70 100644 --- a/src/daemon/handler/srv_data.cpp +++ b/src/daemon/handler/srv_data.cpp @@ -12,7 +12,7 @@ */ -#include +#include #include #include diff --git a/src/daemon/handler/srv_management.cpp b/src/daemon/handler/srv_management.cpp index 0d9fbe7b5..79d0cf5ee 100644 --- a/src/daemon/handler/srv_management.cpp +++ b/src/daemon/handler/srv_management.cpp @@ -12,7 +12,7 @@ */ -#include +#include #include #include diff --git a/src/daemon/util.cpp b/src/daemon/util.cpp index 7c6d6bcc8..636005975 100644 --- a/src/daemon/util.cpp +++ b/src/daemon/util.cpp @@ -11,7 +11,7 @@ SPDX-License-Identifier: MIT */ #include -#include +#include #include -- GitLab From 0179b2a12482dbdaea8e93940ff51c16e13edbea Mon Sep 17 00:00:00 2001 From: Marc Vef Date: Wed, 19 Feb 2020 17:44:06 +0100 Subject: [PATCH 14/25] Namespace renaming and consistency for rpc tags gkfs::hg_tag -> gkfs::rpc::tag gkfs::utils -> gkfs::util gkfs::client -> gkfs::util gkfs::logging -> gkfs::log --- include/client/logging.hpp | 4 +-- include/client/make_array.hpp | 4 +-- include/client/preload_util.hpp | 2 +- include/client/rpc/rpc_types.hpp | 28 +++++++-------- include/client/syscalls/args.hpp | 18 +++++----- include/config.hpp | 2 +- include/global/global_defs.hpp | 61 ++++++++++++++++---------------- include/global/log_util.hpp | 2 +- src/client/gkfs_functions.cpp | 2 +- src/client/logging.cpp | 2 +- src/client/open_file_map.cpp | 16 ++++----- src/client/preload.cpp | 2 +- src/client/preload_context.cpp | 4 +-- src/client/preload_util.cpp | 8 ++--- src/daemon/daemon.cpp | 37 +++++++++---------- src/global/log_util.cpp | 8 ++--- 16 files changed, 101 insertions(+), 99 deletions(-) diff --git a/include/client/logging.hpp b/include/client/logging.hpp index d041db52c..06e7587e0 100644 --- 
a/include/client/logging.hpp +++ b/include/client/logging.hpp @@ -110,8 +110,8 @@ static const auto constexpr most = log_level::print_most; static const auto constexpr all = log_level::print_all; static const auto constexpr help = log_level::print_help; -static const auto constexpr level_names = - utils::make_array( +static const auto constexpr level_names = + util::make_array( "syscall", "syscall", // sycall_entry uses the same name as syscall "info", diff --git a/include/client/make_array.hpp b/include/client/make_array.hpp index 78868b64a..5a971f35b 100644 --- a/include/client/make_array.hpp +++ b/include/client/make_array.hpp @@ -15,7 +15,7 @@ #define LIBGKFS_UTILS_MAKE_ARRAY_HPP namespace gkfs { -namespace utils { +namespace util { template constexpr auto make_array(T&&... values) -> @@ -29,7 +29,7 @@ constexpr auto make_array(T&&... values) -> sizeof...(T)>{std::forward(values)...}; } -} // namespace utils +} // namespace util } // namespace gkfs #endif // LIBGKFS_UTILS_MAKE_ARRAY_HPP diff --git a/include/client/preload_util.hpp b/include/client/preload_util.hpp index 9771f4ac3..ebf25c44f 100644 --- a/include/client/preload_util.hpp +++ b/include/client/preload_util.hpp @@ -42,7 +42,7 @@ extern std::unique_ptr ld_network_service; // function definitions namespace gkfs { - namespace client { +namespace util { template constexpr typename std::underlying_type::type to_underlying(E e) { return static_cast::type>(e); diff --git a/include/client/rpc/rpc_types.hpp b/include/client/rpc/rpc_types.hpp index e19b1cc3a..95babcb0c 100644 --- a/include/client/rpc/rpc_types.hpp +++ b/include/client/rpc/rpc_types.hpp @@ -80,7 +80,7 @@ struct fs_config { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = gkfs::hg_tag::fs_config; + constexpr static const auto name = gkfs::rpc::tag::fs_config; // requires response? constexpr static const auto requires_response = true; @@ -267,7 +267,7 @@ struct create { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = gkfs::hg_tag::create; + constexpr static const auto name = gkfs::rpc::tag::create; // requires response? constexpr static const auto requires_response = true; @@ -385,7 +385,7 @@ struct stat { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = gkfs::hg_tag::stat; + constexpr static const auto name = gkfs::rpc::tag::stat; // requires response? constexpr static const auto requires_response = true; @@ -506,7 +506,7 @@ struct remove { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = gkfs::hg_tag::remove; + constexpr static const auto name = gkfs::rpc::tag::remove; // requires response? constexpr static const auto requires_response = true; @@ -615,7 +615,7 @@ struct decr_size { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = gkfs::hg_tag::decr_size; + constexpr static const auto name = gkfs::rpc::tag::decr_size; // requires response? constexpr static const auto requires_response = true; @@ -732,7 +732,7 @@ struct update_metadentry { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = gkfs::hg_tag::update_metadentry; + constexpr static const auto name = gkfs::rpc::tag::update_metadentry; // requires response? 
constexpr static const auto requires_response = true; @@ -1001,7 +1001,7 @@ struct get_metadentry_size { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = gkfs::hg_tag::get_metadentry_size; + constexpr static const auto name = gkfs::rpc::tag::get_metadentry_size; // requires response? constexpr static const auto requires_response = true; @@ -1119,7 +1119,7 @@ struct update_metadentry_size { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = gkfs::hg_tag::update_metadentry_size; + constexpr static const auto name = gkfs::rpc::tag::update_metadentry_size; // requires response? constexpr static const auto requires_response = true; @@ -1266,7 +1266,7 @@ struct mk_symlink { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = gkfs::hg_tag::mk_symlink; + constexpr static const auto name = gkfs::rpc::tag::mk_symlink; // requires response? constexpr static const auto requires_response = true; @@ -1386,7 +1386,7 @@ struct write_data { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = gkfs::hg_tag::write_data; + constexpr static const auto name = gkfs::rpc::tag::write; // requires response? constexpr static const auto requires_response = true; @@ -1586,7 +1586,7 @@ struct read_data { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = gkfs::hg_tag::read_data; + constexpr static const auto name = gkfs::rpc::tag::read; // requires response? constexpr static const auto requires_response = true; @@ -1786,7 +1786,7 @@ struct trunc_data { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = gkfs::hg_tag::trunc_data; + constexpr static const auto name = gkfs::rpc::tag::truncate; // requires response? constexpr static const auto requires_response = true; @@ -1907,7 +1907,7 @@ struct get_dirents { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = gkfs::hg_tag::get_dirents; + constexpr static const auto name = gkfs::rpc::tag::get_dirents; // requires response? constexpr static const auto requires_response = true; @@ -2037,7 +2037,7 @@ struct chunk_stat { constexpr static const hg_id_t mercury_id = public_id; // RPC name - constexpr static const auto name = gkfs::hg_tag::chunk_stat; + constexpr static const auto name = gkfs::rpc::tag::get_chunk_stat; // requires response? 
constexpr static const auto requires_response = true; diff --git a/include/client/syscalls/args.hpp b/include/client/syscalls/args.hpp index e9a9e5add..2576cd0e4 100644 --- a/include/client/syscalls/args.hpp +++ b/include/client/syscalls/args.hpp @@ -292,8 +292,8 @@ format_whence_arg_to(FmtBuffer& buffer, const printable_arg& parg) { /* Names for lseek() whence arg */ - const auto flag_names = - utils::make_array( + const auto flag_names = + util::make_array( FLAG_ENTRY(SEEK_SET), FLAG_ENTRY(SEEK_CUR), FLAG_ENTRY(SEEK_END) @@ -317,7 +317,7 @@ format_mmap_prot_arg_to(FmtBuffer& buffer, /* Names for mmap() prot arg */ const auto flag_names = - utils::make_array( + util::make_array( FLAG_ENTRY(PROT_NONE), FLAG_ENTRY(PROT_READ), FLAG_ENTRY(PROT_WRITE), @@ -343,7 +343,7 @@ format_mmap_flags_arg_to(FmtBuffer& buffer, /* Names for mmap() flags arg */ const auto flag_names = - utils::make_array( + util::make_array( FLAG_ENTRY(MAP_SHARED), FLAG_ENTRY(MAP_PRIVATE), #ifdef MAP_SHARED_VALIDATE @@ -384,7 +384,7 @@ format_clone_flags_arg_to(FmtBuffer& buffer, /* Names for clone() flags arg */ const auto flag_names = - utils::make_array( + util::make_array( FLAG_ENTRY(CLONE_VM), FLAG_ENTRY(CLONE_FS), FLAG_ENTRY(CLONE_FILES), @@ -437,7 +437,7 @@ format_signum_arg_to(FmtBuffer& buffer, /* Names for signum args */ const auto flag_names = - utils::make_array( + util::make_array( FLAG_ENTRY(SIGHUP), FLAG_ENTRY(SIGINT), FLAG_ENTRY(SIGQUIT), @@ -492,7 +492,7 @@ format_sigproc_how_arg_to(FmtBuffer& buffer, /* Names for sigproc how args */ const auto flag_names = - utils::make_array( + util::make_array( FLAG_ENTRY(SIG_BLOCK), FLAG_ENTRY(SIG_UNBLOCK), FLAG_ENTRY(SIG_SETMASK)); @@ -583,13 +583,13 @@ format_open_flags_to(FmtBuffer& buffer, /* Names for O_ACCMODE args */ const auto flag_names = - utils::make_array( + util::make_array( FLAG_ENTRY(O_RDONLY), FLAG_ENTRY(O_WRONLY), FLAG_ENTRY(O_RDWR)); const auto extra_flag_names = - utils::make_array( + util::make_array( #ifdef O_EXEC FLAG_ENTRY(O_EXEC), #endif diff --git a/include/config.hpp b/include/config.hpp index a10b444ea..982eddcf4 100644 --- a/include/config.hpp +++ b/include/config.hpp @@ -33,7 +33,7 @@ namespace io { constexpr auto zero_buffer_before_read = false; } // namespace io -namespace logging { +namespace log { constexpr auto client_log_path = "/tmp/gkfs_client.log"; constexpr auto daemon_log_path = "/tmp/gkfs_daemon.log"; diff --git a/include/global/global_defs.hpp b/include/global/global_defs.hpp index 8c9e22a66..8bd958615 100644 --- a/include/global/global_defs.hpp +++ b/include/global/global_defs.hpp @@ -17,37 +17,38 @@ namespace gkfs { // These constexpr set the RPC's identity and which handler the receiver end should use - namespace hg_tag { - constexpr auto fs_config = "rpc_srv_fs_config"; - constexpr auto create = "rpc_srv_mk_node"; - constexpr auto stat = "rpc_srv_stat"; - constexpr auto remove = "rpc_srv_rm_node"; - constexpr auto decr_size = "rpc_srv_decr_size"; - constexpr auto update_metadentry = "rpc_srv_update_metadentry"; - constexpr auto get_metadentry_size = "rpc_srv_get_metadentry_size"; - constexpr auto update_metadentry_size = "rpc_srv_update_metadentry_size"; - constexpr auto get_dirents = "rpc_srv_get_dirents"; +namespace rpc { +namespace tag { + +constexpr auto fs_config = "rpc_srv_fs_config"; +constexpr auto create = "rpc_srv_mk_node"; +constexpr auto stat = "rpc_srv_stat"; +constexpr auto remove = "rpc_srv_rm_node"; +constexpr auto decr_size = "rpc_srv_decr_size"; +constexpr auto update_metadentry = 
"rpc_srv_update_metadentry"; +constexpr auto get_metadentry_size = "rpc_srv_get_metadentry_size"; +constexpr auto update_metadentry_size = "rpc_srv_update_metadentry_size"; +constexpr auto get_dirents = "rpc_srv_get_dirents"; #ifdef HAS_SYMLINKS - constexpr auto mk_symlink = "rpc_srv_mk_symlink"; +constexpr auto mk_symlink = "rpc_srv_mk_symlink"; #endif - constexpr auto write_data = "rpc_srv_write_data"; - constexpr auto read_data = "rpc_srv_read_data"; - constexpr auto trunc_data = "rpc_srv_trunc_data"; - constexpr auto chunk_stat = "rpc_srv_chunk_stat"; - } - - namespace rpc { - namespace protocol { - constexpr auto ofi_psm2 = "ofi+psm2"; - constexpr auto ofi_sockets = "ofi+sockets"; - constexpr auto ofi_tcp = "ofi+tcp"; - } - } - - namespace types { - // typedefs - typedef unsigned long rpc_chnk_id_t; - } -} +constexpr auto write = "rpc_srv_write_data"; +constexpr auto read = "rpc_srv_read_data"; +constexpr auto truncate = "rpc_srv_trunc_data"; +constexpr auto get_chunk_stat = "rpc_srv_chunk_stat"; +} // namespace tag + +namespace protocol { +constexpr auto ofi_psm2 = "ofi+psm2"; +constexpr auto ofi_sockets = "ofi+sockets"; +constexpr auto ofi_tcp = "ofi+tcp"; +} // namespace protocol +} // namespace rpc + +namespace types { +// typedefs +typedef unsigned long rpc_chnk_id_t; +} // namespace types +} // namespace gkfs #endif //GEKKOFS_GLOBAL_DEFS_HPP diff --git a/include/global/log_util.hpp b/include/global/log_util.hpp index 8ccfac68d..6ef05d3ef 100644 --- a/include/global/log_util.hpp +++ b/include/global/log_util.hpp @@ -17,7 +17,7 @@ #include namespace gkfs { - namespace logging { +namespace log { spdlog::level::level_enum get_level(std::string level_str); diff --git a/src/client/gkfs_functions.cpp b/src/client/gkfs_functions.cpp index 0fd1d66b0..e2b780a8e 100644 --- a/src/client/gkfs_functions.cpp +++ b/src/client/gkfs_functions.cpp @@ -218,7 +218,7 @@ int gkfs::func::stat(const string& path, struct stat* buf, bool follow_links) { if (!md) { return -1; } - gkfs::client::metadata_to_stat(path, *md, *buf); + gkfs::util::metadata_to_stat(path, *md, *buf); return 0; } diff --git a/src/client/logging.cpp b/src/client/logging.cpp index b88c1601c..a79c5b30c 100644 --- a/src/client/logging.cpp +++ b/src/client/logging.cpp @@ -39,7 +39,7 @@ struct opt_info { #define STR_AND_LEN(strbuf) \ strbuf, sizeof(strbuf) - 1 -static const auto constexpr debug_opts = utils::make_array( +static const auto constexpr debug_opts = util::make_array( opt_info{STR_AND_LEN("none"), {"don't print any messages"}, diff --git a/src/client/open_file_map.cpp b/src/client/open_file_map.cpp index 5e9eae85f..e443df2f2 100644 --- a/src/client/open_file_map.cpp +++ b/src/client/open_file_map.cpp @@ -29,17 +29,17 @@ OpenFile::OpenFile(const string& path, const int flags, FileType type) : path_(path) { // set flags to OpenFile if (flags & O_CREAT) - flags_[gkfs::client::to_underlying(OpenFile_flags::creat)] = true; + flags_[gkfs::util::to_underlying(OpenFile_flags::creat)] = true; if (flags & O_APPEND) - flags_[gkfs::client::to_underlying(OpenFile_flags::append)] = true; + flags_[gkfs::util::to_underlying(OpenFile_flags::append)] = true; if (flags & O_TRUNC) - flags_[gkfs::client::to_underlying(OpenFile_flags::trunc)] = true; + flags_[gkfs::util::to_underlying(OpenFile_flags::trunc)] = true; if (flags & O_RDONLY) - flags_[gkfs::client::to_underlying(OpenFile_flags::rdonly)] = true; + flags_[gkfs::util::to_underlying(OpenFile_flags::rdonly)] = true; if (flags & O_WRONLY) - 
flags_[gkfs::client::to_underlying(OpenFile_flags::wronly)] = true; + flags_[gkfs::util::to_underlying(OpenFile_flags::wronly)] = true; if (flags & O_RDWR) - flags_[gkfs::client::to_underlying(OpenFile_flags::rdwr)] = true; + flags_[gkfs::util::to_underlying(OpenFile_flags::rdwr)] = true; pos_ = 0; // If O_APPEND flag is used, it will be used before each write. } @@ -68,12 +68,12 @@ void OpenFile::pos(unsigned long pos) { bool OpenFile::get_flag(OpenFile_flags flag) { lock_guard lock(pos_mutex_); - return flags_[gkfs::client::to_underlying(flag)]; + return flags_[gkfs::util::to_underlying(flag)]; } void OpenFile::set_flag(OpenFile_flags flag, bool value) { lock_guard lock(flag_mutex_); - flags_[gkfs::client::to_underlying(flag)] = value; + flags_[gkfs::util::to_underlying(flag)] = value; } FileType OpenFile::type() const { diff --git a/src/client/preload.cpp b/src/client/preload.cpp index 3b392ffee..370b96667 100644 --- a/src/client/preload.cpp +++ b/src/client/preload.cpp @@ -90,7 +90,7 @@ void init_ld_environment_() { try { LOG(INFO, "Loading peer addresses..."); - gkfs::client::load_hosts(); + gkfs::util::load_hosts(); } catch (const std::exception& e) { exit_error_msg(EXIT_FAILURE, "Failed to load hosts addresses: "s + e.what()); } diff --git a/src/client/preload_context.cpp b/src/client/preload_context.cpp index 040ab5db7..7457cf66c 100644 --- a/src/client/preload_context.cpp +++ b/src/client/preload_context.cpp @@ -50,10 +50,10 @@ void PreloadContext::init_logging() { const std::string log_opts = - gkfs::env::get_var(gkfs::env::LOG, gkfs::config::logging::client_log_level); + gkfs::env::get_var(gkfs::env::LOG, gkfs::config::log::client_log_level); const std::string log_output = - gkfs::env::get_var(gkfs::env::LOG_OUTPUT, gkfs::config::logging::client_log_path); + gkfs::env::get_var(gkfs::env::LOG_OUTPUT, gkfs::config::log::client_log_path); #ifdef GKFS_DEBUG_BUILD // atoi returns 0 if no int conversion can be performed, which works diff --git a/src/client/preload_util.cpp b/src/client/preload_util.cpp index c1725a10f..f23bf2b63 100644 --- a/src/client/preload_util.cpp +++ b/src/client/preload_util.cpp @@ -40,7 +40,7 @@ using namespace std; * @param attr * @return */ -int gkfs::client::metadata_to_stat(const std::string& path, const Metadata& md, struct stat& attr) { +int gkfs::util::metadata_to_stat(const std::string& path, const Metadata& md, struct stat& attr) { /* Populate default values */ attr.st_dev = makedev(0, 0); @@ -83,7 +83,7 @@ int gkfs::client::metadata_to_stat(const std::string& path, const Metadata& md, return 0; } -vector> gkfs::client::load_hostfile(const std::string& lfpath) { +vector> gkfs::util::load_hostfile(const std::string& lfpath) { LOG(DEBUG, "Loading hosts file: \"{}\"", lfpath); @@ -146,14 +146,14 @@ hermes::endpoint lookup_endpoint(const std::string& uri, uri, error_msg)); } -void gkfs::client::load_hosts() { +void gkfs::util::load_hosts() { string hostfile; hostfile = gkfs::env::get_var(gkfs::env::HOSTS_FILE, gkfs::config::hostfile_path); vector> hosts; try { - hosts = gkfs::client::load_hostfile(hostfile); + hosts = gkfs::util::load_hostfile(hostfile); } catch (const exception& e) { auto emsg = fmt::format("Failed to load hosts file: {}", e.what()); throw runtime_error(emsg); diff --git a/src/daemon/daemon.cpp b/src/daemon/daemon.cpp index 923b87494..c58bffc36 100644 --- a/src/daemon/daemon.cpp +++ b/src/daemon/daemon.cpp @@ -213,26 +213,27 @@ void init_rpc_server(const string & protocol_port) { * @param hg_class */ void 
register_server_rpcs(margo_instance_id mid) { - MARGO_REGISTER(mid, gkfs::hg_tag::fs_config, void, rpc_config_out_t, rpc_srv_get_fs_config); - MARGO_REGISTER(mid, gkfs::hg_tag::create, rpc_mk_node_in_t, rpc_err_out_t, rpc_srv_create); - MARGO_REGISTER(mid, gkfs::hg_tag::stat, rpc_path_only_in_t, rpc_stat_out_t, rpc_srv_stat); - MARGO_REGISTER(mid, gkfs::hg_tag::decr_size, rpc_trunc_in_t, rpc_err_out_t, rpc_srv_decr_size); - MARGO_REGISTER(mid, gkfs::hg_tag::remove, rpc_rm_node_in_t, rpc_err_out_t, rpc_srv_remove); - MARGO_REGISTER(mid, gkfs::hg_tag::update_metadentry, rpc_update_metadentry_in_t, rpc_err_out_t, + MARGO_REGISTER(mid, gkfs::rpc::tag::fs_config, void, rpc_config_out_t, rpc_srv_get_fs_config); + MARGO_REGISTER(mid, gkfs::rpc::tag::create, rpc_mk_node_in_t, rpc_err_out_t, rpc_srv_create); + MARGO_REGISTER(mid, gkfs::rpc::tag::stat, rpc_path_only_in_t, rpc_stat_out_t, rpc_srv_stat); + MARGO_REGISTER(mid, gkfs::rpc::tag::decr_size, rpc_trunc_in_t, rpc_err_out_t, rpc_srv_decr_size); + MARGO_REGISTER(mid, gkfs::rpc::tag::remove, rpc_rm_node_in_t, rpc_err_out_t, rpc_srv_remove); + MARGO_REGISTER(mid, gkfs::rpc::tag::update_metadentry, rpc_update_metadentry_in_t, rpc_err_out_t, rpc_srv_update_metadentry); - MARGO_REGISTER(mid, gkfs::hg_tag::get_metadentry_size, rpc_path_only_in_t, rpc_get_metadentry_size_out_t, + MARGO_REGISTER(mid, gkfs::rpc::tag::get_metadentry_size, rpc_path_only_in_t, rpc_get_metadentry_size_out_t, rpc_srv_get_metadentry_size); - MARGO_REGISTER(mid, gkfs::hg_tag::update_metadentry_size, rpc_update_metadentry_size_in_t, + MARGO_REGISTER(mid, gkfs::rpc::tag::update_metadentry_size, rpc_update_metadentry_size_in_t, rpc_update_metadentry_size_out_t, rpc_srv_update_metadentry_size); - MARGO_REGISTER(mid, gkfs::hg_tag::get_dirents, rpc_get_dirents_in_t, rpc_get_dirents_out_t, + MARGO_REGISTER(mid, gkfs::rpc::tag::get_dirents, rpc_get_dirents_in_t, rpc_get_dirents_out_t, rpc_srv_get_dirents); #ifdef HAS_SYMLINKS - MARGO_REGISTER(mid, gkfs::hg_tag::mk_symlink, rpc_mk_symlink_in_t, rpc_err_out_t, rpc_srv_mk_symlink); + MARGO_REGISTER(mid, gkfs::rpc::tag::mk_symlink, rpc_mk_symlink_in_t, rpc_err_out_t, rpc_srv_mk_symlink); #endif - MARGO_REGISTER(mid, gkfs::hg_tag::write_data, rpc_write_data_in_t, rpc_data_out_t, rpc_srv_write); - MARGO_REGISTER(mid, gkfs::hg_tag::read_data, rpc_read_data_in_t, rpc_data_out_t, rpc_srv_read); - MARGO_REGISTER(mid, gkfs::hg_tag::trunc_data, rpc_trunc_in_t, rpc_err_out_t, rpc_srv_truncate); - MARGO_REGISTER(mid, gkfs::hg_tag::chunk_stat, rpc_chunk_stat_in_t, rpc_chunk_stat_out_t, rpc_srv_get_chunk_stat); + MARGO_REGISTER(mid, gkfs::rpc::tag::write, rpc_write_data_in_t, rpc_data_out_t, rpc_srv_write); + MARGO_REGISTER(mid, gkfs::rpc::tag::read, rpc_read_data_in_t, rpc_data_out_t, rpc_srv_read); + MARGO_REGISTER(mid, gkfs::rpc::tag::truncate, rpc_trunc_in_t, rpc_err_out_t, rpc_srv_truncate); + MARGO_REGISTER(mid, gkfs::rpc::tag::get_chunk_stat, rpc_chunk_stat_in_t, rpc_chunk_stat_out_t, + rpc_srv_get_chunk_stat); } void shutdown_handler(int dummy) { @@ -241,7 +242,7 @@ void shutdown_handler(int dummy) { } void initialize_loggers() { - std::string path = gkfs::config::logging::daemon_log_path; + std::string path = gkfs::config::log::daemon_log_path; // Try to get log path from env variable std::string env_path_key = DAEMON_ENV_PREFIX; env_path_key += "DAEMON_LOG_PATH"; @@ -250,13 +251,13 @@ void initialize_loggers() { path = env_path; } - spdlog::level::level_enum level = gkfs::logging::get_level(gkfs::config::logging::daemon_log_level); + 
spdlog::level::level_enum level = gkfs::log::get_level(gkfs::config::log::daemon_log_level); // Try to get log path from env variable std::string env_level_key = DAEMON_ENV_PREFIX; env_level_key += "LOG_LEVEL"; char* env_level = getenv(env_level_key.c_str()); if (env_level != nullptr) { - level = gkfs::logging::get_level(env_level); + level = gkfs::log::get_level(env_level); } auto logger_names = std::vector{ @@ -265,7 +266,7 @@ void initialize_loggers() { "ChunkStorage", }; - gkfs::logging::setup(logger_names, level, path); + gkfs::log::setup(logger_names, level, path); } int main(int argc, const char* argv[]) { diff --git a/src/global/log_util.cpp b/src/global/log_util.cpp index 7f8b766de..256f2bca2 100644 --- a/src/global/log_util.cpp +++ b/src/global/log_util.cpp @@ -20,7 +20,7 @@ using namespace std; -spdlog::level::level_enum gkfs::logging::get_level(string level_str) { +spdlog::level::level_enum gkfs::log::get_level(string level_str) { char* parse_end; auto level = strtoul(level_str.c_str(), &parse_end, 10); if (parse_end != (level_str.c_str() + level_str.size())) { @@ -46,7 +46,7 @@ spdlog::level::level_enum gkfs::logging::get_level(string level_str) { return get_level(level); } -spdlog::level::level_enum gkfs::logging::get_level(unsigned long level) { +spdlog::level::level_enum gkfs::log::get_level(unsigned long level) { switch (level) { case 0: return spdlog::level::off; @@ -65,8 +65,8 @@ spdlog::level::level_enum gkfs::logging::get_level(unsigned long level) { } } -void gkfs::logging::setup(const vector& loggers_name, - spdlog::level::level_enum level, const string& path) { +void gkfs::log::setup(const vector& loggers_name, + spdlog::level::level_enum level, const string& path) { /* Create common sink */ auto file_sink = make_shared(path); -- GitLab From 847d1e6c9274664b5bd73b9bdf8ebd69013b1380 Mon Sep 17 00:00:00 2001 From: Marc Vef Date: Thu, 20 Feb 2020 10:47:28 +0100 Subject: [PATCH 15/25] gkfs::metadentry -> gkfs::metadata. 
Added code to gkfs::metadata --- include/client/gkfs_functions.hpp | 2 +- include/client/preload_util.hpp | 2 +- include/daemon/backend/metadata/db.hpp | 6 ++++++ include/daemon/backend/metadata/merge.hpp | 6 ++++++ include/daemon/classes/fs_data.hpp | 10 +++++++--- include/daemon/ops/metadentry.hpp | 23 ++++++++++++----------- include/global/metadata.hpp | 6 +++++- src/client/gkfs_functions.cpp | 8 ++++---- src/client/preload_util.cpp | 2 +- src/client/rpc/forward_metadata.cpp | 3 ++- src/daemon/backend/metadata/db.cpp | 6 ++++++ src/daemon/backend/metadata/merge.cpp | 8 +++++++- src/daemon/classes/fs_data.cpp | 4 ++-- src/daemon/daemon.cpp | 6 +++--- src/daemon/handler/srv_metadata.cpp | 22 +++++++++++----------- src/daemon/ops/metadentry.cpp | 22 ++++++++++++++-------- src/global/metadata.cpp | 5 +++++ 17 files changed, 93 insertions(+), 48 deletions(-) diff --git a/include/client/gkfs_functions.hpp b/include/client/gkfs_functions.hpp index be78f5e30..c830b379e 100644 --- a/include/client/gkfs_functions.hpp +++ b/include/client/gkfs_functions.hpp @@ -39,7 +39,7 @@ namespace gkfs { namespace func { - std::shared_ptr metadata(const std::string& path, bool follow_links = false); + std::shared_ptr metadata(const std::string& path, bool follow_links = false); int check_parent_dir(const std::string& path); diff --git a/include/client/preload_util.hpp b/include/client/preload_util.hpp index ebf25c44f..4382e8a04 100644 --- a/include/client/preload_util.hpp +++ b/include/client/preload_util.hpp @@ -48,7 +48,7 @@ namespace util { return static_cast::type>(e); } - int metadata_to_stat(const std::string& path, const Metadata& md, struct stat& attr); +int metadata_to_stat(const std::string& path, const gkfs::metadata::Metadata& md, struct stat& attr); std::vector> load_hostfile(const std::string& lfpath); diff --git a/include/daemon/backend/metadata/db.hpp b/include/daemon/backend/metadata/db.hpp index 17097948d..4b2e927c3 100644 --- a/include/daemon/backend/metadata/db.hpp +++ b/include/daemon/backend/metadata/db.hpp @@ -20,6 +20,9 @@ namespace rdb = rocksdb; +namespace gkfs { +namespace metadata { + class MetadataDB { private: std::unique_ptr db; @@ -53,4 +56,7 @@ public: void iterate_all(); }; +} // namespace metadata +} // namespace gkfs + #endif //GEKKOFS_METADATA_DB_HPP diff --git a/include/daemon/backend/metadata/merge.hpp b/include/daemon/backend/metadata/merge.hpp index db47cd504..acba86a4f 100644 --- a/include/daemon/backend/metadata/merge.hpp +++ b/include/daemon/backend/metadata/merge.hpp @@ -20,6 +20,9 @@ namespace rdb = rocksdb; +namespace gkfs { +namespace metadata { + enum class OperandID : char { increase_size = 'i', decrease_size = 'd', @@ -102,4 +105,7 @@ public: bool AllowSingleOperand() const override; }; +} // namespace metadata +} // namespace gkfs + #endif // DB_MERGE_HPP diff --git a/include/daemon/classes/fs_data.hpp b/include/daemon/classes/fs_data.hpp index 078246161..2c6cb63b8 100644 --- a/include/daemon/classes/fs_data.hpp +++ b/include/daemon/classes/fs_data.hpp @@ -18,7 +18,11 @@ #include /* Forward declarations */ +namespace gkfs { +namespace metadata { class MetadataDB; +} +} class ChunkStorage; @@ -43,7 +47,7 @@ private: std::string hosts_file_; // Database - std::shared_ptr mdb_; + std::shared_ptr mdb_; // Storage backend std::shared_ptr storage_; @@ -82,9 +86,9 @@ public: void metadir(const std::string& metadir_); - const std::shared_ptr& mdb() const; + const std::shared_ptr& mdb() const; - void mdb(const std::shared_ptr& mdb); + void mdb(const std::shared_ptr& 
mdb); void close_mdb(); diff --git a/include/daemon/ops/metadentry.hpp b/include/daemon/ops/metadentry.hpp index 0900d07ac..1c903a3ad 100644 --- a/include/daemon/ops/metadentry.hpp +++ b/include/daemon/ops/metadentry.hpp @@ -19,24 +19,25 @@ #include namespace gkfs { - namespace metadentry { +namespace metadata { - Metadata get(const std::string& path); +Metadata get(const std::string& path); - std::string get_str(const std::string& path); +std::string get_str(const std::string& path); - size_t get_size(const std::string& path); +size_t get_size(const std::string& path); - std::vector> get_dirents(const std::string& dir); +std::vector> get_dirents(const std::string& dir); - void create(const std::string& path, Metadata& md); +void create(const std::string& path, Metadata& md); - void update(const std::string& path, Metadata& md); +void update(const std::string& path, Metadata& md); - void update_size(const std::string& path, size_t io_size, off_t offset, bool append); +void update_size(const std::string& path, size_t io_size, off_t offset, bool append); - void remove_node(const std::string& path); - } -} +void remove_node(const std::string& path); + +} // namespace metadata +} // namespace gkfs #endif //GEKKOFS_METADENTRY_HPP diff --git a/include/global/metadata.hpp b/include/global/metadata.hpp index 2936c48c9..71901438b 100644 --- a/include/global/metadata.hpp +++ b/include/global/metadata.hpp @@ -21,10 +21,11 @@ #include #include +namespace gkfs { +namespace metadata { constexpr mode_t LINK_MODE = ((S_IRWXU | S_IRWXG | S_IRWXO) | S_IFLNK); - class Metadata { private: time_t atime_; // access time. gets updated on file access unless mounted with noatime @@ -99,5 +100,8 @@ public: #endif }; +} // namespace metadata +} // namespace gkfs + #endif //FS_METADATA_H diff --git a/src/client/gkfs_functions.cpp b/src/client/gkfs_functions.cpp index e2b780a8e..0fb053297 100644 --- a/src/client/gkfs_functions.cpp +++ b/src/client/gkfs_functions.cpp @@ -34,7 +34,7 @@ extern "C" { using namespace std; -std::shared_ptr gkfs::func::metadata(const string& path, bool follow_links) { +std::shared_ptr gkfs::func::metadata(const string& path, bool follow_links) { std::string attr; auto err = gkfs::rpc::forward_stat(path, attr); if (err) { @@ -42,17 +42,17 @@ std::shared_ptr gkfs::func::metadata(const string& path, bool follow_l } #ifdef HAS_SYMLINKS if (follow_links) { - Metadata md{attr}; + gkfs::metadata::Metadata md{attr}; while (md.is_link()) { err = gkfs::rpc::forward_stat(md.target_path(), attr); if (err) { return nullptr; } - md = Metadata{attr}; + md = gkfs::metadata::Metadata{attr}; } } #endif - return make_shared(attr); + return make_shared(attr); } int gkfs::func::check_parent_dir(const std::string& path) { diff --git a/src/client/preload_util.cpp b/src/client/preload_util.cpp index f23bf2b63..796930d61 100644 --- a/src/client/preload_util.cpp +++ b/src/client/preload_util.cpp @@ -40,7 +40,7 @@ using namespace std; * @param attr * @return */ -int gkfs::util::metadata_to_stat(const std::string& path, const Metadata& md, struct stat& attr) { +int gkfs::util::metadata_to_stat(const std::string& path, const gkfs::metadata::Metadata& md, struct stat& attr) { /* Populate default values */ attr.st_dev = makedev(0, 0); diff --git a/src/client/rpc/forward_metadata.cpp b/src/client/rpc/forward_metadata.cpp index 62f0019ef..6e992a7b0 100644 --- a/src/client/rpc/forward_metadata.cpp +++ b/src/client/rpc/forward_metadata.cpp @@ -235,7 +235,8 @@ int forward_decr_size(const std::string& path, size_t length) { } } 
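For illustration, the pieces renamed in this patch compose on the client stat path roughly as follows; this is a minimal sketch, assuming the GekkoFS client headers that declare gkfs::rpc::forward_stat, gkfs::metadata::Metadata and gkfs::util::metadata_to_stat are included, with error handling reduced to the bare minimum (example_stat itself is a hypothetical helper, not part of the patch):

    // Hypothetical helper: resolve a path into a POSIX struct stat using the
    // namespaces introduced above.
    int example_stat(const std::string& path, struct stat* buf) {
        std::string attr;
        if (gkfs::rpc::forward_stat(path, attr) != 0)         // RPC to the daemon holding the metadentry
            return -1;
        gkfs::metadata::Metadata md{attr};                    // parse the serialized metadentry
        return gkfs::util::metadata_to_stat(path, md, *buf);  // fill the stat structure
    }
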
-int forward_update_metadentry(const string& path, const Metadata& md, const MetadentryUpdateFlags& md_flags) { +int forward_update_metadentry(const string& path, const gkfs::metadata::Metadata& md, + const MetadentryUpdateFlags& md_flags) { auto endp = CTX->hosts().at(CTX->distributor()->locate_file_metadata(path)); diff --git a/src/daemon/backend/metadata/db.cpp b/src/daemon/backend/metadata/db.cpp index 592e6028e..f0226ce22 100644 --- a/src/daemon/backend/metadata/db.cpp +++ b/src/daemon/backend/metadata/db.cpp @@ -22,6 +22,9 @@ extern "C" { #include } +namespace gkfs { +namespace metadata { + MetadataDB::MetadataDB(const std::string& path) : path(path) { // Optimize RocksDB. This is the easiest way to get RocksDB to perform well @@ -190,3 +193,6 @@ void MetadataDB::iterate_all() { void MetadataDB::optimize_rocksdb_options(rdb::Options& options) { options.max_successive_merges = 128; } + +} // namespace metadata +} // namespace gkfs \ No newline at end of file diff --git a/src/daemon/backend/metadata/merge.cpp b/src/daemon/backend/metadata/merge.cpp index e279f36b1..067b08354 100644 --- a/src/daemon/backend/metadata/merge.cpp +++ b/src/daemon/backend/metadata/merge.cpp @@ -15,6 +15,9 @@ using namespace std; +namespace gkfs { +namespace metadata { + string MergeOperand::serialize_id() const { string s; s.reserve(2); @@ -52,7 +55,7 @@ IncreaseSizeOperand::IncreaseSizeOperand(const rdb::Slice& serialized_op) { //Parse append flag assert(serialized_op[chrs_parsed] == false_char || - serialized_op[chrs_parsed] == true_char); + serialized_op[chrs_parsed] == true_char); append = serialized_op[chrs_parsed] != false_char; //check that we consumed all the input string assert(chrs_parsed + 1 == serialized_op.size()); @@ -173,3 +176,6 @@ const char* MetadataMergeOperator::Name() const { bool MetadataMergeOperator::AllowSingleOperand() const { return true; } + +} // namespace metadata +} // namespace gkfs \ No newline at end of file diff --git a/src/daemon/classes/fs_data.cpp b/src/daemon/classes/fs_data.cpp index 1cd689177..25ed26721 100644 --- a/src/daemon/classes/fs_data.cpp +++ b/src/daemon/classes/fs_data.cpp @@ -25,11 +25,11 @@ void FsData::spdlogger(const std::shared_ptr& spdlogger) { FsData::spdlogger_ = spdlogger; } -const std::shared_ptr& FsData::mdb() const { +const std::shared_ptr& FsData::mdb() const { return mdb_; } -void FsData::mdb(const std::shared_ptr& mdb) { +void FsData::mdb(const std::shared_ptr& mdb) { mdb_ = mdb; } diff --git a/src/daemon/daemon.cpp b/src/daemon/daemon.cpp index c58bffc36..c8460d067 100644 --- a/src/daemon/daemon.cpp +++ b/src/daemon/daemon.cpp @@ -47,7 +47,7 @@ void init_environment() { std::string metadata_path = GKFS_DATA->metadir() + "/rocksdb"s; GKFS_DATA->spdlogger()->debug("{}() Initializing metadata DB: '{}'", __func__, metadata_path); try { - GKFS_DATA->mdb(std::make_shared(metadata_path)); + GKFS_DATA->mdb(std::make_shared(metadata_path)); } catch (const std::exception& e) { GKFS_DATA->spdlogger()->error("{}() Failed to initialize metadata DB: {}", __func__, e.what()); throw; @@ -90,9 +90,9 @@ void init_environment() { GKFS_DATA->link_cnt_state(gkfs::config::metadata::use_link_cnt); GKFS_DATA->blocks_state(gkfs::config::metadata::use_blocks); // Create metadentry for root directory - Metadata root_md{S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO}; + gkfs::metadata::Metadata root_md{S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO}; try { - gkfs::metadentry::create("/", root_md); + gkfs::metadata::create("/", root_md); } catch (const std::exception& e) { throw 
runtime_error("Failed to write root metadentry to KV store: "s + e.what()); } diff --git a/src/daemon/handler/srv_metadata.cpp b/src/daemon/handler/srv_metadata.cpp index cafde927c..31c8fc225 100644 --- a/src/daemon/handler/srv_metadata.cpp +++ b/src/daemon/handler/srv_metadata.cpp @@ -30,10 +30,10 @@ static hg_return_t rpc_srv_create(hg_handle_t handle) { GKFS_DATA->spdlogger()->error("{}() Failed to retrieve input from handle", __func__); assert(ret == HG_SUCCESS); GKFS_DATA->spdlogger()->debug("{}() Got RPC with path '{}'", __func__, in.path); - Metadata md(in.mode); + gkfs::metadata::Metadata md(in.mode); try { // create metadentry - gkfs::metadentry::create(in.path, md); + gkfs::metadata::create(in.path, md); out.err = 0; } catch (const std::exception& e) { GKFS_DATA->spdlogger()->error("{}() Failed to create metadentry: '{}'", __func__, e.what()); @@ -65,7 +65,7 @@ static hg_return_t rpc_srv_stat(hg_handle_t handle) { try { // get the metadata - val = gkfs::metadentry::get_str(in.path); + val = gkfs::metadata::get_str(in.path); out.db_val = val.c_str(); out.err = 0; GKFS_DATA->spdlogger()->debug("{}() Sending output mode '{}'", __func__, out.db_val); @@ -137,7 +137,7 @@ static hg_return_t rpc_srv_remove(hg_handle_t handle) { try { // Remove metadentry if exists on the node // and remove all chunks for that file - gkfs::metadentry::remove_node(in.path); + gkfs::metadata::remove_node(in.path); out.err = 0; } catch (const NotFoundException& e) { /* The metadentry was not found on this node, @@ -180,7 +180,7 @@ static hg_return_t rpc_srv_update_metadentry(hg_handle_t handle) { // do update try { - Metadata md = gkfs::metadentry::get(in.path); + gkfs::metadata::Metadata md = gkfs::metadata::get(in.path); if (in.block_flag == HG_TRUE) md.blocks(in.blocks); if (in.nlink_flag == HG_TRUE) @@ -193,7 +193,7 @@ static hg_return_t rpc_srv_update_metadentry(hg_handle_t handle) { md.mtime(in.mtime); if (in.ctime_flag == HG_TRUE) md.ctime(in.ctime); - gkfs::metadentry::update(in.path, md); + gkfs::metadata::update(in.path, md); out.err = 0; } catch (const std::exception& e) { //TODO handle NotFoundException @@ -228,7 +228,7 @@ static hg_return_t rpc_srv_update_metadentry_size(hg_handle_t handle) { in.offset, in.append); try { - gkfs::metadentry::update_size(in.path, in.size, in.offset, (in.append == HG_TRUE)); + gkfs::metadata::update_size(in.path, in.size, in.offset, (in.append == HG_TRUE)); out.err = 0; //TODO the actual size of the file could be different after the size update // do to concurrency on size @@ -268,7 +268,7 @@ static hg_return_t rpc_srv_get_metadentry_size(hg_handle_t handle) { // do update try { - out.ret_size = gkfs::metadentry::get_size(in.path); + out.ret_size = gkfs::metadata::get_size(in.path); out.err = 0; } catch (const NotFoundException& e) { GKFS_DATA->spdlogger()->debug("{}() Entry not found: '{}'", __func__, in.path); @@ -313,7 +313,7 @@ static hg_return_t rpc_srv_get_dirents(hg_handle_t handle) { auto bulk_size = margo_bulk_get_size(in.bulk_handle); //Get directory entries from local DB - std::vector> entries = gkfs::metadentry::get_dirents(in.path); + std::vector> entries = gkfs::metadata::get_dirents(in.path); out.dirents_size = entries.size(); @@ -394,9 +394,9 @@ static hg_return_t rpc_srv_mk_symlink(hg_handle_t handle) { GKFS_DATA->spdlogger()->debug("{}() Got RPC with path '{}'", __func__, in.path); try { - Metadata md = {LINK_MODE, in.target_path}; + gkfs::metadata::Metadata md = {gkfs::metadata::LINK_MODE, in.target_path}; // create metadentry - 
gkfs::metadentry::create(in.path, md); + gkfs::metadata::create(in.path, md); out.err = 0; } catch (const std::exception& e) { GKFS_DATA->spdlogger()->error("{}() Failed to create metadentry: {}", __func__, e.what()); diff --git a/src/daemon/ops/metadentry.cpp b/src/daemon/ops/metadentry.cpp index 9ef31a333..169c7ac36 100644 --- a/src/daemon/ops/metadentry.cpp +++ b/src/daemon/ops/metadentry.cpp @@ -18,13 +18,16 @@ using namespace std; +namespace gkfs { +namespace metadata { + /** * Returns the metadata of an object at a specific path. The metadata can be of dummy values if configured * @param path * @param attr * @return */ -Metadata gkfs::metadentry::get(const std::string& path) { +Metadata get(const std::string& path) { return Metadata(get_str(path)); } @@ -33,7 +36,7 @@ Metadata gkfs::metadentry::get(const std::string& path) { * @param path * @return */ -std::string gkfs::metadentry::get_str(const std::string& path) { +std::string get_str(const std::string& path) { return GKFS_DATA->mdb()->get(path); } @@ -43,7 +46,7 @@ std::string gkfs::metadentry::get_str(const std::string& path) { * @param ret_size (return val) * @return err */ -size_t gkfs::metadentry::get_size(const string& path) { +size_t get_size(const string& path) { return get(path).size(); } @@ -52,7 +55,7 @@ size_t gkfs::metadentry::get_size(const string& path) { * @param dir * @return */ -std::vector> gkfs::metadentry::get_dirents(const std::string& dir) { +std::vector> get_dirents(const std::string& dir) { return GKFS_DATA->mdb()->get_dirents(dir); } @@ -61,7 +64,7 @@ std::vector> gkfs::metadentry::get_dirents(const st * @param path * @param mode */ -void gkfs::metadentry::create(const std::string& path, Metadata& md) { +void create(const std::string& path, Metadata& md) { // update metadata object based on what metadata is needed if (GKFS_DATA->atime_state() || GKFS_DATA->mtime_state() || GKFS_DATA->ctime_state()) { @@ -83,7 +86,7 @@ void gkfs::metadentry::create(const std::string& path, Metadata& md) { * @param path * @param md */ -void gkfs::metadentry::update(const string& path, Metadata& md) { +void update(const string& path, Metadata& md) { GKFS_DATA->mdb()->update(path, path, md.serialize()); } @@ -93,7 +96,7 @@ void gkfs::metadentry::update(const string& path, Metadata& md) { * @param io_size * @return the updated size */ -void gkfs::metadentry::update_size(const string& path, size_t io_size, off64_t offset, bool append) { +void update_size(const string& path, size_t io_size, off64_t offset, bool append) { GKFS_DATA->mdb()->increase_size(path, io_size + offset, append); } @@ -102,7 +105,10 @@ void gkfs::metadentry::update_size(const string& path, size_t io_size, off64_t o * @param path * @return */ -void gkfs::metadentry::remove_node(const string& path) { +void remove_node(const string& path) { GKFS_DATA->mdb()->remove(path); // remove metadentry GKFS_DATA->storage()->destroy_chunk_space(path); // destroys all chunks for the path on this node } + +} // namespace metadata +} // namespace gkfs \ No newline at end of file diff --git a/src/global/metadata.cpp b/src/global/metadata.cpp index b3cb40c4d..67ac72063 100644 --- a/src/global/metadata.cpp +++ b/src/global/metadata.cpp @@ -24,6 +24,8 @@ extern "C" { #include #include +namespace gkfs { +namespace metadata { static const char MSP = '|'; // metadata separator @@ -251,4 +253,7 @@ bool Metadata::is_link() const { return S_ISLNK(mode_); } +} // namespace metadata +} // namespace gkfs + #endif -- GitLab From f6ba5693f530620dc7fb0aad11619ad0ef2d4944 Mon Sep 17 00:00:00 
2001 From: Marc Vef Date: Thu, 20 Feb 2020 16:16:03 +0100 Subject: [PATCH 16/25] namespace added: gkfs::data --- include/daemon/backend/data/chunk_storage.hpp | 6 ++++++ include/daemon/classes/fs_data.hpp | 11 +++++++---- src/daemon/backend/data/chunk_storage.cpp | 8 +++++++- src/daemon/classes/fs_data.cpp | 4 ++-- src/daemon/daemon.cpp | 3 ++- 5 files changed, 24 insertions(+), 8 deletions(-) diff --git a/include/daemon/backend/data/chunk_storage.hpp b/include/daemon/backend/data/chunk_storage.hpp index af7bd4232..8a6db6784 100644 --- a/include/daemon/backend/data/chunk_storage.hpp +++ b/include/daemon/backend/data/chunk_storage.hpp @@ -27,6 +27,9 @@ namespace spdlog { class logger; } +namespace gkfs { +namespace data { + struct ChunkStat { unsigned long chunk_size; unsigned long chunk_total; @@ -73,4 +76,7 @@ public: ChunkStat chunk_stat() const; }; +} // namespace data +} // namespace gkfs + #endif //GEKKOFS_CHUNK_STORAGE_HPP diff --git a/include/daemon/classes/fs_data.hpp b/include/daemon/classes/fs_data.hpp index 2c6cb63b8..78333283d 100644 --- a/include/daemon/classes/fs_data.hpp +++ b/include/daemon/classes/fs_data.hpp @@ -22,9 +22,12 @@ namespace gkfs { namespace metadata { class MetadataDB; } -} +namespace data { class ChunkStorage; +} +} + #include #include @@ -49,7 +52,7 @@ private: // Database std::shared_ptr mdb_; // Storage backend - std::shared_ptr storage_; + std::shared_ptr storage_; // configurable metadata bool atime_state_; @@ -92,9 +95,9 @@ public: void close_mdb(); - const std::shared_ptr& storage() const; + const std::shared_ptr& storage() const; - void storage(const std::shared_ptr& storage); + void storage(const std::shared_ptr& storage); const std::string& bind_addr() const; diff --git a/src/daemon/backend/data/chunk_storage.cpp b/src/daemon/backend/data/chunk_storage.cpp index 3f194bd20..3d9b69fec 100644 --- a/src/daemon/backend/data/chunk_storage.cpp +++ b/src/daemon/backend/data/chunk_storage.cpp @@ -25,6 +25,9 @@ extern "C" { namespace bfs = boost::filesystem; using namespace std; +namespace gkfs { +namespace data { + string ChunkStorage::absolute(const string& internal_path) const { assert(gkfs::path::is_relative(internal_path)); return root_path + '/' + internal_path; @@ -213,4 +216,7 @@ ChunkStat ChunkStorage::chunk_stat() const { return {chunksize, bytes_total / chunksize, bytes_free / chunksize}; -} \ No newline at end of file +} + +} // namespace data +} // namespace gkfs \ No newline at end of file diff --git a/src/daemon/classes/fs_data.cpp b/src/daemon/classes/fs_data.cpp index 25ed26721..3b519c4e1 100644 --- a/src/daemon/classes/fs_data.cpp +++ b/src/daemon/classes/fs_data.cpp @@ -37,11 +37,11 @@ void FsData::close_mdb() { mdb_.reset(); } -const std::shared_ptr& FsData::storage() const { +const std::shared_ptr& FsData::storage() const { return storage_; } -void FsData::storage(const std::shared_ptr& storage) { +void FsData::storage(const std::shared_ptr& storage) { storage_ = storage; } diff --git a/src/daemon/daemon.cpp b/src/daemon/daemon.cpp index c8460d067..c5667d088 100644 --- a/src/daemon/daemon.cpp +++ b/src/daemon/daemon.cpp @@ -58,7 +58,8 @@ void init_environment() { GKFS_DATA->spdlogger()->debug("{}() Initializing storage backend: '{}'", __func__, chunk_storage_path); bfs::create_directories(chunk_storage_path); try { - GKFS_DATA->storage(std::make_shared(chunk_storage_path, gkfs::config::rpc::chunksize)); + GKFS_DATA->storage( + std::make_shared(chunk_storage_path, gkfs::config::rpc::chunksize)); } catch (const std::exception& e) { 
GKFS_DATA->spdlogger()->error("{}() Failed to initialize storage backend: {}", __func__, e.what()); throw; -- GitLab From 7946ab2502f2747ca1dbc5e50c03e5f8e6987aec Mon Sep 17 00:00:00 2001 From: Marc Vef Date: Thu, 20 Feb 2020 17:25:59 +0100 Subject: [PATCH 17/25] Chunk_util into gkfs::util namespace + comment added --- include/global/chunk_calc_util.hpp | 22 +++++++++++++++------- src/client/rpc/forward_data.cpp | 25 +++++++++++++------------ src/daemon/handler/srv_data.cpp | 4 ++-- 3 files changed, 30 insertions(+), 21 deletions(-) diff --git a/include/global/chunk_calc_util.hpp b/include/global/chunk_calc_util.hpp index 10dbf9cde..4e6a51597 100644 --- a/include/global/chunk_calc_util.hpp +++ b/include/global/chunk_calc_util.hpp @@ -16,6 +16,9 @@ #include +namespace gkfs { +namespace util { + /** * Compute the base2 logarithm for 64 bit integers */ @@ -23,12 +26,10 @@ inline int log2(uint64_t n) { /* see http://stackoverflow.com/questions/11376288/fast-computing-of-log2-for-64-bit-integers */ static const int table[64] = { - 0, 58, 1, 59, 47, 53, 2, 60, 39, 48, 27, 54, 33, 42, 3, 61, - 51, 37, 40, 49, 18, 28, 20, 55, 30, 34, 11, 43, 14, 22, 4, 62, - 57, 46, 52, 38, 26, 32, 41, 50, 36, 17, 19, 29, 10, 13, 21, 56, - 45, 25, 31, 35, 16, 9, 12, 44, 24, 15, 8, 23, 7, 6, 5, 63 }; - - assert(n > 0); // TODO This needs to be removed and a check for CHUNKSIZE has to be put somewhere + 0, 58, 1, 59, 47, 53, 2, 60, 39, 48, 27, 54, 33, 42, 3, 61, + 51, 37, 40, 49, 18, 28, 20, 55, 30, 34, 11, 43, 14, 22, 4, 62, + 57, 46, 52, 38, 26, 32, 41, 50, 36, 17, 19, 29, 10, 13, 21, 56, + 45, 25, 31, 35, 16, 9, 12, 44, 24, 15, 8, 23, 7, 6, 5, 63 }; n |= n >> 1; n |= n >> 2; @@ -88,7 +89,11 @@ inline size_t chnk_rpad(const off64_t offset, const size_t chnk_size) { * chunk_id(0,4) = 0; */ inline uint64_t chnk_id_for_offset(const off64_t offset, const size_t chnk_size) { - // TODO This does not work for very large offsets: `offset / chnk_size` works + /* + * This does not work for offsets that use the 64th bit, i.e., 9223372036854775808. + * 9223372036854775808 - 1 uses 63 bits and still works. `offset / chnk_size` works with the 64th bit. + * With this number we can address more than 19,300,000 exabytes of data though. Hi future me? + */ return static_cast(chnk_lalign(offset, chnk_size) >> log2(chnk_size)); } @@ -106,4 +111,7 @@ inline uint64_t chnk_count_for_offset(const off64_t offset, const size_t count, (chnk_start >> log2(chnk_size)) + 1); } +} // namespace util +} // namespace gkfs + #endif diff --git a/src/client/rpc/forward_data.cpp b/src/client/rpc/forward_data.cpp index dbe80cc30..f68f65e48 100644 --- a/src/client/rpc/forward_data.cpp +++ b/src/client/rpc/forward_data.cpp @@ -42,8 +42,8 @@ ssize_t forward_write(const string& path, const void* buf, const bool append_fla // which interval to look for chunks off64_t offset = append_flag ? 
in_offset : (updated_metadentry_size - write_size); - auto chnk_start = chnk_id_for_offset(offset, gkfs::config::rpc::chunksize); - auto chnk_end = chnk_id_for_offset((offset + write_size) - 1, gkfs::config::rpc::chunksize); + auto chnk_start = gkfs::util::chnk_id_for_offset(offset, gkfs::config::rpc::chunksize); + auto chnk_end = gkfs::util::chnk_id_for_offset((offset + write_size) - 1, gkfs::config::rpc::chunksize); // Collect all chunk ids within count that have the same destination so // that those are send in one rpc bulk transfer @@ -108,12 +108,12 @@ ssize_t forward_write(const string& path, const void* buf, const bool append_fla // receiver of first chunk must subtract the offset from first chunk if (target == chnk_start_target) { - total_chunk_size -= chnk_lpad(offset, gkfs::config::rpc::chunksize); + total_chunk_size -= gkfs::util::chnk_lpad(offset, gkfs::config::rpc::chunksize); } // receiver of last chunk must subtract if (target == chnk_end_target) { - total_chunk_size -= chnk_rpad(offset + write_size, gkfs::config::rpc::chunksize); + total_chunk_size -= gkfs::util::chnk_rpad(offset + write_size, gkfs::config::rpc::chunksize); } auto endp = CTX->hosts().at(target); @@ -126,7 +126,7 @@ ssize_t forward_write(const string& path, const void* buf, const bool append_fla path, // first offset in targets is the chunk with // a potential offset - chnk_lpad(offset, gkfs::config::rpc::chunksize), + gkfs::util::chnk_lpad(offset, gkfs::config::rpc::chunksize), target, CTX->hosts().size(), // number of chunks handled by that destination @@ -198,8 +198,8 @@ ssize_t forward_read(const string& path, void* buf, const off64_t offset, const // Calculate chunkid boundaries and numbers so that daemons know in which // interval to look for chunks - auto chnk_start = chnk_id_for_offset(offset, gkfs::config::rpc::chunksize); - auto chnk_end = chnk_id_for_offset((offset + read_size - 1), gkfs::config::rpc::chunksize); + auto chnk_start = gkfs::util::chnk_id_for_offset(offset, gkfs::config::rpc::chunksize); + auto chnk_end = gkfs::util::chnk_id_for_offset((offset + read_size - 1), gkfs::config::rpc::chunksize); // Collect all chunk ids within count that have the same destination so // that those are send in one rpc bulk transfer @@ -264,12 +264,12 @@ ssize_t forward_read(const string& path, void* buf, const off64_t offset, const // receiver of first chunk must subtract the offset from first chunk if (target == chnk_start_target) { - total_chunk_size -= chnk_lpad(offset, gkfs::config::rpc::chunksize); + total_chunk_size -= gkfs::util::chnk_lpad(offset, gkfs::config::rpc::chunksize); } // receiver of last chunk must subtract if (target == chnk_end_target) { - total_chunk_size -= chnk_rpad(offset + read_size, gkfs::config::rpc::chunksize); + total_chunk_size -= gkfs::util::chnk_rpad(offset + read_size, gkfs::config::rpc::chunksize); } auto endp = CTX->hosts().at(target); @@ -282,7 +282,7 @@ ssize_t forward_read(const string& path, void* buf, const off64_t offset, const path, // first offset in targets is the chunk with // a potential offset - chnk_lpad(offset, gkfs::config::rpc::chunksize), + gkfs::util::chnk_lpad(offset, gkfs::config::rpc::chunksize), target, CTX->hosts().size(), // number of chunks handled by that destination @@ -355,8 +355,9 @@ int forward_truncate(const std::string& path, size_t current_size, size_t new_si // Find out which data servers need to delete data chunks in order to // contact only them - const unsigned int chunk_start = chnk_id_for_offset(new_size, 
gkfs::config::rpc::chunksize); - const unsigned int chunk_end = chnk_id_for_offset(current_size - new_size - 1, gkfs::config::rpc::chunksize); + const unsigned int chunk_start = gkfs::util::chnk_id_for_offset(new_size, gkfs::config::rpc::chunksize); + const unsigned int chunk_end = gkfs::util::chnk_id_for_offset(current_size - new_size - 1, + gkfs::config::rpc::chunksize); std::unordered_set hosts; for (unsigned int chunk_id = chunk_start; chunk_id <= chunk_end; ++chunk_id) { diff --git a/src/daemon/handler/srv_data.cpp b/src/daemon/handler/srv_data.cpp index afaa0fd70..e69bd160f 100644 --- a/src/daemon/handler/srv_data.cpp +++ b/src/daemon/handler/srv_data.cpp @@ -524,10 +524,10 @@ static hg_return_t rpc_srv_truncate(hg_handle_t handle) { } GKFS_DATA->spdlogger()->debug("{}() path: '{}', length: {}", __func__, in.path, in.length); - unsigned int chunk_start = chnk_id_for_offset(in.length, gkfs::config::rpc::chunksize); + unsigned int chunk_start = gkfs::util::chnk_id_for_offset(in.length, gkfs::config::rpc::chunksize); // If we trunc in the the middle of a chunk, do not delete that chunk - auto left_pad = chnk_lpad(in.length, gkfs::config::rpc::chunksize); + auto left_pad = gkfs::util::chnk_lpad(in.length, gkfs::config::rpc::chunksize); if (left_pad != 0) { GKFS_DATA->storage()->truncate_chunk(in.path, chunk_start, left_pad); ++chunk_start; -- GitLab From 942cac61c09384267f196656b05c7e7836062a91 Mon Sep 17 00:00:00 2001 From: Marc Vef Date: Thu, 20 Feb 2020 17:38:23 +0100 Subject: [PATCH 18/25] Distributor into gkfs::rpc namespace --- include/client/preload_context.hpp | 16 ++++++----- include/global/rpc/distributor.hpp | 44 +++++++++++++++++------------- src/client/preload.cpp | 3 +- src/client/preload_context.cpp | 4 +-- src/daemon/handler/srv_data.cpp | 4 +-- src/global/rpc/distributor.cpp | 30 ++++++++++++-------- 6 files changed, 58 insertions(+), 43 deletions(-) diff --git a/include/client/preload_context.hpp b/include/client/preload_context.hpp index e7c0a98b4..f28c26937 100644 --- a/include/client/preload_context.hpp +++ b/include/client/preload_context.hpp @@ -26,12 +26,14 @@ /* Forward declarations */ class OpenFileMap; -class Distributor; namespace gkfs { - namespace log { - struct logger; - } +namespace rpc { +class Distributor; +} +namespace log { +struct logger; +} } struct FsConfig { @@ -65,7 +67,7 @@ private: PreloadContext(); std::shared_ptr ofm_; - std::shared_ptr distributor_; + std::shared_ptr distributor_; std::shared_ptr fs_conf_; std::string cwd_; @@ -124,9 +126,9 @@ public: const std::shared_ptr& file_map() const; - void distributor(std::shared_ptr distributor); + void distributor(std::shared_ptr distributor); - std::shared_ptr distributor() const; + std::shared_ptr distributor() const; const std::shared_ptr& fs_conf() const; diff --git a/include/global/rpc/distributor.hpp b/include/global/rpc/distributor.hpp index 79ac371ae..2a0b79ff3 100644 --- a/include/global/rpc/distributor.hpp +++ b/include/global/rpc/distributor.hpp @@ -18,52 +18,58 @@ #include #include -using ChunkID = unsigned int; -using Host = unsigned int; +namespace gkfs { +namespace rpc { + +using chunkid_t = unsigned int; +using host_t = unsigned int; class Distributor { public: - virtual Host localhost() const = 0; + virtual host_t localhost() const = 0; - virtual Host locate_data(const std::string& path, const ChunkID& chnk_id) const = 0; + virtual host_t locate_data(const std::string& path, const chunkid_t& chnk_id) const = 0; - virtual Host locate_file_metadata(const std::string& path) const = 0; 
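A small usage sketch of the relocated distributor interface; the include path and the host count are assumptions for illustration only, while the class and method names follow the code in this patch:

    #include <global/rpc/distributor.hpp>
    #include <iostream>

    int main() {
        // Pretend this process is daemon 0 of 4 daemons.
        gkfs::rpc::SimpleHashDistributor distributor(0, 4);

        // Each (path, chunk id) pair is hashed independently, so the chunks of
        // one file are spread over all hosts.
        for (gkfs::rpc::chunkid_t chnk = 0; chnk < 4; ++chnk)
            std::cout << "/foo chunk " << chnk << " -> host "
                      << distributor.locate_data("/foo", chnk) << '\n';

        // The metadata of a path always maps to exactly one host.
        std::cout << "/foo metadata -> host "
                  << distributor.locate_file_metadata("/foo") << '\n';
        return 0;
    }
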
+ virtual host_t locate_file_metadata(const std::string& path) const = 0; - virtual std::vector locate_directory_metadata(const std::string& path) const = 0; + virtual std::vector locate_directory_metadata(const std::string& path) const = 0; }; class SimpleHashDistributor : public Distributor { private: - Host localhost_; + host_t localhost_; unsigned int hosts_size_; - std::vector all_hosts_; + std::vector all_hosts_; std::hash str_hash; public: - SimpleHashDistributor(Host localhost, unsigned int hosts_size); + SimpleHashDistributor(host_t localhost, unsigned int hosts_size); - Host localhost() const override; + host_t localhost() const override; - Host locate_data(const std::string& path, const ChunkID& chnk_id) const override; + host_t locate_data(const std::string& path, const chunkid_t& chnk_id) const override; - Host locate_file_metadata(const std::string& path) const override; + host_t locate_file_metadata(const std::string& path) const override; - std::vector locate_directory_metadata(const std::string& path) const override; + std::vector locate_directory_metadata(const std::string& path) const override; }; class LocalOnlyDistributor : public Distributor { private: - Host localhost_; + host_t localhost_; public: - explicit LocalOnlyDistributor(Host localhost); + explicit LocalOnlyDistributor(host_t localhost); - Host localhost() const override; + host_t localhost() const override; - Host locate_data(const std::string& path, const ChunkID& chnk_id) const override; + host_t locate_data(const std::string& path, const chunkid_t& chnk_id) const override; - Host locate_file_metadata(const std::string& path) const override; + host_t locate_file_metadata(const std::string& path) const override; - std::vector locate_directory_metadata(const std::string& path) const override; + std::vector locate_directory_metadata(const std::string& path) const override; }; +} // namespace rpc +} // namespace gkfs + #endif //GEKKOFS_RPC_LOCATOR_HPP diff --git a/src/client/preload.cpp b/src/client/preload.cpp index 370b96667..beb55b4ec 100644 --- a/src/client/preload.cpp +++ b/src/client/preload.cpp @@ -96,7 +96,8 @@ void init_ld_environment_() { } /* Setup distributor */ - auto simple_hash_dist = std::make_shared(CTX->local_host_id(), CTX->hosts().size()); + auto simple_hash_dist = std::make_shared(CTX->local_host_id(), + CTX->hosts().size()); CTX->distributor(simple_hash_dist); LOG(INFO, "Retrieving file system configuration..."); diff --git a/src/client/preload_context.cpp b/src/client/preload_context.cpp index 7457cf66c..9993d127f 100644 --- a/src/client/preload_context.cpp +++ b/src/client/preload_context.cpp @@ -191,11 +191,11 @@ const std::shared_ptr& PreloadContext::file_map() const { return ofm_; } -void PreloadContext::distributor(std::shared_ptr d) { +void PreloadContext::distributor(std::shared_ptr d) { distributor_ = d; } -std::shared_ptr PreloadContext::distributor() const { +std::shared_ptr PreloadContext::distributor() const { return distributor_; } diff --git a/src/daemon/handler/srv_data.cpp b/src/daemon/handler/srv_data.cpp index e69bd160f..4ec7dd5d1 100644 --- a/src/daemon/handler/srv_data.cpp +++ b/src/daemon/handler/srv_data.cpp @@ -163,7 +163,7 @@ static hg_return_t rpc_srv_write(hg_handle_t handle) { } auto const host_id = in.host_id; auto const host_size = in.host_size; - SimpleHashDistributor distributor(host_id, host_size); + gkfs::rpc::SimpleHashDistributor distributor(host_id, host_size); auto path = make_shared(in.path); // chnk_ids used by this host @@ -369,7 +369,7 @@ static 
hg_return_t rpc_srv_read(hg_handle_t handle) { } auto const host_id = in.host_id; auto const host_size = in.host_size; - SimpleHashDistributor distributor(host_id, host_size); + gkfs::rpc::SimpleHashDistributor distributor(host_id, host_size); auto path = make_shared(in.path); // chnk_ids used by this host diff --git a/src/global/rpc/distributor.cpp b/src/global/rpc/distributor.cpp index d8ea2803a..ab0597853 100644 --- a/src/global/rpc/distributor.cpp +++ b/src/global/rpc/distributor.cpp @@ -15,52 +15,58 @@ using namespace std; +namespace gkfs { +namespace rpc { + SimpleHashDistributor:: -SimpleHashDistributor(Host localhost, unsigned int hosts_size) : +SimpleHashDistributor(host_t localhost, unsigned int hosts_size) : localhost_(localhost), hosts_size_(hosts_size), all_hosts_(hosts_size) { ::iota(all_hosts_.begin(), all_hosts_.end(), 0); } -Host SimpleHashDistributor:: +host_t SimpleHashDistributor:: localhost() const { return localhost_; } -Host SimpleHashDistributor:: -locate_data(const string& path, const ChunkID& chnk_id) const { +host_t SimpleHashDistributor:: +locate_data(const string& path, const chunkid_t& chnk_id) const { return str_hash(path + ::to_string(chnk_id)) % hosts_size_; } -Host SimpleHashDistributor:: +host_t SimpleHashDistributor:: locate_file_metadata(const string& path) const { return str_hash(path) % hosts_size_; } -::vector SimpleHashDistributor:: +::vector SimpleHashDistributor:: locate_directory_metadata(const string& path) const { return all_hosts_; } -LocalOnlyDistributor::LocalOnlyDistributor(Host localhost) : localhost_(localhost) {} +LocalOnlyDistributor::LocalOnlyDistributor(host_t localhost) : localhost_(localhost) {} -Host LocalOnlyDistributor:: +host_t LocalOnlyDistributor:: localhost() const { return localhost_; } -Host LocalOnlyDistributor:: -locate_data(const string& path, const ChunkID& chnk_id) const { +host_t LocalOnlyDistributor:: +locate_data(const string& path, const chunkid_t& chnk_id) const { return localhost_; } -Host LocalOnlyDistributor:: +host_t LocalOnlyDistributor:: locate_file_metadata(const string& path) const { return localhost_; } -::vector LocalOnlyDistributor:: +::vector LocalOnlyDistributor:: locate_directory_metadata(const string& path) const { return {localhost_}; } + +} // namespace rpc +} // namespace gkfs \ No newline at end of file -- GitLab From 653e20e458a6deb38b26f39c8390eb03de45f467 Mon Sep 17 00:00:00 2001 From: Marc Vef Date: Thu, 20 Feb 2020 17:50:59 +0100 Subject: [PATCH 19/25] Moving rpc_utils global code to daemon and put it into gkfs::rpc --- .../handler/rpc_util.hpp} | 22 ++++++------- include/global/rpc/rpc_util.hpp | 31 +++++++++++++++++++ src/client/CMakeLists.txt | 4 +-- src/client/preload_util.cpp | 2 +- src/client/rpc/forward_metadata.cpp | 2 +- src/daemon/CMakeLists.txt | 5 +-- src/daemon/daemon.cpp | 2 +- src/daemon/handler/srv_data.cpp | 26 ++++++++-------- src/daemon/handler/srv_metadata.cpp | 12 +++---- src/daemon/util.cpp | 2 +- .../rpc/{rpc_utils.cpp => rpc_util.cpp} | 2 +- 11 files changed, 71 insertions(+), 39 deletions(-) rename include/{global/rpc/rpc_utils.hpp => daemon/handler/rpc_util.hpp} (71%) create mode 100644 include/global/rpc/rpc_util.hpp rename src/global/rpc/{rpc_utils.cpp => rpc_util.cpp} (98%) diff --git a/include/global/rpc/rpc_utils.hpp b/include/daemon/handler/rpc_util.hpp similarity index 71% rename from include/global/rpc/rpc_utils.hpp rename to include/daemon/handler/rpc_util.hpp index f1cfa3f57..38ef10941 100644 --- a/include/global/rpc/rpc_utils.hpp +++ 
b/include/daemon/handler/rpc_util.hpp @@ -11,9 +11,8 @@ SPDX-License-Identifier: MIT */ - -#ifndef GEKKOFS_RPC_UTILS_HPP -#define GEKKOFS_RPC_UTILS_HPP +#ifndef GEKKOFS_DAEMON_RPC_UTIL_HPP +#define GEKKOFS_DAEMON_RPC_UTIL_HPP extern "C" { #include @@ -23,8 +22,11 @@ extern "C" { #include +namespace gkfs { +namespace rpc { + template -inline hg_return_t rpc_cleanup(hg_handle_t* handle, I* input, O* output, hg_bulk_t* bulk_handle) { +inline hg_return_t cleanup(hg_handle_t* handle, I* input, O* output, hg_bulk_t* bulk_handle) { auto ret = HG_SUCCESS; if (bulk_handle) { ret = margo_bulk_free(*bulk_handle); @@ -50,21 +52,19 @@ inline hg_return_t rpc_cleanup(hg_handle_t* handle, I* input, O* output, hg_bulk } template -inline hg_return_t rpc_cleanup_respond(hg_handle_t* handle, I* input, O* output, hg_bulk_t* bulk_handle) { +inline hg_return_t cleanup_respond(hg_handle_t* handle, I* input, O* output, hg_bulk_t* bulk_handle) { auto ret = HG_SUCCESS; if (output && handle) { ret = margo_respond(*handle, output); if (ret != HG_SUCCESS) return ret; } - return rpc_cleanup(handle, input, static_cast(nullptr), bulk_handle); + return cleanup(handle, input, static_cast(nullptr), bulk_handle); } -hg_bool_t bool_to_merc_bool(bool state); - -std::string get_my_hostname(bool short_hostname = false); +} // namespace rpc +} // namespace gkfs -std::string get_host_by_name(const std::string& hostname); -#endif //GEKKOFS_RPC_UTILS_HPP +#endif //GEKKOFS_DAEMON_RPC_UTIL_HPP diff --git a/include/global/rpc/rpc_util.hpp b/include/global/rpc/rpc_util.hpp new file mode 100644 index 000000000..2822c67c0 --- /dev/null +++ b/include/global/rpc/rpc_util.hpp @@ -0,0 +1,31 @@ +/* + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany + + This software was partially supported by the + EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). + + This software was partially supported by the + ADA-FS project under the SPPEXA project funded by the DFG. 
+ + SPDX-License-Identifier: MIT +*/ + + +#ifndef GEKKOFS_GLOBAL_RPC_UTILS_HPP +#define GEKKOFS_GLOBAL_RPC_UTILS_HPP + +extern "C" { +#include +#include +} + +#include + +hg_bool_t bool_to_merc_bool(bool state); + +std::string get_my_hostname(bool short_hostname = false); + +std::string get_host_by_name(const std::string& hostname); + +#endif //GEKKOFS_GLOBAL_RPC_UTILS_HPP diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt index 12498247b..0be20f8ea 100644 --- a/src/client/CMakeLists.txt +++ b/src/client/CMakeLists.txt @@ -10,7 +10,7 @@ set(PRELOAD_SRC preload_context.cpp preload_util.cpp ../global/path_util.cpp - ../global/rpc/rpc_utils.cpp + ../global/rpc/rpc_util.cpp rpc/rpc_types.cpp rpc/forward_data.cpp rpc/forward_management.cpp @@ -46,7 +46,7 @@ set(PRELOAD_HEADERS ../../include/global/global_defs.hpp ../../include/global/path_util.hpp ../../include/global/rpc/rpc_types.hpp - ../../include/global/rpc/rpc_utils.hpp + ../../include/global/rpc/rpc_util.hpp ) add_library(gkfs_intercept SHARED ${PRELOAD_SRC} ${PRELOAD_HEADERS}) diff --git a/src/client/preload_util.cpp b/src/client/preload_util.cpp index 796930d61..9038a1d5e 100644 --- a/src/client/preload_util.cpp +++ b/src/client/preload_util.cpp @@ -16,7 +16,7 @@ #include #include -#include +#include #include #include diff --git a/src/client/rpc/forward_metadata.cpp b/src/client/rpc/forward_metadata.cpp index 6e992a7b0..b8d43f96b 100644 --- a/src/client/rpc/forward_metadata.cpp +++ b/src/client/rpc/forward_metadata.cpp @@ -18,7 +18,7 @@ #include #include -#include +#include #include #include diff --git a/src/daemon/CMakeLists.txt b/src/daemon/CMakeLists.txt index 72d0a7b0e..e4ba89177 100644 --- a/src/daemon/CMakeLists.txt +++ b/src/daemon/CMakeLists.txt @@ -2,7 +2,7 @@ add_subdirectory(backend/metadata) add_subdirectory(backend/data) set(DAEMON_SRC - ../global/rpc/rpc_utils.cpp + ../global/rpc/rpc_util.cpp ../global/path_util.cpp daemon.cpp util.cpp @@ -19,7 +19,7 @@ set(DAEMON_HEADERS ../../include/global/cmake_configure.hpp ../../include/global/global_defs.hpp ../../include/global/rpc/rpc_types.hpp - ../../include/global/rpc/rpc_utils.hpp + ../../include/global/rpc/rpc_util.hpp ../../include/global/path_util.hpp ../../include/daemon/daemon.hpp ../../include/daemon/util.hpp @@ -27,6 +27,7 @@ set(DAEMON_HEADERS ../../include/daemon/classes/fs_data.hpp ../../include/daemon/classes/rpc_data.hpp ../../include/daemon/handler/rpc_defs.hpp + ../../include/daemon/handler/rpc_util.hpp ) add_executable(gkfs_daemon ${DAEMON_SRC} ${DAEMON_HEADERS}) target_link_libraries(gkfs_daemon diff --git a/src/daemon/daemon.cpp b/src/daemon/daemon.cpp index c5667d088..e199b9f1a 100644 --- a/src/daemon/daemon.cpp +++ b/src/daemon/daemon.cpp @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/daemon/handler/srv_data.cpp b/src/daemon/handler/srv_data.cpp index 4ec7dd5d1..553a9eaa5 100644 --- a/src/daemon/handler/srv_data.cpp +++ b/src/daemon/handler/srv_data.cpp @@ -14,10 +14,10 @@ #include #include +#include #include #include -#include #include #include @@ -135,7 +135,7 @@ static hg_return_t rpc_srv_write(hg_handle_t handle) { auto ret = margo_get_input(handle, &in); if (ret != HG_SUCCESS) { GKFS_DATA->spdlogger()->error("{}() Could not get RPC input data with err {}", __func__, ret); - return rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); + return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); } auto hgi = margo_get_info(handle); auto mid = 
margo_hg_info_get_instance(hgi); @@ -151,7 +151,7 @@ static hg_return_t rpc_srv_write(hg_handle_t handle) { ret = margo_bulk_create(mid, 1, nullptr, &in.total_chunk_size, HG_BULK_READWRITE, &bulk_handle); if (ret != HG_SUCCESS) { GKFS_DATA->spdlogger()->error("{}() Failed to create bulk handle", __func__); - return rpc_cleanup_respond(&handle, &in, &out, static_cast(nullptr)); + return gkfs::rpc::cleanup_respond(&handle, &in, &out, static_cast(nullptr)); } // access the internally allocated memory buffer and put it into buf_ptrs uint32_t actual_count; @@ -159,7 +159,7 @@ static hg_return_t rpc_srv_write(hg_handle_t handle) { &in.total_chunk_size, &actual_count); if (ret != HG_SUCCESS || actual_count != 1) { GKFS_DATA->spdlogger()->error("{}() Failed to access allocated buffer from bulk handle", __func__); - return rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); + return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); } auto const host_id = in.host_id; auto const host_size = in.host_size; @@ -215,7 +215,7 @@ static hg_return_t rpc_srv_write(hg_handle_t handle) { "{}() Failed to pull data from client for chunk {} (startchunk {}; endchunk {}", __func__, chnk_id_file, in.chunk_start, in.chunk_end - 1); cancel_abt_io(&abt_tasks, &task_eventuals, chnk_id_curr); - return rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); + return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); } bulk_buf_ptrs[chnk_id_curr] = chnk_ptr; chnk_sizes[chnk_id_curr] = offset_transfer_size; @@ -244,7 +244,7 @@ static hg_return_t rpc_srv_write(hg_handle_t handle) { "{}() Failed to pull data from client. file {} chunk {} (startchunk {}; endchunk {})", __func__, *path, chnk_id_file, in.chunk_start, (in.chunk_end - 1)); cancel_abt_io(&abt_tasks, &task_eventuals, chnk_id_curr); - return rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); + return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); } bulk_buf_ptrs[chnk_id_curr] = chnk_ptr; chnk_sizes[chnk_id_curr] = transfer_size; @@ -267,7 +267,7 @@ static hg_return_t rpc_srv_write(hg_handle_t handle) { if (abt_ret != ABT_SUCCESS) { GKFS_DATA->spdlogger()->error("{}() task create failed", __func__); cancel_abt_io(&abt_tasks, &task_eventuals, chnk_id_curr + 1); - return rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); + return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); } // next chunk chnk_id_curr++; @@ -315,7 +315,7 @@ static hg_return_t rpc_srv_write(hg_handle_t handle) { * 5. 
Respond and cleanup */ GKFS_DATA->spdlogger()->debug("{}() Sending output response {}", __func__, out.err); - ret = rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); + ret = gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); // free tasks after responding for (auto&& task : abt_tasks) { ABT_task_join(task); @@ -340,7 +340,7 @@ static hg_return_t rpc_srv_read(hg_handle_t handle) { auto ret = margo_get_input(handle, &in); if (ret != HG_SUCCESS) { GKFS_DATA->spdlogger()->error("{}() Could not get RPC input data with err {}", __func__, ret); - return rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); + return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); } auto hgi = margo_get_info(handle); auto mid = margo_hg_info_get_instance(hgi); @@ -357,7 +357,7 @@ static hg_return_t rpc_srv_read(hg_handle_t handle) { ret = margo_bulk_create(mid, 1, nullptr, &in.total_chunk_size, HG_BULK_READWRITE, &bulk_handle); if (ret != HG_SUCCESS) { GKFS_DATA->spdlogger()->error("{}() Failed to create bulk handle", __func__); - return rpc_cleanup_respond(&handle, &in, &out, static_cast(nullptr)); + return gkfs::rpc::cleanup_respond(&handle, &in, &out, static_cast(nullptr)); } // access the internally allocated memory buffer and put it into buf_ptrs uint32_t actual_count; @@ -365,7 +365,7 @@ static hg_return_t rpc_srv_read(hg_handle_t handle) { &in.total_chunk_size, &actual_count); if (ret != HG_SUCCESS || actual_count != 1) { GKFS_DATA->spdlogger()->error("{}() Failed to access allocated buffer from bulk handle", __func__); - return rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); + return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); } auto const host_id = in.host_id; auto const host_size = in.host_size; @@ -448,7 +448,7 @@ static hg_return_t rpc_srv_read(hg_handle_t handle) { if (abt_ret != ABT_SUCCESS) { GKFS_DATA->spdlogger()->error("{}() task create failed", __func__); cancel_abt_io(&abt_tasks, &task_eventuals, chnk_id_curr + 1); - return rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); + return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); } chnk_id_curr++; } @@ -505,7 +505,7 @@ static hg_return_t rpc_srv_read(hg_handle_t handle) { * 5. 
Respond and cleanup */ GKFS_DATA->spdlogger()->debug("{}() Sending output response, err: {}", __func__, out.err); - ret = rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); + ret = gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); // free tasks after responding cancel_abt_io(&abt_tasks, &task_eventuals, in.chunk_n); return ret; diff --git a/src/daemon/handler/srv_metadata.cpp b/src/daemon/handler/srv_metadata.cpp index 31c8fc225..ce6de804f 100644 --- a/src/daemon/handler/srv_metadata.cpp +++ b/src/daemon/handler/srv_metadata.cpp @@ -13,11 +13,11 @@ #include +#include #include #include #include -#include using namespace std; @@ -319,7 +319,7 @@ static hg_return_t rpc_srv_get_dirents(hg_handle_t handle) { if (entries.empty()) { out.err = 0; - return rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); + return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); } //Calculate total output size @@ -334,7 +334,7 @@ static hg_return_t rpc_srv_get_dirents(hg_handle_t handle) { //Source buffer is smaller than total output size GKFS_DATA->spdlogger()->error("{}() Entries do not fit source buffer", __func__); out.err = ENOBUFS; - return rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); + return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); } //Serialize output data on local buffer @@ -357,7 +357,7 @@ static hg_return_t rpc_srv_get_dirents(hg_handle_t handle) { if (ret != HG_SUCCESS) { GKFS_DATA->spdlogger()->error("{}() Failed to create bulk handle", __func__); out.err = EBUSY; - return rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); + return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); } ret = margo_bulk_transfer(mid, HG_BULK_PUSH, hgi->addr, @@ -369,14 +369,14 @@ static hg_return_t rpc_srv_get_dirents(hg_handle_t handle) { "{}() Failed push dirents on path {} to client", __func__, in.path ); - return rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); + return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); } out.dirents_size = entries.size(); out.err = 0; GKFS_DATA->spdlogger()->debug( "{}() Sending output response", __func__); - return rpc_cleanup_respond(&handle, &in, &out, &bulk_handle); + return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); } DEFINE_MARGO_RPC_HANDLER(rpc_srv_get_dirents) diff --git a/src/daemon/util.cpp b/src/daemon/util.cpp index 636005975..4090aa933 100644 --- a/src/daemon/util.cpp +++ b/src/daemon/util.cpp @@ -13,7 +13,7 @@ #include #include -#include +#include #include #include diff --git a/src/global/rpc/rpc_utils.cpp b/src/global/rpc/rpc_util.cpp similarity index 98% rename from src/global/rpc/rpc_utils.cpp rename to src/global/rpc/rpc_util.cpp index dd06d3a4d..8a38fa597 100644 --- a/src/global/rpc/rpc_utils.cpp +++ b/src/global/rpc/rpc_util.cpp @@ -12,7 +12,7 @@ */ -#include +#include extern "C" { #include -- GitLab From 8a4c292cbffca724aa7ca50bd3805a3a21265182 Mon Sep 17 00:00:00 2001 From: Marc Vef Date: Fri, 21 Feb 2020 10:22:06 +0100 Subject: [PATCH 20/25] Fix path bug in compile script --- scripts/compile_dep.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/compile_dep.sh b/scripts/compile_dep.sh index f76e61319..9b8335ab6 100755 --- a/scripts/compile_dep.sh +++ b/scripts/compile_dep.sh @@ -290,7 +290,7 @@ if check_dependency "capstone" "${DEP_CONFIG[@]}"; then CURR=${SOURCE}/capstone prepare_build_dir "${CURR}" cd "${CURR}"/build - $CMAKE -DCMAKE_INSTALL_PREFIX=/home/vef/gekkofs_deps/install -DCMAKE_BUILD_TYPE:STRING=Release .. 
+ $CMAKE -DCMAKE_INSTALL_PREFIX="${INSTALL}" -DCMAKE_BUILD_TYPE:STRING=Release .. make -j"${CORES}" install fi -- GitLab From 8f63862e921553b2eac27792fdaff99e7264a032 Mon Sep 17 00:00:00 2001 From: Marc Vef Date: Mon, 24 Feb 2020 18:56:31 +0100 Subject: [PATCH 21/25] Reformating code for consistency --- include/client/gkfs_functions.hpp | 72 +++--- include/client/make_array.hpp | 18 +- include/client/preload_util.hpp | 14 +- include/daemon/util.hpp | 8 +- include/global/chunk_calc_util.hpp | 2 +- include/global/env_util.hpp | 6 +- include/global/log_util.hpp | 8 +- include/global/rpc/rpc_types.hpp | 33 +-- src/client/logging.cpp | 323 +++++++++++++------------- src/daemon/backend/metadata/merge.cpp | 2 +- src/daemon/daemon.cpp | 14 +- 11 files changed, 252 insertions(+), 248 deletions(-) diff --git a/include/client/gkfs_functions.hpp b/include/client/gkfs_functions.hpp index c830b379e..5aa78a405 100644 --- a/include/client/gkfs_functions.hpp +++ b/include/client/gkfs_functions.hpp @@ -36,77 +36,77 @@ using sys_statfs = struct statfs; using sys_statvfs = struct statvfs; namespace gkfs { - namespace func { +namespace func { - std::shared_ptr metadata(const std::string& path, bool follow_links = false); +std::shared_ptr metadata(const std::string& path, bool follow_links = false); - int check_parent_dir(const std::string& path); +int check_parent_dir(const std::string& path); - int open(const std::string& path, mode_t mode, int flags); +int open(const std::string& path, mode_t mode, int flags); - int mk_node(const std::string& path, mode_t mode); +int mk_node(const std::string& path, mode_t mode); - int rm_node(const std::string& path); +int rm_node(const std::string& path); - int access(const std::string& path, int mask, bool follow_links = true); +int access(const std::string& path, int mask, bool follow_links = true); - int stat(const std::string& path, struct stat* buf, bool follow_links = true); +int stat(const std::string& path, struct stat* buf, bool follow_links = true); - int statfs(sys_statfs* buf); +int statfs(sys_statfs* buf); - int statvfs(sys_statvfs* buf); +int statvfs(sys_statvfs* buf); - off64_t lseek(unsigned int fd, off64_t offset, unsigned int whence); +off64_t lseek(unsigned int fd, off64_t offset, unsigned int whence); - off64_t lseek(std::shared_ptr gkfs_fd, off64_t offset, unsigned int whence); +off64_t lseek(std::shared_ptr gkfs_fd, off64_t offset, unsigned int whence); - int truncate(const std::string& path, off_t offset); +int truncate(const std::string& path, off_t offset); - int truncate(const std::string& path, off_t old_size, off_t new_size); +int truncate(const std::string& path, off_t old_size, off_t new_size); - int dup(int oldfd); +int dup(int oldfd); - int dup2(int oldfd, int newfd); +int dup2(int oldfd, int newfd); #ifdef HAS_SYMLINKS - int mk_symlink(const std::string& path, const std::string& target_path); +int mk_symlink(const std::string& path, const std::string& target_path); - int readlink(const std::string& path, char* buf, int bufsize); +int readlink(const std::string& path, char* buf, int bufsize); #endif - ssize_t pwrite(std::shared_ptr file, - const char* buf, size_t count, off64_t offset); +ssize_t pwrite(std::shared_ptr file, + const char* buf, size_t count, off64_t offset); - ssize_t pwrite_ws(int fd, const void* buf, size_t count, off64_t offset); +ssize_t pwrite_ws(int fd, const void* buf, size_t count, off64_t offset); - ssize_t write(int fd, const void* buf, size_t count); +ssize_t write(int fd, const void* buf, size_t count); - ssize_t 
pwritev(int fd, const struct iovec* iov, int iovcnt, off_t offset); +ssize_t pwritev(int fd, const struct iovec* iov, int iovcnt, off_t offset); - ssize_t writev(int fd, const struct iovec* iov, int iovcnt); +ssize_t writev(int fd, const struct iovec* iov, int iovcnt); - ssize_t pread(std::shared_ptr file, char* buf, size_t count, off64_t offset); +ssize_t pread(std::shared_ptr file, char* buf, size_t count, off64_t offset); - ssize_t pread_ws(int fd, void* buf, size_t count, off64_t offset); +ssize_t pread_ws(int fd, void* buf, size_t count, off64_t offset); - ssize_t read(int fd, void* buf, size_t count); +ssize_t read(int fd, void* buf, size_t count); - int opendir(const std::string& path); +int opendir(const std::string& path); - int getdents(unsigned int fd, - struct linux_dirent* dirp, - unsigned int count); +int getdents(unsigned int fd, + struct linux_dirent* dirp, + unsigned int count); - int getdents64(unsigned int fd, - struct linux_dirent64* dirp, - unsigned int count); +int getdents64(unsigned int fd, + struct linux_dirent64* dirp, + unsigned int count); - int rmdir(const std::string& path); - } +int rmdir(const std::string& path); +} } #endif //GEKKOFS_GKFS_FUNCTIONS_HPP diff --git a/include/client/make_array.hpp b/include/client/make_array.hpp index 5a971f35b..60f308270 100644 --- a/include/client/make_array.hpp +++ b/include/client/make_array.hpp @@ -17,16 +17,16 @@ namespace gkfs { namespace util { -template -constexpr auto make_array(T&&... values) -> - std::array< - typename std::decay< - typename std::common_type::type>::type, - sizeof...(T)> { - return std::array< +template +constexpr auto make_array(T&& ... values) -> +std::array< typename std::decay< - typename std::common_type::type>::type, - sizeof...(T)>{std::forward(values)...}; + typename std::common_type::type>::type, + sizeof...(T)> { + return std::array< + typename std::decay< + typename std::common_type::type>::type, + sizeof...(T)>{std::forward(values)...}; } } // namespace util diff --git a/include/client/preload_util.hpp b/include/client/preload_util.hpp index 4382e8a04..1c411a372 100644 --- a/include/client/preload_util.hpp +++ b/include/client/preload_util.hpp @@ -43,17 +43,17 @@ extern std::unique_ptr ld_network_service; // function definitions namespace gkfs { namespace util { - template - constexpr typename std::underlying_type::type to_underlying(E e) { - return static_cast::type>(e); - } +template +constexpr typename std::underlying_type::type to_underlying(E e) { + return static_cast::type>(e); +} int metadata_to_stat(const std::string& path, const gkfs::metadata::Metadata& md, struct stat& attr); - std::vector> load_hostfile(const std::string& lfpath); +std::vector> load_hostfile(const std::string& lfpath); - void load_hosts(); - } +void load_hosts(); +} } #endif //GEKKOFS_PRELOAD_UTIL_HPP diff --git a/include/daemon/util.hpp b/include/daemon/util.hpp index e2a6b910a..ceb5f70d5 100644 --- a/include/daemon/util.hpp +++ b/include/daemon/util.hpp @@ -15,11 +15,11 @@ #define GEKKOFS_DAEMON_UTIL_HPP namespace gkfs { - namespace util { - void populate_hosts_file(); +namespace util { +void populate_hosts_file(); - void destroy_hosts_file(); - } +void destroy_hosts_file(); +} } #endif //GEKKOFS_DAEMON_UTIL_HPP diff --git a/include/global/chunk_calc_util.hpp b/include/global/chunk_calc_util.hpp index 4e6a51597..20696fb81 100644 --- a/include/global/chunk_calc_util.hpp +++ b/include/global/chunk_calc_util.hpp @@ -29,7 +29,7 @@ inline int log2(uint64_t n) { 0, 58, 1, 59, 47, 53, 2, 60, 39, 48, 27, 54, 33, 
42, 3, 61, 51, 37, 40, 49, 18, 28, 20, 55, 30, 34, 11, 43, 14, 22, 4, 62, 57, 46, 52, 38, 26, 32, 41, 50, 36, 17, 19, 29, 10, 13, 21, 56, - 45, 25, 31, 35, 16, 9, 12, 44, 24, 15, 8, 23, 7, 6, 5, 63 }; + 45, 25, 31, 35, 16, 9, 12, 44, 24, 15, 8, 23, 7, 6, 5, 63}; n |= n >> 1; n |= n >> 2; diff --git a/include/global/env_util.hpp b/include/global/env_util.hpp index 3c4263eb0..098d8a427 100644 --- a/include/global/env_util.hpp +++ b/include/global/env_util.hpp @@ -17,11 +17,11 @@ #include namespace gkfs { - namespace env { +namespace env { - std::string get_var(const std::string& name, const std::string& default_value = ""); +std::string get_var(const std::string& name, const std::string& default_value = ""); - } // namespace env +} // namespace env } // namespace gkfs #endif // GKFS_COMMON_ENV_UTIL_HPP diff --git a/include/global/log_util.hpp b/include/global/log_util.hpp index 6ef05d3ef..ea70b34ca 100644 --- a/include/global/log_util.hpp +++ b/include/global/log_util.hpp @@ -19,12 +19,12 @@ namespace gkfs { namespace log { - spdlog::level::level_enum get_level(std::string level_str); +spdlog::level::level_enum get_level(std::string level_str); - spdlog::level::level_enum get_level(unsigned long level); +spdlog::level::level_enum get_level(unsigned long level); - void setup(const std::vector& loggers, spdlog::level::level_enum level, const std::string& path); - } +void setup(const std::vector& loggers, spdlog::level::level_enum level, const std::string& path); +} } #endif diff --git a/include/global/rpc/rpc_types.hpp b/include/global/rpc/rpc_types.hpp index 5fbc68375..5e6d57f12 100644 --- a/include/global/rpc/rpc_types.hpp +++ b/include/global/rpc/rpc_types.hpp @@ -39,7 +39,7 @@ MERCURY_GEN_PROC(rpc_rm_node_in_t, ((hg_const_string_t) (path))) MERCURY_GEN_PROC(rpc_trunc_in_t, ((hg_const_string_t) (path)) \ - ((hg_uint64_t) (length))) +((hg_uint64_t) (length))) MERCURY_GEN_PROC(rpc_update_metadentry_in_t, ((hg_const_string_t) (path))\ @@ -73,17 +73,18 @@ MERCURY_GEN_PROC(rpc_get_metadentry_size_out_t, ((hg_int32_t) (err)) #ifdef HAS_SYMLINKS MERCURY_GEN_PROC(rpc_mk_symlink_in_t, - ((hg_const_string_t) (path))\ - ((hg_const_string_t) (target_path)) + ((hg_const_string_t) (path))\ +((hg_const_string_t) (target_path)) ) + #endif // data MERCURY_GEN_PROC(rpc_read_data_in_t, ((hg_const_string_t) (path))\ ((int64_t) (offset))\ - ((hg_uint64_t) (host_id))\ - ((hg_uint64_t) (host_size))\ +((hg_uint64_t) (host_id))\ +((hg_uint64_t) (host_size))\ ((hg_uint64_t) (chunk_n))\ ((hg_uint64_t) (chunk_start))\ ((hg_uint64_t) (chunk_end))\ @@ -91,14 +92,14 @@ MERCURY_GEN_PROC(rpc_read_data_in_t, ((hg_bulk_t) (bulk_handle))) MERCURY_GEN_PROC(rpc_data_out_t, - ((int32_t) (err))\ + ((int32_t) (err))\ ((hg_size_t) (io_size))) MERCURY_GEN_PROC(rpc_write_data_in_t, ((hg_const_string_t) (path))\ ((int64_t) (offset))\ - ((hg_uint64_t) (host_id))\ - ((hg_uint64_t) (host_size))\ +((hg_uint64_t) (host_id))\ +((hg_uint64_t) (host_size))\ ((hg_uint64_t) (chunk_n))\ ((hg_uint64_t) (chunk_start))\ ((hg_uint64_t) (chunk_end))\ @@ -106,13 +107,13 @@ MERCURY_GEN_PROC(rpc_write_data_in_t, ((hg_bulk_t) (bulk_handle))) MERCURY_GEN_PROC(rpc_get_dirents_in_t, - ((hg_const_string_t) (path)) - ((hg_bulk_t) (bulk_handle)) + ((hg_const_string_t) (path)) + ((hg_bulk_t) (bulk_handle)) ) MERCURY_GEN_PROC(rpc_get_dirents_out_t, - ((hg_int32_t) (err)) - ((hg_size_t) (dirents_size)) + ((hg_int32_t) (err)) + ((hg_size_t) (dirents_size)) ) @@ -129,13 +130,13 @@ MERCURY_GEN_PROC(rpc_config_out_t, ((hg_const_string_t) (mountdir)) 
MERCURY_GEN_PROC(rpc_chunk_stat_in_t, - ((hg_int32_t) (dummy)) + ((hg_int32_t) (dummy)) ) MERCURY_GEN_PROC(rpc_chunk_stat_out_t, - ((hg_uint64_t) (chunk_size)) - ((hg_uint64_t) (chunk_total)) - ((hg_uint64_t) (chunk_free)) + ((hg_uint64_t) (chunk_size)) + ((hg_uint64_t) (chunk_total)) + ((hg_uint64_t) (chunk_free)) ) #endif //LFS_RPC_TYPES_HPP diff --git a/src/client/logging.cpp b/src/client/logging.cpp index a79c5b30c..207ab935e 100644 --- a/src/client/logging.cpp +++ b/src/client/logging.cpp @@ -23,7 +23,9 @@ extern "C" { } #ifdef GKFS_ENABLE_LOGGING + #include + #endif namespace gkfs { @@ -41,87 +43,87 @@ struct opt_info { static const auto constexpr debug_opts = util::make_array( - opt_info{STR_AND_LEN("none"), - {"don't print any messages"}, - log::none}, + opt_info{STR_AND_LEN("none"), + {"don't print any messages"}, + log::none}, #ifdef GKFS_DEBUG_BUILD - opt_info{STR_AND_LEN("syscalls"), - {"Trace system calls: print the name of each system call,", - "its arguments, and its return value. All system calls are", - "printed after being executed save for those that may not", - "return, such as execve() and execve_at()", - "[ default: off ]"}, - log::syscall}, - - opt_info{STR_AND_LEN("syscalls_at_entry"), - {"Trace system calls: print the name of each system call", - "and its arguments. All system calls are printed before ", - "being executed and therefore their return values are not", - "available in the log", - "[ default: off ]"}, - log::syscall_at_entry}, + opt_info{STR_AND_LEN("syscalls"), + {"Trace system calls: print the name of each system call,", + "its arguments, and its return value. All system calls are", + "printed after being executed save for those that may not", + "return, such as execve() and execve_at()", + "[ default: off ]"}, + log::syscall}, + + opt_info{STR_AND_LEN("syscalls_at_entry"), + {"Trace system calls: print the name of each system call", + "and its arguments. 
All system calls are printed before ", + "being executed and therefore their return values are not", + "available in the log", + "[ default: off ]"}, + log::syscall_at_entry}, #endif // !GKFS_DEBUG_BUILD - opt_info{STR_AND_LEN("info"), - {"Print information messages", - "[ default: on ]"}, - log::info}, + opt_info{STR_AND_LEN("info"), + {"Print information messages", + "[ default: on ]"}, + log::info}, - opt_info{STR_AND_LEN("critical"), - {"Print critical errors", - "[ default: on ]"}, - log::critical}, + opt_info{STR_AND_LEN("critical"), + {"Print critical errors", + "[ default: on ]"}, + log::critical}, - opt_info{STR_AND_LEN("errors"), - {"Print errors", - "[ default: on ]"}, - log::error}, + opt_info{STR_AND_LEN("errors"), + {"Print errors", + "[ default: on ]"}, + log::error}, - opt_info{STR_AND_LEN("warnings"), - {"Print warnings", - "[ default: on ]"}, - log::warning}, + opt_info{STR_AND_LEN("warnings"), + {"Print warnings", + "[ default: on ]"}, + log::warning}, - opt_info{STR_AND_LEN("hermes"), - {"Print messages from Hermes (GekkoFS high-level RPC library)", - "[ default: on ]"}, - log::hermes}, + opt_info{STR_AND_LEN("hermes"), + {"Print messages from Hermes (GekkoFS high-level RPC library)", + "[ default: on ]"}, + log::hermes}, - opt_info{STR_AND_LEN("mercury"), - {"Print messages from Mercury (GekkoFS low-level RPC library)", - "[ default: on ]"}, - log::mercury}, + opt_info{STR_AND_LEN("mercury"), + {"Print messages from Mercury (GekkoFS low-level RPC library)", + "[ default: on ]"}, + log::mercury}, #ifdef GKFS_DEBUG_BUILD - opt_info{STR_AND_LEN("debug"), - {"Print debug messages", - "[ default: off ]"}, - log::debug}, + opt_info{STR_AND_LEN("debug"), + {"Print debug messages", + "[ default: off ]"}, + log::debug}, - opt_info{STR_AND_LEN("most"), - {"All previous options except 'syscalls_at_entry' combined."}, - log::most }, + opt_info{STR_AND_LEN("most"), + {"All previous options except 'syscalls_at_entry' combined."}, + log::most}, #endif // !GKFS_DEBUG_BUILD - opt_info{STR_AND_LEN("all"), - {"All previous options combined."}, - log::all }, + opt_info{STR_AND_LEN("all"), + {"All previous options combined."}, + log::all}, - opt_info{STR_AND_LEN("help"), - {"Print this help message and exit."}, - log::help} + opt_info{STR_AND_LEN("help"), + {"Print this help message and exit."}, + log::help} ); -static const auto constexpr max_debug_opt_length = - sizeof("syscalls_at_entry") - 1; +static const auto constexpr max_debug_opt_length = + sizeof("syscalls_at_entry") - 1; static const auto constexpr max_help_text_rows = - sizeof(debug_opts[0].help_text_) / sizeof(debug_opts[0].help_text_[0]); + sizeof(debug_opts[0].help_text_) / sizeof(debug_opts[0].help_text_[0]); /** * process_log_options -- process the string given as parameter to determine @@ -147,45 +149,45 @@ process_log_options(const std::string gkfs_debug) { // skip separating white spaces and commas boost::split(tokens, gkfs_debug, boost::is_any_of(" ,")); - for(const auto& t : tokens) { + for (const auto& t : tokens) { bool is_known = false; - for(const auto& opt : debug_opts) { + for (const auto& opt : debug_opts) { // none disables any future and previous flags observed - if(t == "none") { + if (t == "none") { return log::none; } - if(t == opt.name_) { + if (t == opt.name_) { dm |= opt.mask_; is_known = true; break; } } - if(!is_known) { + if (!is_known) { logger::log_message(stdout, "warning: logging option '{}' unknown; " - "try {}=help", t, gkfs::env::LOG); + "try {}=help", t, gkfs::env::LOG); } } - if(!!(dm & 
log::help)) { + if (!!(dm & log::help)) { logger::log_message(stdout, "Valid options for the {} " - "environment variable are:\n", gkfs::env::LOG); + "environment variable are:\n", gkfs::env::LOG); - for(const auto& opt : debug_opts) { + for (const auto& opt : debug_opts) { const auto padding = max_debug_opt_length - opt.length_ + 2; - logger::log_message(stdout, " {}{:>{}}{}", opt.name_, "", + logger::log_message(stdout, " {}{:>{}}{}", opt.name_, "", padding, opt.help_text_[0]); - for(auto i = 1lu; i < max_help_text_rows; ++i) { - if(opt.help_text_[i][0] != 0) { - logger::log_message(stdout, " {:>{}}{}", "", - max_debug_opt_length + 2, + for (auto i = 1lu; i < max_help_text_rows; ++i) { + if (opt.help_text_[i][0] != 0) { + logger::log_message(stdout, " {:>{}}{}", "", + max_debug_opt_length + 2, opt.help_text_[i]); } } @@ -194,10 +196,10 @@ process_log_options(const std::string gkfs_debug) { } logger::log_message(stdout, "\n" - "To direct the logging output into a file " - "instead of standard output\n" - "a filename can be specified using the " - "{} environment variable.", gkfs::env::LOG_OUTPUT); + "To direct the logging output into a file " + "instead of standard output\n" + "a filename can be specified using the " + "{} environment variable.", gkfs::env::LOG_OUTPUT); ::_exit(0); } @@ -205,46 +207,48 @@ process_log_options(const std::string gkfs_debug) { } #ifdef GKFS_DEBUG_BUILD + std::bitset<512> process_log_filter(const std::string& log_filter) { std::bitset<512> filtered_syscalls; std::vector tokens; - if(log_filter.empty()) { + if (log_filter.empty()) { return filtered_syscalls; } // skip separating white spaces and commas - boost::split(tokens, log_filter, - [](char c) { return c == ' ' || c == ','; }); + boost::split(tokens, log_filter, + [](char c) { return c == ' ' || c == ','; }); - for(const auto& t : tokens) { + for (const auto& t : tokens) { const auto sc = syscall::lookup_by_name(t); - if(std::strcmp(sc.name(), "unknown_syscall") == 0) { + if (std::strcmp(sc.name(), "unknown_syscall") == 0) { logger::log_message(stdout, "warning: system call '{}' unknown; " - "will not filter", t); + "will not filter", t); continue; } - + filtered_syscalls.set(sc.number()); } return filtered_syscalls; } + #endif // GKFS_DEBUG_BUILD -logger::logger(const std::string& opts, - const std::string& path, +logger::logger(const std::string& opts, + const std::string& path, bool trunc #ifdef GKFS_DEBUG_BUILD - , + , const std::string& filter, int verbosity #endif - ) : - timezone_(nullptr) { +) : + timezone_(nullptr) { /* use stderr by default */ log_fd_ = 2; @@ -255,22 +259,22 @@ logger::logger(const std::string& opts, debug_verbosity_ = verbosity; #endif - if(!path.empty()) { - int flags = O_CREAT | O_RDWR | O_APPEND | O_TRUNC; + if (!path.empty()) { + int flags = O_CREAT | O_RDWR | O_APPEND | O_TRUNC; - if(!trunc) { - flags &= ~O_TRUNC; + if (!trunc) { + flags &= ~O_TRUNC; } // we use ::open() here rather than ::syscall_no_intercept(SYS_open) // because we want the call to be intercepted by our hooks, which // allows us to categorize the resulting fd as 'internal' and // relocate it to our private range - int fd = ::open(path.c_str(), flags, 0600); + int fd = ::open(path.c_str(), flags, 0600); - if(fd == -1) { + if (fd == -1) { log(gkfs::log::error, __func__, __LINE__, "Failed to open log " - "file '{}'. Logging will fall back to stderr", path); + "file '{}'. 
Logging will fall back to stderr", path); return; } @@ -297,75 +301,75 @@ logger::logger(const std::string& opts, timezone_ = date::current_zone(); #ifdef GKFS_DEBUG_BUILD using namespace date; - timezone_->get_info(date::sys_days{January/1/1970}); + timezone_->get_info(date::sys_days{January / 1 / 1970}); #endif // GKFS_DEBUG_BUILD } - catch(const std::exception& ex) { + catch (const std::exception& ex) { // if timezone initialization fails, setting timezone_ to nullptr // makes format_timestamp_to() default to producing epoch timestamps timezone_ = nullptr; } #ifdef GKFS_ENABLE_LOGGING - const auto log_hermes_message = - [](const std::string& msg, hermes::log::level l, int severity, - const std::string& file, const std::string& func, int lineno) { - - const auto name = [](hermes::log::level l, int severity) { - using namespace std::string_literals; - - switch(l) { - case hermes::log::info: - return "info"s; - case hermes::log::warning: - return "warning"s; - case hermes::log::error: - return "error"s; - case hermes::log::fatal: - return "fatal"s; - case hermes::log::mercury: - return "mercury"s; - default: - return "unknown"s; - } - }; - - LOG(HERMES, "[{}] {}", name(l, severity), msg); - }; + const auto log_hermes_message = + [](const std::string& msg, hermes::log::level l, int severity, + const std::string& file, const std::string& func, int lineno) { + + const auto name = [](hermes::log::level l, int severity) { + using namespace std::string_literals; + + switch (l) { + case hermes::log::info: + return "info"s; + case hermes::log::warning: + return "warning"s; + case hermes::log::error: + return "error"s; + case hermes::log::fatal: + return "fatal"s; + case hermes::log::mercury: + return "mercury"s; + default: + return "unknown"s; + } + }; + + LOG(HERMES, "[{}] {}", name(l, severity), msg); + }; #ifdef GKFS_DEBUG_BUILD - const auto log_hermes_debug_message = - [this](const std::string& msg, hermes::log::level l, - int severity, const std::string& file, - const std::string& func, int lineno) { + const auto log_hermes_debug_message = + [this](const std::string& msg, hermes::log::level l, + int severity, const std::string& file, + const std::string& func, int lineno) { - if(severity > debug_verbosity_) { - return; - } + if (severity > debug_verbosity_) { + return; + } - LOG(HERMES, "[debug{}] <{}():{}> {}", - (severity == 0 ? "" : std::to_string(severity + 1)), - func, lineno, msg); - }; + LOG(HERMES, "[debug{}] <{}():{}> {}", + (severity == 0 ? 
"" : std::to_string(severity + 1)), + func, lineno, msg); + }; #endif // GKFS_DEBUG_BUILD - const auto log_hg_message = - [](const std::string& msg, hermes::log::level l, int severity, - const std::string& file, const std::string& func, int lineno) { + const auto log_hg_message = + [](const std::string& msg, hermes::log::level l, int severity, + const std::string& file, const std::string& func, int lineno) { - (void) l; + (void) l; - // mercury message might contain one or more sub-messages - // separated by '\n' - std::vector sub_msgs; - boost::split(sub_msgs, msg, boost::is_any_of("\n"), boost::token_compress_on); + // mercury message might contain one or more sub-messages + // separated by '\n' + std::vector sub_msgs; + boost::split(sub_msgs, msg, boost::is_any_of("\n"), boost::token_compress_on); - for(const auto& m : sub_msgs) { - if(!m.empty()) { - LOG(MERCURY, "{}", m); - } - } - }; + for (const auto& m : sub_msgs) { + if (!m.empty()) { + LOG(MERCURY, "{}", m); + } + } + }; // register log callbacks into hermes so that we can manage // both its and mercury's log messages @@ -393,7 +397,7 @@ logger::~logger() { void logger::log_syscall(syscall::info info, - const long syscall_number, + const long syscall_number, const long args[6], boost::optional result) { @@ -402,12 +406,12 @@ logger::log_syscall(syscall::info info, const bool log_syscall_result = !!(log::syscall & log_mask_); // log the syscall if and only if logging for syscalls is enabled - if(!log_syscall_entry && !log_syscall_result) { + if (!log_syscall_entry && !log_syscall_result) { return; } #ifdef GKFS_DEBUG_BUILD - if(filtered_syscalls_[syscall_number]) { + if (filtered_syscalls_[syscall_number]) { return; } #endif @@ -415,45 +419,44 @@ logger::log_syscall(syscall::info info, // log the syscall even if we don't have information on it, since it may // be important to the user (we assume that the syscall has completed // though) - if(info == syscall::no_info) { + if (info == syscall::no_info) { goto print_syscall; } // log the syscall entry if the syscall may not return (e.g. execve) or // if we are sure that it won't ever return (e.g. 
exit), even if // log::syscall_at_entry is disabled - if(syscall::may_not_return(syscall_number) || - syscall::never_returns(syscall_number)) { + if (syscall::may_not_return(syscall_number) || + syscall::never_returns(syscall_number)) { goto print_syscall; } - if(log_syscall_entry && syscall::execution_is_pending(info)) { + if (log_syscall_entry && syscall::execution_is_pending(info)) { goto print_syscall; } - if(log_syscall_result && !syscall::execution_is_pending(info)) { + if (log_syscall_result && !syscall::execution_is_pending(info)) { goto print_syscall; } return; -print_syscall: + print_syscall: static_buffer buffer; detail::format_timestamp_to(buffer, timezone_); detail::format_syscall_info_to(buffer, info); - if(result) { + if (result) { syscall::decode(buffer, syscall_number, args, *result); - } - else { + } else { syscall::decode(buffer, syscall_number, args); } fmt::format_to(buffer, "\n"); - ::syscall_no_intercept(SYS_write, log_fd_, buffer.data(), buffer.size()); + ::syscall_no_intercept(SYS_write, log_fd_, buffer.data(), buffer.size()); } } // namespace log diff --git a/src/daemon/backend/metadata/merge.cpp b/src/daemon/backend/metadata/merge.cpp index 067b08354..68cf69c41 100644 --- a/src/daemon/backend/metadata/merge.cpp +++ b/src/daemon/backend/metadata/merge.cpp @@ -55,7 +55,7 @@ IncreaseSizeOperand::IncreaseSizeOperand(const rdb::Slice& serialized_op) { //Parse append flag assert(serialized_op[chrs_parsed] == false_char || - serialized_op[chrs_parsed] == true_char); + serialized_op[chrs_parsed] == true_char); append = serialized_op[chrs_parsed] != false_char; //check that we consumed all the input string assert(chrs_parsed + 1 == serialized_op.size()); diff --git a/src/daemon/daemon.cpp b/src/daemon/daemon.cpp index e199b9f1a..5a198dbe4 100644 --- a/src/daemon/daemon.cpp +++ b/src/daemon/daemon.cpp @@ -150,7 +150,7 @@ void init_io_tasklet_pool() { vector xstreams(xstreams_num); for (unsigned int i = 0; i < xstreams_num; ++i) { ret = ABT_xstream_create_basic(ABT_SCHED_BASIC_WAIT, 1, &pool, - ABT_SCHED_CONFIG_NULL, &xstreams[i]); + ABT_SCHED_CONFIG_NULL, &xstreams[i]); if (ret != ABT_SUCCESS) { throw runtime_error("Failed to create task execution streams for I/O operations"); } @@ -160,7 +160,7 @@ void init_io_tasklet_pool() { RPC_DATA->io_pool(pool); } -void init_rpc_server(const string & protocol_port) { +void init_rpc_server(const string& protocol_port) { hg_addr_t addr_self; hg_size_t addr_self_cstring_sz = 128; char addr_self_cstring[128]; @@ -262,9 +262,9 @@ void initialize_loggers() { } auto logger_names = std::vector{ - "main", - "MetadataDB", - "ChunkStorage", + "main", + "MetadataDB", + "ChunkStorage", }; gkfs::log::setup(logger_names, level, path); @@ -281,8 +281,8 @@ int main(int argc, const char* argv[]) { ("metadir,i", po::value(), "metadata directory, if not set rootdir is used for metadata ") ("listen,l", po::value(), "Address or interface to bind the daemon on. Default: local hostname") ("hosts-file,H", po::value(), - "Shared file used by deamons to register their " - "enpoints. (default './gkfs_hosts.txt')") + "Shared file used by deamons to register their " + "enpoints. 
(default './gkfs_hosts.txt')") ("version,h", "print version and exit"); po::variables_map vm; po::store(po::parse_command_line(argc, argv, desc), vm); -- GitLab From 75a623eb0d60c43fddfb558bc3e65c4cb4e93207 Mon Sep 17 00:00:00 2001 From: Marc Vef Date: Mon, 24 Feb 2020 20:19:43 +0100 Subject: [PATCH 22/25] Renaming gkfs::func to gkfs::sys_call + function renaming --- include/client/gkfs_functions.hpp | 72 +++++++-------- include/client/preload_util.hpp | 7 +- src/client/gkfs_functions.cpp | 142 ++++++++++++++---------------- src/client/hooks.cpp | 55 ++++++------ src/client/preload_util.cpp | 107 ++++++++++++++-------- 5 files changed, 202 insertions(+), 181 deletions(-) diff --git a/include/client/gkfs_functions.hpp b/include/client/gkfs_functions.hpp index 5aa78a405..53faf923f 100644 --- a/include/client/gkfs_functions.hpp +++ b/include/client/gkfs_functions.hpp @@ -36,77 +36,73 @@ using sys_statfs = struct statfs; using sys_statvfs = struct statvfs; namespace gkfs { -namespace func { +namespace syscall { +int gkfs_open(const std::string& path, mode_t mode, int flags); -std::shared_ptr metadata(const std::string& path, bool follow_links = false); +int gkfs_create(const std::string& path, mode_t mode); -int check_parent_dir(const std::string& path); +int gkfs_remove(const std::string& path); -int open(const std::string& path, mode_t mode, int flags); +int gkfs_access(const std::string& path, int mask, bool follow_links = true); -int mk_node(const std::string& path, mode_t mode); +int gkfs_stat(const std::string& path, struct stat* buf, bool follow_links = true); -int rm_node(const std::string& path); +int gkfs_statfs(sys_statfs* buf); -int access(const std::string& path, int mask, bool follow_links = true); +int gkfs_statvfs(sys_statvfs* buf); -int stat(const std::string& path, struct stat* buf, bool follow_links = true); +off64_t gkfs_lseek(unsigned int fd, off64_t offset, unsigned int whence); -int statfs(sys_statfs* buf); +off64_t gkfs_lseek(std::shared_ptr gkfs_fd, off64_t offset, unsigned int whence); -int statvfs(sys_statvfs* buf); +int gkfs_truncate(const std::string& path, off_t offset); -off64_t lseek(unsigned int fd, off64_t offset, unsigned int whence); +int gkfs_truncate(const std::string& path, off_t old_size, off_t new_size); -off64_t lseek(std::shared_ptr gkfs_fd, off64_t offset, unsigned int whence); +int gkfs_dup(int oldfd); -int truncate(const std::string& path, off_t offset); - -int truncate(const std::string& path, off_t old_size, off_t new_size); - -int dup(int oldfd); - -int dup2(int oldfd, int newfd); +int gkfs_dup2(int oldfd, int newfd); #ifdef HAS_SYMLINKS -int mk_symlink(const std::string& path, const std::string& target_path); +int gkfs_mk_symlink(const std::string& path, const std::string& target_path); -int readlink(const std::string& path, char* buf, int bufsize); +int gkfs_readlink(const std::string& path, char* buf, int bufsize); #endif -ssize_t pwrite(std::shared_ptr file, - const char* buf, size_t count, off64_t offset); +ssize_t gkfs_pwrite(std::shared_ptr file, + const char* buf, size_t count, off64_t offset); -ssize_t pwrite_ws(int fd, const void* buf, size_t count, off64_t offset); +ssize_t gkfs_pwrite_ws(int fd, const void* buf, size_t count, off64_t offset); -ssize_t write(int fd, const void* buf, size_t count); +ssize_t gkfs_write(int fd, const void* buf, size_t count); -ssize_t pwritev(int fd, const struct iovec* iov, int iovcnt, off_t offset); +ssize_t gkfs_pwritev(int fd, const struct iovec* iov, int iovcnt, off_t offset); -ssize_t writev(int 
fd, const struct iovec* iov, int iovcnt); +ssize_t gkfs_writev(int fd, const struct iovec* iov, int iovcnt); -ssize_t pread(std::shared_ptr file, char* buf, size_t count, off64_t offset); +ssize_t gkfs_pread(std::shared_ptr file, char* buf, size_t count, off64_t offset); -ssize_t pread_ws(int fd, void* buf, size_t count, off64_t offset); +ssize_t gkfs_pread_ws(int fd, void* buf, size_t count, off64_t offset); -ssize_t read(int fd, void* buf, size_t count); +ssize_t gkfs_read(int fd, void* buf, size_t count); -int opendir(const std::string& path); +int gkfs_opendir(const std::string& path); -int getdents(unsigned int fd, +int gkfs_getdents(unsigned int fd, struct linux_dirent* dirp, unsigned int count); -int getdents64(unsigned int fd, - struct linux_dirent64* dirp, - unsigned int count); +int gkfs_getdents64(unsigned int fd, + struct linux_dirent64* dirp, + unsigned int count); + +int gkfs_rmdir(const std::string& path); -int rmdir(const std::string& path); -} -} +} // namespace syscall +} // namespace gkfs #endif //GEKKOFS_GKFS_FUNCTIONS_HPP diff --git a/include/client/preload_util.hpp b/include/client/preload_util.hpp index 1c411a372..26bf0dd3b 100644 --- a/include/client/preload_util.hpp +++ b/include/client/preload_util.hpp @@ -48,12 +48,15 @@ constexpr typename std::underlying_type::type to_underlying(E e) { return static_cast::type>(e); } +std::shared_ptr get_metadata(const std::string& path, bool follow_links = false); + int metadata_to_stat(const std::string& path, const gkfs::metadata::Metadata& md, struct stat& attr); std::vector> load_hostfile(const std::string& lfpath); void load_hosts(); -} -} + +} // namespace util +} // namespace gkfs #endif //GEKKOFS_PRELOAD_UTIL_HPP diff --git a/src/client/gkfs_functions.cpp b/src/client/gkfs_functions.cpp index 0fb053297..2ca4952b6 100644 --- a/src/client/gkfs_functions.cpp +++ b/src/client/gkfs_functions.cpp @@ -34,31 +34,12 @@ extern "C" { using namespace std; -std::shared_ptr gkfs::func::metadata(const string& path, bool follow_links) { - std::string attr; - auto err = gkfs::rpc::forward_stat(path, attr); - if (err) { - return nullptr; - } -#ifdef HAS_SYMLINKS - if (follow_links) { - gkfs::metadata::Metadata md{attr}; - while (md.is_link()) { - err = gkfs::rpc::forward_stat(md.target_path(), attr); - if (err) { - return nullptr; - } - md = gkfs::metadata::Metadata{attr}; - } - } -#endif - return make_shared(attr); -} +namespace { -int gkfs::func::check_parent_dir(const std::string& path) { +int check_parent_dir(const std::string& path) { #if CREATE_CHECK_PARENTS - auto p_comp = path::dirname(path); - auto md = gkfs::func::metadata(p_comp); + auto p_comp = gkfs::path::dirname(path); + auto md = gkfs::util::get_metadata(p_comp); if (!md) { if (errno == ENOENT) { LOG(DEBUG, "Parent component does not exist: '{}'", p_comp); @@ -75,8 +56,12 @@ int gkfs::func::check_parent_dir(const std::string& path) { #endif // CREATE_CHECK_PARENTS return 0; } +} // namespace -int gkfs::func::open(const std::string& path, mode_t mode, int flags) { +namespace gkfs { +namespace syscall { + +int gkfs_open(const std::string& path, mode_t mode, int flags) { if (flags & O_PATH) { LOG(ERROR, "`O_PATH` flag is not supported"); @@ -91,7 +76,7 @@ int gkfs::func::open(const std::string& path, mode_t mode, int flags) { } bool exists = true; - auto md = gkfs::func::metadata(path); + auto md = gkfs::util::get_metadata(path); if (!md) { if (errno == ENOENT) { exists = false; @@ -118,7 +103,7 @@ int gkfs::func::open(const std::string& path, mode_t mode, int flags) { } // 
no access check required here. If one is using our FS they have the permissions. - if (gkfs::func::mk_node(path, mode | S_IFREG)) { + if (gkfs_create(path, mode | S_IFREG)) { LOG(ERROR, "Error creating non-existent file: '{}'", strerror(errno)); return -1; } @@ -138,12 +123,12 @@ int gkfs::func::open(const std::string& path, mode_t mode, int flags) { errno = ELOOP; return -1; } - return gkfs::func::open(md->target_path(), mode, flags); + return gkfs_open(md->target_path(), mode, flags); } #endif if (S_ISDIR(md->mode())) { - return gkfs::func::opendir(path); + return gkfs_opendir(path); } @@ -151,7 +136,7 @@ int gkfs::func::open(const std::string& path, mode_t mode, int flags) { assert(S_ISREG(md->mode())); if ((flags & O_TRUNC) && ((flags & O_RDWR) || (flags & O_WRONLY))) { - if (gkfs::func::truncate(path, md->size(), 0)) { + if (gkfs_truncate(path, md->size(), 0)) { LOG(ERROR, "Error truncating file"); return -1; } @@ -161,7 +146,7 @@ int gkfs::func::open(const std::string& path, mode_t mode, int flags) { return CTX->file_map()->add(std::make_shared(path, flags)); } -int gkfs::func::mk_node(const std::string& path, mode_t mode) { +int gkfs_create(const std::string& path, mode_t mode) { //file type must be set switch (mode & S_IFMT) { @@ -195,8 +180,8 @@ int gkfs::func::mk_node(const std::string& path, mode_t mode) { * @param path * @return */ -int gkfs::func::rm_node(const std::string& path) { - auto md = gkfs::func::metadata(path); +int gkfs_remove(const std::string& path) { + auto md = gkfs::util::get_metadata(path); if (!md) { return -1; } @@ -204,8 +189,8 @@ int gkfs::func::rm_node(const std::string& path) { return gkfs::rpc::forward_remove(path, !has_data, md->size()); } -int gkfs::func::access(const std::string& path, const int mask, bool follow_links) { - auto md = gkfs::func::metadata(path, follow_links); +int gkfs_access(const std::string& path, const int mask, bool follow_links) { + auto md = gkfs::util::get_metadata(path, follow_links); if (!md) { errno = ENOENT; return -1; @@ -213,8 +198,8 @@ int gkfs::func::access(const std::string& path, const int mask, bool follow_link return 0; } -int gkfs::func::stat(const string& path, struct stat* buf, bool follow_links) { - auto md = gkfs::func::metadata(path, follow_links); +int gkfs_stat(const string& path, struct stat* buf, bool follow_links) { + auto md = gkfs::util::get_metadata(path, follow_links); if (!md) { return -1; } @@ -222,7 +207,7 @@ int gkfs::func::stat(const string& path, struct stat* buf, bool follow_links) { return 0; } -int gkfs::func::statfs(sys_statfs* buf) { +int gkfs_statfs(sys_statfs* buf) { auto blk_stat = gkfs::rpc::forward_get_chunk_stat(); buf->f_type = 0; buf->f_bsize = blk_stat.chunk_size; @@ -239,7 +224,7 @@ int gkfs::func::statfs(sys_statfs* buf) { return 0; } -int gkfs::func::statvfs(sys_statvfs* buf) { +int gkfs_statvfs(sys_statvfs* buf) { init_ld_env_if_needed(); auto blk_stat = gkfs::rpc::forward_get_chunk_stat(); buf->f_bsize = blk_stat.chunk_size; @@ -257,11 +242,11 @@ int gkfs::func::statvfs(sys_statvfs* buf) { return 0; } -off_t gkfs::func::lseek(unsigned int fd, off_t offset, unsigned int whence) { - return gkfs::func::lseek(CTX->file_map()->get(fd), offset, whence); +off_t gkfs_lseek(unsigned int fd, off_t offset, unsigned int whence) { + return gkfs_lseek(CTX->file_map()->get(fd), offset, whence); } -off_t gkfs::func::lseek(shared_ptr gkfs_fd, off_t offset, unsigned int whence) { +off_t gkfs_lseek(shared_ptr gkfs_fd, off_t offset, unsigned int whence) { switch (whence) { case SEEK_SET: 
gkfs_fd->pos(offset); @@ -297,7 +282,7 @@ off_t gkfs::func::lseek(shared_ptr gkfs_fd, off_t offset, unsigned int return gkfs_fd->pos(); } -int gkfs::func::truncate(const std::string& path, off_t old_size, off_t new_size) { +int gkfs_truncate(const std::string& path, off_t old_size, off_t new_size) { assert(new_size >= 0); assert(new_size <= old_size); @@ -317,7 +302,7 @@ int gkfs::func::truncate(const std::string& path, off_t old_size, off_t new_size return 0; } -int gkfs::func::truncate(const std::string& path, off_t length) { +int gkfs_truncate(const std::string& path, off_t length) { /* TODO CONCURRENCY: * At the moment we first ask the length to the metadata-server in order to * know which data-server have data to be deleted. @@ -332,7 +317,7 @@ int gkfs::func::truncate(const std::string& path, off_t length) { return -1; } - auto md = gkfs::func::metadata(path, true); + auto md = gkfs::util::get_metadata(path, true); if (!md) { return -1; } @@ -342,18 +327,18 @@ int gkfs::func::truncate(const std::string& path, off_t length) { errno = EINVAL; return -1; } - return gkfs::func::truncate(path, size, length); + return gkfs_truncate(path, size, length); } -int gkfs::func::dup(const int oldfd) { +int gkfs_dup(const int oldfd) { return CTX->file_map()->dup(oldfd); } -int gkfs::func::dup2(const int oldfd, const int newfd) { +int gkfs_dup2(const int oldfd, const int newfd) { return CTX->file_map()->dup2(oldfd, newfd); } -ssize_t gkfs::func::pwrite(std::shared_ptr file, const char* buf, size_t count, off64_t offset) { +ssize_t gkfs_pwrite(std::shared_ptr file, const char* buf, size_t count, off64_t offset) { if (file->type() != FileType::regular) { assert(file->type() == FileType::directory); LOG(WARNING, "Cannot read from directory"); @@ -377,9 +362,9 @@ ssize_t gkfs::func::pwrite(std::shared_ptr file, const char* buf, size return ret; // return written size or -1 as error } -ssize_t gkfs::func::pwrite_ws(int fd, const void* buf, size_t count, off64_t offset) { +ssize_t gkfs_pwrite_ws(int fd, const void* buf, size_t count, off64_t offset) { auto file = CTX->file_map()->get(fd); - return gkfs::func::pwrite(file, reinterpret_cast(buf), count, offset); + return gkfs_pwrite(file, reinterpret_cast(buf), count, offset); } /* Write counts bytes starting from current file position @@ -387,12 +372,12 @@ ssize_t gkfs::func::pwrite_ws(int fd, const void* buf, size_t count, off64_t off * * Same as write syscall. 
*/ -ssize_t gkfs::func::write(int fd, const void* buf, size_t count) { +ssize_t gkfs_write(int fd, const void* buf, size_t count) { auto gkfs_fd = CTX->file_map()->get(fd); auto pos = gkfs_fd->pos(); //retrieve the current offset if (gkfs_fd->get_flag(OpenFile_flags::append)) - gkfs::func::lseek(gkfs_fd, 0, SEEK_END); - auto ret = gkfs::func::pwrite(gkfs_fd, reinterpret_cast(buf), count, pos); + gkfs_lseek(gkfs_fd, 0, SEEK_END); + auto ret = gkfs_pwrite(gkfs_fd, reinterpret_cast(buf), count, pos); // Update offset in file descriptor in the file map if (ret > 0) { gkfs_fd->pos(pos + count); @@ -400,7 +385,7 @@ ssize_t gkfs::func::write(int fd, const void* buf, size_t count) { return ret; } -ssize_t gkfs::func::pwritev(int fd, const struct iovec* iov, int iovcnt, off_t offset) { +ssize_t gkfs_pwritev(int fd, const struct iovec* iov, int iovcnt, off_t offset) { auto file = CTX->file_map()->get(fd); auto pos = offset; // keep truck of current position @@ -412,7 +397,7 @@ ssize_t gkfs::func::pwritev(int fd, const struct iovec* iov, int iovcnt, off_t o continue; } auto buf = (iov + i)->iov_base; - ret = gkfs::func::pwrite(file, reinterpret_cast(buf), count, pos); + ret = gkfs_pwrite(file, reinterpret_cast(buf), count, pos); if (ret == -1) { break; } @@ -430,11 +415,11 @@ ssize_t gkfs::func::pwritev(int fd, const struct iovec* iov, int iovcnt, off_t o return written; } -ssize_t gkfs::func::writev(int fd, const struct iovec* iov, int iovcnt) { +ssize_t gkfs_writev(int fd, const struct iovec* iov, int iovcnt) { auto gkfs_fd = CTX->file_map()->get(fd); auto pos = gkfs_fd->pos(); // retrieve the current offset - auto ret = gkfs::func::pwritev(fd, iov, iovcnt, pos); + auto ret = gkfs_pwritev(fd, iov, iovcnt, pos); assert(ret != 0); if (ret < 0) { return -1; @@ -443,7 +428,7 @@ ssize_t gkfs::func::writev(int fd, const struct iovec* iov, int iovcnt) { return ret; } -ssize_t gkfs::func::pread(std::shared_ptr file, char* buf, size_t count, off64_t offset) { +ssize_t gkfs_pread(std::shared_ptr file, char* buf, size_t count, off64_t offset) { if (file->type() != FileType::regular) { assert(file->type() == FileType::directory); LOG(WARNING, "Cannot read from directory"); @@ -463,10 +448,10 @@ ssize_t gkfs::func::pread(std::shared_ptr file, char* buf, size_t coun return ret; // return read size or -1 as error } -ssize_t gkfs::func::read(int fd, void* buf, size_t count) { +ssize_t gkfs_read(int fd, void* buf, size_t count) { auto gkfs_fd = CTX->file_map()->get(fd); auto pos = gkfs_fd->pos(); //retrieve the current offset - auto ret = gkfs::func::pread(gkfs_fd, reinterpret_cast(buf), count, pos); + auto ret = gkfs_pread(gkfs_fd, reinterpret_cast(buf), count, pos); // Update offset in file descriptor in the file map if (ret > 0) { gkfs_fd->pos(pos + ret); @@ -474,14 +459,14 @@ ssize_t gkfs::func::read(int fd, void* buf, size_t count) { return ret; } -ssize_t gkfs::func::pread_ws(int fd, void* buf, size_t count, off64_t offset) { +ssize_t gkfs_pread_ws(int fd, void* buf, size_t count, off64_t offset) { auto gkfs_fd = CTX->file_map()->get(fd); - return gkfs::func::pread(gkfs_fd, reinterpret_cast(buf), count, offset); + return gkfs_pread(gkfs_fd, reinterpret_cast(buf), count, offset); } -int gkfs::func::opendir(const std::string& path) { +int gkfs_opendir(const std::string& path) { - auto md = gkfs::func::metadata(path); + auto md = gkfs::util::get_metadata(path); if (!md) { return -1; } @@ -496,8 +481,8 @@ int gkfs::func::opendir(const std::string& path) { return CTX->file_map()->add(open_dir); } -int 
gkfs::func::rmdir(const std::string& path) { - auto md = gkfs::func::metadata(path); +int gkfs_rmdir(const std::string& path) { + auto md = gkfs::util::get_metadata(path); if (!md) { LOG(DEBUG, "Path '{}' does not exist: ", path); errno = ENOENT; @@ -518,9 +503,9 @@ int gkfs::func::rmdir(const std::string& path) { return gkfs::rpc::forward_remove(path, true, 0); } -int gkfs::func::getdents(unsigned int fd, - struct linux_dirent* dirp, - unsigned int count) { +int gkfs_getdents(unsigned int fd, + struct linux_dirent* dirp, + unsigned int count) { auto open_dir = CTX->file_map()->get_dir(fd); if (open_dir == nullptr) { @@ -571,9 +556,9 @@ int gkfs::func::getdents(unsigned int fd, } -int gkfs::func::getdents64(unsigned int fd, - struct linux_dirent64* dirp, - unsigned int count) { +int gkfs_getdents64(unsigned int fd, + struct linux_dirent64* dirp, + unsigned int count) { auto open_dir = CTX->file_map()->get_dir(fd); if (open_dir == nullptr) { @@ -624,14 +609,14 @@ int gkfs::func::getdents64(unsigned int fd, #ifdef HAS_SYMLINKS -int gkfs::func::mk_symlink(const std::string& path, const std::string& target_path) { +int gkfs_mk_symlink(const std::string& path, const std::string& target_path) { init_ld_env_if_needed(); /* The following check is not POSIX compliant. * In POSIX the target is not checked at all. * Here if the target is a directory we raise a NOTSUP error. * So that application know we don't support link to directory. */ - auto target_md = gkfs::func::metadata(target_path, false); + auto target_md = gkfs::util::get_metadata(target_path, false); if (target_md != nullptr) { auto trg_mode = target_md->mode(); if (!(S_ISREG(trg_mode) || S_ISLNK(trg_mode))) { @@ -646,7 +631,7 @@ int gkfs::func::mk_symlink(const std::string& path, const std::string& target_pa return -1; } - auto link_md = gkfs::func::metadata(path, false); + auto link_md = gkfs::util::get_metadata(path, false); if (link_md != nullptr) { LOG(DEBUG, "Link exists: '{}'", path); errno = EEXIST; @@ -656,9 +641,9 @@ int gkfs::func::mk_symlink(const std::string& path, const std::string& target_pa return gkfs::rpc::forward_mk_symlink(path, target_path); } -int gkfs::func::readlink(const std::string& path, char* buf, int bufsize) { +int gkfs_readlink(const std::string& path, char* buf, int bufsize) { init_ld_env_if_needed(); - auto md = gkfs::func::metadata(path, false); + auto md = gkfs::util::get_metadata(path, false); if (md == nullptr) { LOG(DEBUG, "Named link doesn't exist"); return -1; @@ -680,4 +665,7 @@ int gkfs::func::readlink(const std::string& path, char* buf, int bufsize) { return path_size; } +} // namespace syscall +} // namespace gkfs + #endif \ No newline at end of file diff --git a/src/client/hooks.cpp b/src/client/hooks.cpp index 68a586fd6..aa80b3bbc 100644 --- a/src/client/hooks.cpp +++ b/src/client/hooks.cpp @@ -13,6 +13,7 @@ #include #include +#include #include #include #include @@ -51,7 +52,7 @@ int hook_openat(int dirfd, const char* cpath, int flags, mode_t mode) { return -ENOTDIR; case RelativizeStatus::internal: - return with_errno(gkfs::func::open(resolved, mode, flags)); + return with_errno(gkfs::syscall::gkfs_open(resolved, mode, flags)); default: LOG(ERROR, "{}() relativize status unknown: {}", __func__); @@ -85,7 +86,7 @@ int hook_stat(const char* path, struct stat* buf) { std::string rel_path; if (CTX->relativize_path(path, rel_path, false)) { - return with_errno(gkfs::func::stat(rel_path, buf)); + return with_errno(gkfs::syscall::gkfs_stat(rel_path, buf)); } return syscall_no_intercept(SYS_stat, 
rel_path.c_str(), buf); } @@ -97,7 +98,7 @@ int hook_lstat(const char* path, struct stat* buf) { std::string rel_path; if (CTX->relativize_path(path, rel_path)) { - return with_errno(gkfs::func::stat(rel_path, buf)); + return with_errno(gkfs::syscall::gkfs_stat(rel_path, buf)); } return syscall_no_intercept(SYS_lstat, rel_path.c_str(), buf); } @@ -109,7 +110,7 @@ int hook_fstat(unsigned int fd, struct stat* buf) { if (CTX->file_map()->exist(fd)) { auto path = CTX->file_map()->get(fd)->path(); - return with_errno(gkfs::func::stat(path, buf)); + return with_errno(gkfs::syscall::gkfs_stat(path, buf)); } return syscall_no_intercept(SYS_fstat, fd, buf); } @@ -137,7 +138,7 @@ int hook_fstatat(int dirfd, const char* cpath, struct stat* buf, int flags) { return -ENOTDIR; case RelativizeStatus::internal: - return with_errno(gkfs::func::stat(resolved, buf)); + return with_errno(gkfs::syscall::gkfs_stat(resolved, buf)); default: LOG(ERROR, "{}() relativize status unknown: {}", __func__); @@ -151,7 +152,7 @@ int hook_read(unsigned int fd, void* buf, size_t count) { __func__, fd, fmt::ptr(buf), count); if (CTX->file_map()->exist(fd)) { - return with_errno(gkfs::func::read(fd, buf, count)); + return with_errno(gkfs::syscall::gkfs_read(fd, buf, count)); } return syscall_no_intercept(SYS_read, fd, buf, count); } @@ -162,7 +163,7 @@ int hook_pread(unsigned int fd, char* buf, size_t count, loff_t pos) { __func__, fd, fmt::ptr(buf), count, pos); if (CTX->file_map()->exist(fd)) { - return with_errno(gkfs::func::pread_ws(fd, buf, count, pos)); + return with_errno(gkfs::syscall::gkfs_pread_ws(fd, buf, count, pos)); } /* Since kernel 2.6: pread() became pread64(), and pwrite() became pwrite64(). */ return syscall_no_intercept(SYS_pread64, fd, buf, count, pos); @@ -174,7 +175,7 @@ int hook_write(unsigned int fd, const char* buf, size_t count) { __func__, fd, fmt::ptr(buf), count); if (CTX->file_map()->exist(fd)) { - return with_errno(gkfs::func::write(fd, buf, count)); + return with_errno(gkfs::syscall::gkfs_write(fd, buf, count)); } return syscall_no_intercept(SYS_write, fd, buf, count); } @@ -185,7 +186,7 @@ int hook_pwrite(unsigned int fd, const char* buf, size_t count, loff_t pos) { __func__, fd, fmt::ptr(buf), count, pos); if (CTX->file_map()->exist(fd)) { - return with_errno(gkfs::func::pwrite_ws(fd, buf, count, pos)); + return with_errno(gkfs::syscall::gkfs_pwrite_ws(fd, buf, count, pos)); } /* Since kernel 2.6: pread() became pread64(), and pwrite() became pwrite64(). 
*/ return syscall_no_intercept(SYS_pwrite64, fd, buf, count, pos); @@ -197,7 +198,7 @@ int hook_writev(unsigned long fd, const struct iovec* iov, unsigned long iovcnt) __func__, fd, fmt::ptr(iov), iovcnt); if (CTX->file_map()->exist(fd)) { - return with_errno(gkfs::func::writev(fd, iov, iovcnt)); + return with_errno(gkfs::syscall::gkfs_writev(fd, iov, iovcnt)); } return syscall_no_intercept(SYS_writev, fd, iov, iovcnt); } @@ -240,9 +241,9 @@ int hook_unlinkat(int dirfd, const char* cpath, int flags) { case RelativizeStatus::internal: if (flags & AT_REMOVEDIR) { - return with_errno(gkfs::func::rmdir(resolved)); + return with_errno(gkfs::syscall::gkfs_rmdir(resolved)); } else { - return with_errno(gkfs::func::rm_node(resolved)); + return with_errno(gkfs::syscall::gkfs_remove(resolved)); } default: @@ -292,7 +293,7 @@ int hook_access(const char* path, int mask) { std::string rel_path; if (CTX->relativize_path(path, rel_path)) { - auto ret = gkfs::func::access(rel_path, mask); + auto ret = gkfs::syscall::gkfs_access(rel_path, mask); if (ret < 0) { return -errno; } @@ -319,7 +320,7 @@ int hook_faccessat(int dirfd, const char* cpath, int mode) { return -ENOTDIR; case RelativizeStatus::internal: - return with_errno(gkfs::func::access(resolved, mode)); + return with_errno(gkfs::syscall::gkfs_access(resolved, mode)); default: LOG(ERROR, "{}() relativize status unknown: {}", __func__); @@ -333,7 +334,7 @@ off_t hook_lseek(unsigned int fd, off_t offset, unsigned int whence) { __func__, fd, offset, whence); if (CTX->file_map()->exist(fd)) { - auto off_ret = gkfs::func::lseek(fd, static_cast(offset), whence); + auto off_ret = gkfs::syscall::gkfs_lseek(fd, static_cast(offset), whence); if (off_ret > std::numeric_limits::max()) { return -EOVERFLOW; } else if (off_ret < 0) { @@ -352,7 +353,7 @@ int hook_truncate(const char* path, long length) { std::string rel_path; if (CTX->relativize_path(path, rel_path)) { - return with_errno(gkfs::func::truncate(rel_path, length)); + return with_errno(gkfs::syscall::gkfs_truncate(rel_path, length)); } return syscall_no_intercept(SYS_truncate, rel_path.c_str(), length); } @@ -364,7 +365,7 @@ int hook_ftruncate(unsigned int fd, unsigned long length) { if (CTX->file_map()->exist(fd)) { auto path = CTX->file_map()->get(fd)->path(); - return with_errno(gkfs::func::truncate(path, length)); + return with_errno(gkfs::syscall::gkfs_truncate(path, length)); } return syscall_no_intercept(SYS_ftruncate, fd, length); } @@ -375,7 +376,7 @@ int hook_dup(unsigned int fd) { __func__, fd); if (CTX->file_map()->exist(fd)) { - return with_errno(gkfs::func::dup(fd)); + return with_errno(gkfs::syscall::gkfs_dup(fd)); } return syscall_no_intercept(SYS_dup, fd); } @@ -386,7 +387,7 @@ int hook_dup2(unsigned int oldfd, unsigned int newfd) { __func__, oldfd, newfd); if (CTX->file_map()->exist(oldfd)) { - return with_errno(gkfs::func::dup2(oldfd, newfd)); + return with_errno(gkfs::syscall::gkfs_dup2(oldfd, newfd)); } return syscall_no_intercept(SYS_dup2, oldfd, newfd); } @@ -411,7 +412,7 @@ int hook_getdents(unsigned int fd, struct linux_dirent* dirp, unsigned int count __func__, fd, fmt::ptr(dirp), count); if (CTX->file_map()->exist(fd)) { - return with_errno(gkfs::func::getdents(fd, dirp, count)); + return with_errno(gkfs::syscall::gkfs_getdents(fd, dirp, count)); } return syscall_no_intercept(SYS_getdents, fd, dirp, count); } @@ -423,7 +424,7 @@ int hook_getdents64(unsigned int fd, struct linux_dirent64* dirp, unsigned int c __func__, fd, fmt::ptr(dirp), count); if 
(CTX->file_map()->exist(fd)) { - return with_errno(gkfs::func::getdents64(fd, dirp, count)); + return with_errno(gkfs::syscall::gkfs_getdents64(fd, dirp, count)); } return syscall_no_intercept(SYS_getdents64, fd, dirp, count); } @@ -447,7 +448,7 @@ int hook_mkdirat(int dirfd, const char* cpath, mode_t mode) { return -ENOTDIR; case RelativizeStatus::internal: - return with_errno(gkfs::func::mk_node(resolved, mode | S_IFDIR)); + return with_errno(gkfs::syscall::gkfs_create(resolved, mode | S_IFDIR)); default: LOG(ERROR, "{}() relativize status unknown: {}", __func__); @@ -503,7 +504,7 @@ int hook_chdir(const char* path) { bool internal = CTX->relativize_path(path, rel_path); if (internal) { //path falls in our namespace - auto md = gkfs::func::metadata(rel_path); + auto md = gkfs::util::get_metadata(rel_path); if (md == nullptr) { LOG(ERROR, "{}() path does not exists", __func__); return -ENOENT; @@ -619,11 +620,11 @@ int hook_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg) { case F_DUPFD: LOG(DEBUG, "{}() F_DUPFD on fd {}", __func__, fd); - return with_errno(gkfs::func::dup(fd)); + return with_errno(gkfs::syscall::gkfs_dup(fd)); case F_DUPFD_CLOEXEC: LOG(DEBUG, "{}() F_DUPFD_CLOEXEC on fd {}", __func__, fd); - ret = gkfs::func::dup(fd); + ret = gkfs::syscall::gkfs_dup(fd); if (ret == -1) { return -errno; } @@ -736,7 +737,7 @@ int hook_statfs(const char* path, struct statfs* buf) { std::string rel_path; if (CTX->relativize_path(path, rel_path)) { - return with_errno(gkfs::func::statfs(buf)); + return with_errno(gkfs::syscall::gkfs_statfs(buf)); } return syscall_no_intercept(SYS_statfs, rel_path.c_str(), buf); } @@ -747,7 +748,7 @@ int hook_fstatfs(unsigned int fd, struct statfs* buf) { __func__, fd, fmt::ptr(buf)); if (CTX->file_map()->exist(fd)) { - return with_errno(gkfs::func::statfs(buf)); + return with_errno(gkfs::syscall::gkfs_statfs(buf)); } return syscall_no_intercept(SYS_fstatfs, fd, buf); } diff --git a/src/client/preload_util.cpp b/src/client/preload_util.cpp index 9038a1d5e..4db07f20e 100644 --- a/src/client/preload_util.cpp +++ b/src/client/preload_util.cpp @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -33,6 +34,66 @@ extern "C" { using namespace std; +namespace { + +hermes::endpoint lookup_endpoint(const std::string& uri, + std::size_t max_retries = 3) { + + LOG(DEBUG, "Looking up address \"{}\"", uri); + + std::random_device rd; // obtain a random number from hardware + std::size_t attempts = 0; + std::string error_msg; + + do { + try { + return ld_network_service->lookup(uri); + } catch (const exception& ex) { + error_msg = ex.what(); + + LOG(WARNING, "Failed to lookup address '{}'. 
Attempts [{}/{}]", + uri, attempts + 1, max_retries); + + // Wait a random amount of time and try again + std::mt19937 g(rd()); // seed the random generator + std::uniform_int_distribution<> distr(50, 50 * (attempts + 2)); // define the range + std::this_thread::sleep_for(std::chrono::milliseconds(distr(g))); + continue; + } + } while (++attempts < max_retries); + + throw std::runtime_error( + fmt::format("Endpoint for address '{}' could not be found ({})", + uri, error_msg)); +} + +} // namespace + +namespace gkfs { +namespace util { + + +std::shared_ptr get_metadata(const string& path, bool follow_links) { + std::string attr; + auto err = gkfs::rpc::forward_stat(path, attr); + if (err) { + return nullptr; + } +#ifdef HAS_SYMLINKS + if (follow_links) { + gkfs::metadata::Metadata md{attr}; + while (md.is_link()) { + err = gkfs::rpc::forward_stat(md.target_path(), attr); + if (err) { + return nullptr; + } + md = gkfs::metadata::Metadata{attr}; + } + } +#endif + return make_shared(attr); +} + /** * Converts the Metadata object into a stat struct, which is needed by Linux * @param path @@ -40,7 +101,7 @@ using namespace std; * @param attr * @return */ -int gkfs::util::metadata_to_stat(const std::string& path, const gkfs::metadata::Metadata& md, struct stat& attr) { +int metadata_to_stat(const std::string& path, const gkfs::metadata::Metadata& md, struct stat& attr) { /* Populate default values */ attr.st_dev = makedev(0, 0); @@ -83,7 +144,7 @@ int gkfs::util::metadata_to_stat(const std::string& path, const gkfs::metadata:: return 0; } -vector> gkfs::util::load_hostfile(const std::string& lfpath) { +vector> load_hostfile(const std::string& lfpath) { LOG(DEBUG, "Loading hosts file: \"{}\"", lfpath); @@ -115,51 +176,20 @@ vector> gkfs::util::load_hostfile(const std::string& lfpath return hosts; } -hermes::endpoint lookup_endpoint(const std::string& uri, - std::size_t max_retries = 3) { - - LOG(DEBUG, "Looking up address \"{}\"", uri); - - std::random_device rd; // obtain a random number from hardware - std::size_t attempts = 0; - std::string error_msg; - - do { - try { - return ld_network_service->lookup(uri); - } catch (const exception& ex) { - error_msg = ex.what(); - - LOG(WARNING, "Failed to lookup address '{}'. 
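lookup_endpoint() above retries a failed address lookup a bounded number of times and sleeps for a randomized, growing interval between attempts. The same pattern in isolation; do_lookup is an assumed stand-in for ld_network_service->lookup():

#include <chrono>
#include <random>
#include <stdexcept>
#include <string>
#include <thread>

// Retry do_lookup(uri) up to max_retries times with randomized backoff.
template <typename LookupFn>
auto retry_lookup(LookupFn do_lookup, const std::string& uri,
                  std::size_t max_retries = 3) {
    std::random_device rd;          // seed source
    std::string error_msg;
    for (std::size_t attempt = 0; attempt < max_retries; ++attempt) {
        try {
            return do_lookup(uri);  // success: hand back the endpoint
        } catch (const std::exception& ex) {
            error_msg = ex.what();
            // Wait between 50 ms and 50 * (attempt + 2) ms before retrying.
            std::mt19937 gen(rd());
            std::uniform_int_distribution<int> wait_ms(50, 50 * (attempt + 2));
            std::this_thread::sleep_for(std::chrono::milliseconds(wait_ms(gen)));
        }
    }
    throw std::runtime_error("lookup of '" + uri + "' failed: " + error_msg);
}

In the patch the lookup callable would simply wrap ld_network_service->lookup(uri); the helper rethrows with the last error message once the retry budget is exhausted.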
Attempts [{}/{}]", - uri, attempts + 1, max_retries); - - // Wait a random amount of time and try again - std::mt19937 g(rd()); // seed the random generator - std::uniform_int_distribution<> distr(50, 50 * (attempts + 2)); // define the range - std::this_thread::sleep_for(std::chrono::milliseconds(distr(g))); - continue; - } - } while (++attempts < max_retries); - - throw std::runtime_error( - fmt::format("Endpoint for address '{}' could not be found ({})", - uri, error_msg)); -} - -void gkfs::util::load_hosts() { +void load_hosts() { string hostfile; hostfile = gkfs::env::get_var(gkfs::env::HOSTS_FILE, gkfs::config::hostfile_path); vector> hosts; try { - hosts = gkfs::util::load_hostfile(hostfile); + hosts = load_hostfile(hostfile); } catch (const exception& e) { auto emsg = fmt::format("Failed to load hosts file: {}", e.what()); throw runtime_error(emsg); } - if (hosts.size() == 0) { + if (hosts.empty()) { throw runtime_error(fmt::format("Hostfile empty: '{}'", hostfile)); } @@ -189,7 +219,7 @@ void gkfs::util::load_hosts() { const auto& hostname = hosts.at(id).first; const auto& uri = hosts.at(id).second; - addrs[id] = ::lookup_endpoint(uri); + addrs[id] = lookup_endpoint(uri); if (!local_host_found && hostname == local_hostname) { LOG(DEBUG, "Found local host: {}", hostname); @@ -207,3 +237,6 @@ void gkfs::util::load_hosts() { CTX->hosts(addrs); } + +} // namespace util +} // namespace gkfs \ No newline at end of file -- GitLab From dee19edc531e1534b9557881c465eedec7a8f6a3 Mon Sep 17 00:00:00 2001 From: Marc Vef Date: Mon, 24 Feb 2020 22:44:23 +0100 Subject: [PATCH 23/25] Adding gkfs::filemap for OpenFileMap, OpenFile, DirEntry, OpenDir classes --- include/client/gkfs_functions.hpp | 6 ++--- include/client/open_dir.hpp | 4 ++++ include/client/open_file_map.hpp | 5 ++++ include/client/preload_context.hpp | 10 ++++---- include/client/rpc/forward_metadata.hpp | 13 ++++++---- src/client/gkfs_functions.cpp | 32 ++++++++++++------------- src/client/hooks.cpp | 12 +++++----- src/client/open_dir.cpp | 6 +++++ src/client/open_file_map.cpp | 6 +++++ src/client/preload_context.cpp | 4 ++-- src/client/rpc/forward_metadata.cpp | 5 ++-- 11 files changed, 66 insertions(+), 37 deletions(-) diff --git a/include/client/gkfs_functions.hpp b/include/client/gkfs_functions.hpp index 53faf923f..9ff57c90c 100644 --- a/include/client/gkfs_functions.hpp +++ b/include/client/gkfs_functions.hpp @@ -54,7 +54,7 @@ int gkfs_statvfs(sys_statvfs* buf); off64_t gkfs_lseek(unsigned int fd, off64_t offset, unsigned int whence); -off64_t gkfs_lseek(std::shared_ptr gkfs_fd, off64_t offset, unsigned int whence); +off64_t gkfs_lseek(std::shared_ptr gkfs_fd, off64_t offset, unsigned int whence); int gkfs_truncate(const std::string& path, off_t offset); @@ -72,7 +72,7 @@ int gkfs_readlink(const std::string& path, char* buf, int bufsize); #endif -ssize_t gkfs_pwrite(std::shared_ptr file, +ssize_t gkfs_pwrite(std::shared_ptr file, const char* buf, size_t count, off64_t offset); ssize_t gkfs_pwrite_ws(int fd, const void* buf, size_t count, off64_t offset); @@ -83,7 +83,7 @@ ssize_t gkfs_pwritev(int fd, const struct iovec* iov, int iovcnt, off_t offset); ssize_t gkfs_writev(int fd, const struct iovec* iov, int iovcnt); -ssize_t gkfs_pread(std::shared_ptr file, char* buf, size_t count, off64_t offset); +ssize_t gkfs_pread(std::shared_ptr file, char* buf, size_t count, off64_t offset); ssize_t gkfs_pread_ws(int fd, void* buf, size_t count, off64_t offset); diff --git a/include/client/open_dir.hpp b/include/client/open_dir.hpp index 
4ecb310a6..46fbc796d 100644 --- a/include/client/open_dir.hpp +++ b/include/client/open_dir.hpp @@ -22,6 +22,8 @@ #include +namespace gkfs { +namespace filemap { class DirEntry { private: @@ -50,5 +52,7 @@ public: size_t size(); }; +} // namespace filemap +} // namespace gkfs #endif //GEKKOFS_OPEN_DIR_HPP diff --git a/include/client/open_file_map.hpp b/include/client/open_file_map.hpp index 7f9d8e170..5199d2e60 100644 --- a/include/client/open_file_map.hpp +++ b/include/client/open_file_map.hpp @@ -20,6 +20,9 @@ #include #include +namespace gkfs { +namespace filemap { + /* Forward declaration */ class OpenDir; @@ -115,5 +118,7 @@ public: int get_fd_idx(); }; +} // namespace filemap +} // namespace gkfs #endif //GEKKOFS_OPEN_FILE_MAP_HPP diff --git a/include/client/preload_context.hpp b/include/client/preload_context.hpp index f28c26937..149311d0f 100644 --- a/include/client/preload_context.hpp +++ b/include/client/preload_context.hpp @@ -24,10 +24,12 @@ #include /* Forward declarations */ -class OpenFileMap; +namespace gkfs { +namespace filemap { +class OpenFileMap; +} -namespace gkfs { namespace rpc { class Distributor; } @@ -66,7 +68,7 @@ class PreloadContext { private: PreloadContext(); - std::shared_ptr ofm_; + std::shared_ptr ofm_; std::shared_ptr distributor_; std::shared_ptr fs_conf_; @@ -124,7 +126,7 @@ public: bool relativize_path(const char* raw_path, std::string& relative_path, bool resolve_last_link = true) const; - const std::shared_ptr& file_map() const; + const std::shared_ptr& file_map() const; void distributor(std::shared_ptr distributor); diff --git a/include/client/rpc/forward_metadata.hpp b/include/client/rpc/forward_metadata.hpp index 75bbd7d86..572eaee05 100644 --- a/include/client/rpc/forward_metadata.hpp +++ b/include/client/rpc/forward_metadata.hpp @@ -20,11 +20,15 @@ /* Forward declaration */ struct MetadentryUpdateFlags; -class OpenDir; +namespace gkfs { +namespace filemap { +class OpenDir; +} +namespace metadata { class Metadata; +} -namespace gkfs { namespace rpc { int forward_create(const std::string& path, mode_t mode); @@ -35,14 +39,15 @@ int forward_remove(const std::string& path, bool remove_metadentry_only, ssize_t int forward_decr_size(const std::string& path, size_t length); -int forward_update_metadentry(const std::string& path, const Metadata& md, const MetadentryUpdateFlags& md_flags); +int forward_update_metadentry(const std::string& path, const gkfs::metadata::Metadata& md, + const MetadentryUpdateFlags& md_flags); int forward_update_metadentry_size(const std::string& path, size_t size, off64_t offset, bool append_flag, off64_t& ret_size); int forward_get_metadentry_size(const std::string& path, off64_t& ret_size); -void forward_get_dirents(OpenDir& open_dir); +void forward_get_dirents(gkfs::filemap::OpenDir& open_dir); #ifdef HAS_SYMLINKS diff --git a/src/client/gkfs_functions.cpp b/src/client/gkfs_functions.cpp index 2ca4952b6..e724f790a 100644 --- a/src/client/gkfs_functions.cpp +++ b/src/client/gkfs_functions.cpp @@ -143,7 +143,7 @@ int gkfs_open(const std::string& path, mode_t mode, int flags) { } } - return CTX->file_map()->add(std::make_shared(path, flags)); + return CTX->file_map()->add(std::make_shared(path, flags)); } int gkfs_create(const std::string& path, mode_t mode) { @@ -246,7 +246,7 @@ off_t gkfs_lseek(unsigned int fd, off_t offset, unsigned int whence) { return gkfs_lseek(CTX->file_map()->get(fd), offset, whence); } -off_t gkfs_lseek(shared_ptr gkfs_fd, off_t offset, unsigned int whence) { +off_t gkfs_lseek(shared_ptr gkfs_fd, 
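The preload_context.hpp hunk above moves the OpenFileMap forward declaration into gkfs::filemap so the header can keep holding a std::shared_ptr to it without pulling in the full class definition. A compilable sketch of that pattern with placeholder names rather than the real headers:

#include <memory>

namespace gkfs {
namespace filemap {
class OpenFileMap;   // forward declaration only; definition lives elsewhere
} // namespace filemap
} // namespace gkfs

class ContextSketch {
private:
    // shared_ptr tolerates an incomplete element type here because its
    // deleter is type-erased when the pointer is first created.
    std::shared_ptr<gkfs::filemap::OpenFileMap> ofm_;
public:
    const std::shared_ptr<gkfs::filemap::OpenFileMap>& file_map() const {
        return ofm_;
    }
};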
off_t offset, unsigned int whence) { switch (whence) { case SEEK_SET: gkfs_fd->pos(offset); @@ -338,15 +338,15 @@ int gkfs_dup2(const int oldfd, const int newfd) { return CTX->file_map()->dup2(oldfd, newfd); } -ssize_t gkfs_pwrite(std::shared_ptr file, const char* buf, size_t count, off64_t offset) { - if (file->type() != FileType::regular) { - assert(file->type() == FileType::directory); +ssize_t gkfs_pwrite(std::shared_ptr file, const char* buf, size_t count, off64_t offset) { + if (file->type() != gkfs::filemap::FileType::regular) { + assert(file->type() == gkfs::filemap::FileType::directory); LOG(WARNING, "Cannot read from directory"); errno = EISDIR; return -1; } auto path = make_shared(file->path()); - auto append_flag = file->get_flag(OpenFile_flags::append); + auto append_flag = file->get_flag(gkfs::filemap::OpenFile_flags::append); ssize_t ret = 0; long updated_size = 0; @@ -375,7 +375,7 @@ ssize_t gkfs_pwrite_ws(int fd, const void* buf, size_t count, off64_t offset) { ssize_t gkfs_write(int fd, const void* buf, size_t count) { auto gkfs_fd = CTX->file_map()->get(fd); auto pos = gkfs_fd->pos(); //retrieve the current offset - if (gkfs_fd->get_flag(OpenFile_flags::append)) + if (gkfs_fd->get_flag(gkfs::filemap::OpenFile_flags::append)) gkfs_lseek(gkfs_fd, 0, SEEK_END); auto ret = gkfs_pwrite(gkfs_fd, reinterpret_cast(buf), count, pos); // Update offset in file descriptor in the file map @@ -428,9 +428,9 @@ ssize_t gkfs_writev(int fd, const struct iovec* iov, int iovcnt) { return ret; } -ssize_t gkfs_pread(std::shared_ptr file, char* buf, size_t count, off64_t offset) { - if (file->type() != FileType::regular) { - assert(file->type() == FileType::directory); +ssize_t gkfs_pread(std::shared_ptr file, char* buf, size_t count, off64_t offset) { + if (file->type() != gkfs::filemap::FileType::regular) { + assert(file->type() == gkfs::filemap::FileType::directory); LOG(WARNING, "Cannot read from directory"); errno = EISDIR; return -1; @@ -476,7 +476,7 @@ int gkfs_opendir(const std::string& path) { return -1; } - auto open_dir = std::make_shared(path); + auto open_dir = std::make_shared(path); gkfs::rpc::forward_get_dirents(*open_dir); return CTX->file_map()->add(open_dir); } @@ -494,7 +494,7 @@ int gkfs_rmdir(const std::string& path) { return -1; } - auto open_dir = std::make_shared(path); + auto open_dir = std::make_shared(path); gkfs::rpc::forward_get_dirents(*open_dir); if (open_dir->size() != 0) { errno = ENOTEMPTY; @@ -522,7 +522,7 @@ int gkfs_getdents(unsigned int fd, unsigned int written = 0; struct linux_dirent* current_dirp = nullptr; while (pos < open_dir->size()) { - DirEntry de = open_dir->getdent(pos); + gkfs::filemap::DirEntry de = open_dir->getdent(pos); auto total_size = ALIGN(offsetof( struct linux_dirent, d_name) + de.name().size() + 3, sizeof(long)); @@ -538,7 +538,7 @@ int gkfs_getdents(unsigned int fd, current_dirp->d_reclen = total_size; *(reinterpret_cast(current_dirp) + total_size - 1) = - ((de.type() == FileType::regular) ? DT_REG : DT_DIR); + ((de.type() == gkfs::filemap::FileType::regular) ? 
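The gkfs_getdents()/gkfs_getdents64() loops above size each record as ALIGN(offsetof(struct linux_dirent, d_name) + name length + 3, sizeof(long)) before packing it into the caller's buffer; the extra bytes leave room for the name's terminating NUL and the d_type byte written into the last byte of the record. A self-contained sketch of that size computation; the struct layout is repeated here only for illustration:

#include <cstddef>
#include <string>

// Same shape as the kernel's getdents record (illustrative copy).
struct linux_dirent {
    unsigned long d_ino;
    unsigned long d_off;
    unsigned short d_reclen;
    char d_name[1];
};

// Round x up to the next multiple of a (a must be a power of two).
constexpr std::size_t align_up(std::size_t x, std::size_t a) {
    return (x + a - 1) & ~(a - 1);
}

// Bytes this entry occupies in the user buffer, including the trailing
// '\0', padding, and the d_type byte placed at the end of the record.
std::size_t dirent_record_size(const std::string& name) {
    return align_up(offsetof(linux_dirent, d_name) + name.size() + 3,
                    sizeof(long));
}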
DT_REG : DT_DIR); LOG(DEBUG, "name {}: {}", pos, de.name()); std::strcpy(&(current_dirp->d_name[0]), de.name().c_str()); @@ -575,7 +575,7 @@ int gkfs_getdents64(unsigned int fd, unsigned int written = 0; struct linux_dirent64* current_dirp = nullptr; while (pos < open_dir->size()) { - DirEntry de = open_dir->getdent(pos); + gkfs::filemap::DirEntry de = open_dir->getdent(pos); auto total_size = ALIGN(offsetof( struct linux_dirent64, d_name) + de.name().size() + 3, sizeof(long)); @@ -589,7 +589,7 @@ int gkfs_getdents64(unsigned int fd, open_dir->path() + "/" + de.name()); current_dirp->d_reclen = total_size; - current_dirp->d_type = ((de.type() == FileType::regular) ? DT_REG : DT_DIR); + current_dirp->d_type = ((de.type() == gkfs::filemap::FileType::regular) ? DT_REG : DT_DIR); LOG(DEBUG, "name {}: {}", pos, de.name()); std::strcpy(&(current_dirp->d_name[0]), de.name().c_str()); diff --git a/src/client/hooks.cpp b/src/client/hooks.cpp index aa80b3bbc..3da77eb3e 100644 --- a/src/client/hooks.cpp +++ b/src/client/hooks.cpp @@ -628,13 +628,13 @@ int hook_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg) { if (ret == -1) { return -errno; } - CTX->file_map()->get(fd)->set_flag(OpenFile_flags::cloexec, true); + CTX->file_map()->get(fd)->set_flag(gkfs::filemap::OpenFile_flags::cloexec, true); return ret; case F_GETFD: LOG(DEBUG, "{}() F_GETFD on fd {}", __func__, fd); if (CTX->file_map()->get(fd) - ->get_flag(OpenFile_flags::cloexec)) { + ->get_flag(gkfs::filemap::OpenFile_flags::cloexec)) { return FD_CLOEXEC; } return 0; @@ -643,15 +643,15 @@ int hook_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg) { LOG(DEBUG, "{}() F_GETFL on fd {}", __func__, fd); ret = 0; if (CTX->file_map()->get(fd) - ->get_flag(OpenFile_flags::rdonly)) { + ->get_flag(gkfs::filemap::OpenFile_flags::rdonly)) { ret |= O_RDONLY; } if (CTX->file_map()->get(fd) - ->get_flag(OpenFile_flags::wronly)) { + ->get_flag(gkfs::filemap::OpenFile_flags::wronly)) { ret |= O_WRONLY; } if (CTX->file_map()->get(fd) - ->get_flag(OpenFile_flags::rdwr)) { + ->get_flag(gkfs::filemap::OpenFile_flags::rdwr)) { ret |= O_RDWR; } return ret; @@ -660,7 +660,7 @@ int hook_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg) { LOG(DEBUG, "{}() [fd: {}, cmd: F_SETFD, FD_CLOEXEC: {}]", __func__, fd, (arg & FD_CLOEXEC)); CTX->file_map()->get(fd) - ->set_flag(OpenFile_flags::cloexec, (arg & FD_CLOEXEC)); + ->set_flag(gkfs::filemap::OpenFile_flags::cloexec, (arg & FD_CLOEXEC)); return 0; diff --git a/src/client/open_dir.cpp b/src/client/open_dir.cpp index 92638de74..9deabe86f 100644 --- a/src/client/open_dir.cpp +++ b/src/client/open_dir.cpp @@ -15,6 +15,9 @@ #include #include +namespace gkfs { +namespace filemap { + DirEntry::DirEntry(const std::string& name, const FileType type) : name_(name), type_(type) { } @@ -44,3 +47,6 @@ const DirEntry& OpenDir::getdent(unsigned int pos) { size_t OpenDir::size() { return entries.size(); } + +} // namespace filemap +} // namespace gkfs \ No newline at end of file diff --git a/src/client/open_file_map.cpp b/src/client/open_file_map.cpp index e443df2f2..d06a2e3d2 100644 --- a/src/client/open_file_map.cpp +++ b/src/client/open_file_map.cpp @@ -24,6 +24,9 @@ extern "C" { using namespace std; +namespace gkfs { +namespace filemap { + OpenFile::OpenFile(const string& path, const int flags, FileType type) : type_(type), path_(path) { @@ -202,3 +205,6 @@ int OpenFileMap::get_fd_idx() { std::lock_guard inode_lock(fd_idx_mutex); return fd_idx; } + +} // namespace filemap +} // namespace gkfs \ No 
newline at end of file diff --git a/src/client/preload_context.cpp b/src/client/preload_context.cpp index 9993d127f..2f449cafd 100644 --- a/src/client/preload_context.cpp +++ b/src/client/preload_context.cpp @@ -39,7 +39,7 @@ decltype(PreloadContext::MAX_USER_FDS) constexpr PreloadContext::MAX_USER_FDS; PreloadContext::PreloadContext() : - ofm_(std::make_shared()), + ofm_(std::make_shared()), fs_conf_(std::make_shared()) { internal_fds_.set(); @@ -187,7 +187,7 @@ bool PreloadContext::relativize_path(const char* raw_path, std::string& relative return gkfs::path::resolve(path, relative_path, resolve_last_link); } -const std::shared_ptr& PreloadContext::file_map() const { +const std::shared_ptr& PreloadContext::file_map() const { return ofm_; } diff --git a/src/client/rpc/forward_metadata.cpp b/src/client/rpc/forward_metadata.cpp index b8d43f96b..58ecc5e27 100644 --- a/src/client/rpc/forward_metadata.cpp +++ b/src/client/rpc/forward_metadata.cpp @@ -352,7 +352,7 @@ int forward_get_metadentry_size(const std::string& path, off64_t& ret_size) { /** * Sends an RPC request to a specific node to push all chunks that belong to him */ -void forward_get_dirents(OpenDir& open_dir) { +void forward_get_dirents(gkfs::filemap::OpenDir& open_dir) { auto const root_dir = open_dir.path(); auto const targets = CTX->distributor()->locate_directory_metadata(root_dir); @@ -444,7 +444,8 @@ void forward_get_dirents(OpenDir& open_dir) { for (std::size_t j = 0; j < out.dirents_size(); j++) { - FileType ftype = (*bool_ptr) ? FileType::directory : FileType::regular; + gkfs::filemap::FileType ftype = (*bool_ptr) ? gkfs::filemap::FileType::directory + : gkfs::filemap::FileType::regular; bool_ptr++; // Check that we are not outside the recv_buff for this specific host -- GitLab From 15cee4538d54643ba21ff459c10ac5c38b1902b2 Mon Sep 17 00:00:00 2001 From: Marc Vef Date: Tue, 25 Feb 2020 01:07:34 +0100 Subject: [PATCH 24/25] Adding gkfs::hook and gkfs::preload namespaces, cleanup --- include/client/gkfs_functions.hpp | 33 +-- include/client/hooks.hpp | 12 +- include/client/intercept.hpp | 6 + include/client/preload.hpp | 7 +- include/client/preload_context.hpp | 13 +- include/client/preload_util.hpp | 7 + include/client/rpc/forward_metadata.hpp | 6 +- src/client/gkfs_functions.cpp | 27 ++- src/client/hooks.cpp | 99 ++++---- src/client/intercept.cpp | 301 ++++++++++++------------ src/client/preload.cpp | 34 ++- src/client/preload_context.cpp | 11 +- src/client/rpc/forward_metadata.cpp | 2 +- 13 files changed, 300 insertions(+), 258 deletions(-) diff --git a/include/client/gkfs_functions.hpp b/include/client/gkfs_functions.hpp index 9ff57c90c..328a64a18 100644 --- a/include/client/gkfs_functions.hpp +++ b/include/client/gkfs_functions.hpp @@ -17,23 +17,10 @@ #include #include -struct linux_dirent { - unsigned long d_ino; - unsigned long d_off; - unsigned short d_reclen; - char d_name[1]; -}; - -struct linux_dirent64 { - unsigned long long d_ino; - unsigned long long d_off; - unsigned short d_reclen; - unsigned char d_type; - char d_name[1]; -}; - -using sys_statfs = struct statfs; -using sys_statvfs = struct statvfs; +struct statfs; +struct statvfs; +struct dirent; +struct dirent64; namespace gkfs { namespace syscall { @@ -48,9 +35,9 @@ int gkfs_access(const std::string& path, int mask, bool follow_links = true); int gkfs_stat(const std::string& path, struct stat* buf, bool follow_links = true); -int gkfs_statfs(sys_statfs* buf); +int gkfs_statfs(struct statfs* buf); -int gkfs_statvfs(sys_statvfs* buf); +int 
gkfs_statvfs(struct statvfs* buf); off64_t gkfs_lseek(unsigned int fd, off64_t offset, unsigned int whence); @@ -92,13 +79,9 @@ ssize_t gkfs_read(int fd, void* buf, size_t count); int gkfs_opendir(const std::string& path); -int gkfs_getdents(unsigned int fd, - struct linux_dirent* dirp, - unsigned int count); +int gkfs_getdents(unsigned int fd, struct dirent* dirp, unsigned int count); -int gkfs_getdents64(unsigned int fd, - struct linux_dirent64* dirp, - unsigned int count); +int gkfs_getdents64(unsigned int fd, struct dirent64* dirp, unsigned int count); int gkfs_rmdir(const std::string& path); diff --git a/include/client/hooks.hpp b/include/client/hooks.hpp index dc0a2179a..3c639d036 100644 --- a/include/client/hooks.hpp +++ b/include/client/hooks.hpp @@ -17,6 +17,12 @@ #include #include +struct statfs; +struct dirent; +struct dirent64; + +namespace gkfs { +namespace hook { int hook_openat(int dirfd, const char* cpath, int flags, mode_t mode); @@ -63,9 +69,9 @@ int hook_dup2(unsigned int oldfd, unsigned int newfd); int hook_dup3(unsigned int oldfd, unsigned int newfd, int flags); -int hook_getdents(unsigned int fd, struct linux_dirent* dirp, unsigned int count); +int hook_getdents(unsigned int fd, struct dirent* dirp, unsigned int count); -int hook_getdents64(unsigned int fd, struct linux_dirent64* dirp, unsigned int count); +int hook_getdents64(unsigned int fd, struct dirent64* dirp, unsigned int count); int hook_mkdirat(int dirfd, const char* cpath, mode_t mode); @@ -90,5 +96,7 @@ int hook_statfs(const char* path, struct statfs* buf); int hook_fstatfs(unsigned int fd, struct statfs* buf); +} // namespace hook +} // namespace gkfs #endif diff --git a/include/client/intercept.hpp b/include/client/intercept.hpp index 4748850e5..4174b9829 100644 --- a/include/client/intercept.hpp +++ b/include/client/intercept.hpp @@ -14,6 +14,9 @@ #ifndef GEKKOFS_INTERCEPT_HPP #define GEKKOFS_INTERCEPT_HPP +namespace gkfs { +namespace preload { + int internal_hook_guard_wrapper(long syscall_number, long arg0, long arg1, long arg2, @@ -32,4 +35,7 @@ void start_interception(); void stop_interception(); +} // namespace preload +} // namespace gkfs + #endif diff --git a/include/client/preload.hpp b/include/client/preload.hpp index 63d888c89..a6e0e7a8c 100644 --- a/include/client/preload.hpp +++ b/include/client/preload.hpp @@ -18,9 +18,12 @@ #define EUNKNOWN (-1) -#define CTX PreloadContext::getInstance() - +#define CTX gkfs::preload::PreloadContext::getInstance() +namespace gkfs { +namespace preload { void init_ld_env_if_needed(); +} // namespace preload +} // namespace gkfs void init_preload() __attribute__((constructor)); diff --git a/include/client/preload_context.hpp b/include/client/preload_context.hpp index 149311d0f..0eb43be26 100644 --- a/include/client/preload_context.hpp +++ b/include/client/preload_context.hpp @@ -25,19 +25,20 @@ /* Forward declarations */ namespace gkfs { - namespace filemap { class OpenFileMap; } - namespace rpc { class Distributor; } namespace log { struct logger; } -} +namespace preload { +/* + * Client file system config + */ struct FsConfig { // configurable metadata bool atime_state; @@ -60,6 +61,9 @@ enum class RelativizeStatus { fd_not_a_dir }; +/** + * Singleton class of the client context with all relevant global data + */ class PreloadContext { static auto constexpr MIN_INTERNAL_FD = MAX_OPEN_FDS - MAX_INTERNAL_FDS; @@ -151,6 +155,9 @@ public: void unprotect_user_fds(); }; +} // namespace preload +} // namespace gkfs + #endif //GEKKOFS_PRELOAD_CTX_HPP diff --git 
a/include/client/preload_util.hpp b/include/client/preload_util.hpp index 26bf0dd3b..37cf4fc51 100644 --- a/include/client/preload_util.hpp +++ b/include/client/preload_util.hpp @@ -23,6 +23,9 @@ #include #include +namespace gkfs { +namespace metadata { + struct MetadentryUpdateFlags { bool atime = false; bool mtime = false; @@ -36,8 +39,12 @@ struct MetadentryUpdateFlags { bool path = false; }; +} // namespace metadata +} // namespace gkfs + // Hermes instance namespace hermes { class async_engine; } + extern std::unique_ptr ld_network_service; // function definitions diff --git a/include/client/rpc/forward_metadata.hpp b/include/client/rpc/forward_metadata.hpp index 572eaee05..9475aaa4e 100644 --- a/include/client/rpc/forward_metadata.hpp +++ b/include/client/rpc/forward_metadata.hpp @@ -18,14 +18,12 @@ #include /* Forward declaration */ -struct MetadentryUpdateFlags; - - namespace gkfs { namespace filemap { class OpenDir; } namespace metadata { +struct MetadentryUpdateFlags; class Metadata; } @@ -40,7 +38,7 @@ int forward_remove(const std::string& path, bool remove_metadentry_only, ssize_t int forward_decr_size(const std::string& path, size_t length); int forward_update_metadentry(const std::string& path, const gkfs::metadata::Metadata& md, - const MetadentryUpdateFlags& md_flags); + const gkfs::metadata::MetadentryUpdateFlags& md_flags); int forward_update_metadentry_size(const std::string& path, size_t size, off64_t offset, bool append_flag, off64_t& ret_size); diff --git a/src/client/gkfs_functions.cpp b/src/client/gkfs_functions.cpp index e724f790a..f46e57b93 100644 --- a/src/client/gkfs_functions.cpp +++ b/src/client/gkfs_functions.cpp @@ -26,6 +26,7 @@ extern "C" { #include #include +#include } #define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask)) @@ -207,7 +208,7 @@ int gkfs_stat(const string& path, struct stat* buf, bool follow_links) { return 0; } -int gkfs_statfs(sys_statfs* buf) { +int gkfs_statfs(struct statfs* buf) { auto blk_stat = gkfs::rpc::forward_get_chunk_stat(); buf->f_type = 0; buf->f_bsize = blk_stat.chunk_size; @@ -224,8 +225,8 @@ int gkfs_statfs(sys_statfs* buf) { return 0; } -int gkfs_statvfs(sys_statvfs* buf) { - init_ld_env_if_needed(); +int gkfs_statvfs(struct statvfs* buf) { + gkfs::preload::init_ld_env_if_needed(); auto blk_stat = gkfs::rpc::forward_get_chunk_stat(); buf->f_bsize = blk_stat.chunk_size; buf->f_blocks = blk_stat.chunk_total; @@ -504,7 +505,7 @@ int gkfs_rmdir(const std::string& path) { } int gkfs_getdents(unsigned int fd, - struct linux_dirent* dirp, + struct dirent* dirp, unsigned int count) { auto open_dir = CTX->file_map()->get_dir(fd); @@ -520,17 +521,17 @@ int gkfs_getdents(unsigned int fd, } unsigned int written = 0; - struct linux_dirent* current_dirp = nullptr; + struct dirent* current_dirp = nullptr; while (pos < open_dir->size()) { gkfs::filemap::DirEntry de = open_dir->getdent(pos); auto total_size = ALIGN(offsetof( - struct linux_dirent, d_name) + + struct dirent, d_name) + de.name().size() + 3, sizeof(long)); if (total_size > (count - written)) { //no enough space left on user buffer to insert next dirent break; } - current_dirp = reinterpret_cast( + current_dirp = reinterpret_cast( reinterpret_cast(dirp) + written); current_dirp->d_ino = std::hash()( open_dir->path() + "/" + de.name()); @@ -557,7 +558,7 @@ int gkfs_getdents(unsigned int fd, int gkfs_getdents64(unsigned int fd, - struct linux_dirent64* dirp, + struct dirent64* dirp, unsigned int count) { auto open_dir = CTX->file_map()->get_dir(fd); @@ -573,17 +574,17 
@@ int gkfs_getdents64(unsigned int fd, } unsigned int written = 0; - struct linux_dirent64* current_dirp = nullptr; + struct dirent64* current_dirp = nullptr; while (pos < open_dir->size()) { gkfs::filemap::DirEntry de = open_dir->getdent(pos); auto total_size = ALIGN(offsetof( - struct linux_dirent64, d_name) + + struct dirent64, d_name) + de.name().size() + 3, sizeof(long)); if (total_size > (count - written)) { //no enough space left on user buffer to insert next dirent break; } - current_dirp = reinterpret_cast( + current_dirp = reinterpret_cast( reinterpret_cast(dirp) + written); current_dirp->d_ino = std::hash()( open_dir->path() + "/" + de.name()); @@ -610,7 +611,7 @@ int gkfs_getdents64(unsigned int fd, #ifdef HAS_SYMLINKS int gkfs_mk_symlink(const std::string& path, const std::string& target_path) { - init_ld_env_if_needed(); + gkfs::preload::init_ld_env_if_needed(); /* The following check is not POSIX compliant. * In POSIX the target is not checked at all. * Here if the target is a directory we raise a NOTSUP error. @@ -642,7 +643,7 @@ int gkfs_mk_symlink(const std::string& path, const std::string& target_path) { } int gkfs_readlink(const std::string& path, char* buf, int bufsize) { - init_ld_env_if_needed(); + gkfs::preload::init_ld_env_if_needed(); auto md = gkfs::util::get_metadata(path, false); if (md == nullptr) { LOG(DEBUG, "Named link doesn't exist"); diff --git a/src/client/hooks.cpp b/src/client/hooks.cpp index 3da77eb3e..dbe2b8655 100644 --- a/src/client/hooks.cpp +++ b/src/client/hooks.cpp @@ -25,14 +25,22 @@ extern "C" { #include -#include +#include #include +#include +#include } -static inline int with_errno(int ret) { +namespace { + +inline int with_errno(int ret) { return (ret < 0) ? -errno : ret; } +} // namespace + +namespace gkfs { +namespace hook { int hook_openat(int dirfd, const char* cpath, int flags, mode_t mode) { @@ -42,16 +50,16 @@ int hook_openat(int dirfd, const char* cpath, int flags, mode_t mode) { std::string resolved; auto rstatus = CTX->relativize_fd_path(dirfd, cpath, resolved); switch (rstatus) { - case RelativizeStatus::fd_unknown: + case gkfs::preload::RelativizeStatus::fd_unknown: return syscall_no_intercept(SYS_openat, dirfd, cpath, flags, mode); - case RelativizeStatus::external: + case gkfs::preload::RelativizeStatus::external: return syscall_no_intercept(SYS_openat, dirfd, resolved.c_str(), flags, mode); - case RelativizeStatus::fd_not_a_dir: + case gkfs::preload::RelativizeStatus::fd_not_a_dir: return -ENOTDIR; - case RelativizeStatus::internal: + case gkfs::preload::RelativizeStatus::internal: return with_errno(gkfs::syscall::gkfs_open(resolved, mode, flags)); default: @@ -128,16 +136,16 @@ int hook_fstatat(int dirfd, const char* cpath, struct stat* buf, int flags) { std::string resolved; auto rstatus = CTX->relativize_fd_path(dirfd, cpath, resolved); switch (rstatus) { - case RelativizeStatus::fd_unknown: + case gkfs::preload::RelativizeStatus::fd_unknown: return syscall_no_intercept(SYS_newfstatat, dirfd, cpath, buf, flags); - case RelativizeStatus::external: + case gkfs::preload::RelativizeStatus::external: return syscall_no_intercept(SYS_newfstatat, dirfd, resolved.c_str(), buf, flags); - case RelativizeStatus::fd_not_a_dir: + case gkfs::preload::RelativizeStatus::fd_not_a_dir: return -ENOTDIR; - case RelativizeStatus::internal: + case gkfs::preload::RelativizeStatus::internal: return with_errno(gkfs::syscall::gkfs_stat(resolved, buf)); default: @@ -230,16 +238,16 @@ int hook_unlinkat(int dirfd, const char* cpath, int flags) { 
std::string resolved; auto rstatus = CTX->relativize_fd_path(dirfd, cpath, resolved, false); switch (rstatus) { - case RelativizeStatus::fd_unknown: + case gkfs::preload::RelativizeStatus::fd_unknown: return syscall_no_intercept(SYS_unlinkat, dirfd, cpath, flags); - case RelativizeStatus::external: + case gkfs::preload::RelativizeStatus::external: return syscall_no_intercept(SYS_unlinkat, dirfd, resolved.c_str(), flags); - case RelativizeStatus::fd_not_a_dir: + case gkfs::preload::RelativizeStatus::fd_not_a_dir: return -ENOTDIR; - case RelativizeStatus::internal: + case gkfs::preload::RelativizeStatus::internal: if (flags & AT_REMOVEDIR) { return with_errno(gkfs::syscall::gkfs_rmdir(resolved)); } else { @@ -266,16 +274,16 @@ int hook_symlinkat(const char* oldname, int newdfd, const char* newname) { std::string newname_resolved; auto rstatus = CTX->relativize_fd_path(newdfd, newname, newname_resolved, false); switch (rstatus) { - case RelativizeStatus::fd_unknown: + case gkfs::preload::RelativizeStatus::fd_unknown: return syscall_no_intercept(SYS_symlinkat, oldname, newdfd, newname); - case RelativizeStatus::external: + case gkfs::preload::RelativizeStatus::external: return syscall_no_intercept(SYS_symlinkat, oldname, newdfd, newname_resolved.c_str()); - case RelativizeStatus::fd_not_a_dir: + case gkfs::preload::RelativizeStatus::fd_not_a_dir: return -ENOTDIR; - case RelativizeStatus::internal: + case gkfs::preload::RelativizeStatus::internal: LOG(WARNING, "{}() operation not supported", __func__); return -ENOTSUP; @@ -310,16 +318,16 @@ int hook_faccessat(int dirfd, const char* cpath, int mode) { std::string resolved; auto rstatus = CTX->relativize_fd_path(dirfd, cpath, resolved); switch (rstatus) { - case RelativizeStatus::fd_unknown: + case gkfs::preload::RelativizeStatus::fd_unknown: return syscall_no_intercept(SYS_faccessat, dirfd, cpath, mode); - case RelativizeStatus::external: + case gkfs::preload::RelativizeStatus::external: return syscall_no_intercept(SYS_faccessat, dirfd, resolved.c_str(), mode); - case RelativizeStatus::fd_not_a_dir: + case gkfs::preload::RelativizeStatus::fd_not_a_dir: return -ENOTDIR; - case RelativizeStatus::internal: + case gkfs::preload::RelativizeStatus::internal: return with_errno(gkfs::syscall::gkfs_access(resolved, mode)); default: @@ -406,7 +414,7 @@ int hook_dup3(unsigned int oldfd, unsigned int newfd, int flags) { return syscall_no_intercept(SYS_dup3, oldfd, newfd, flags); } -int hook_getdents(unsigned int fd, struct linux_dirent* dirp, unsigned int count) { +int hook_getdents(unsigned int fd, struct dirent* dirp, unsigned int count) { LOG(DEBUG, "{}() called with fd: {}, dirp: {}, count: {}", __func__, fd, fmt::ptr(dirp), count); @@ -418,7 +426,7 @@ int hook_getdents(unsigned int fd, struct linux_dirent* dirp, unsigned int count } -int hook_getdents64(unsigned int fd, struct linux_dirent64* dirp, unsigned int count) { +int hook_getdents64(unsigned int fd, struct dirent64* dirp, unsigned int count) { LOG(DEBUG, "{}() called with fd: {}, dirp: {}, count: {}", __func__, fd, fmt::ptr(dirp), count); @@ -438,16 +446,16 @@ int hook_mkdirat(int dirfd, const char* cpath, mode_t mode) { std::string resolved; auto rstatus = CTX->relativize_fd_path(dirfd, cpath, resolved); switch (rstatus) { - case RelativizeStatus::external: + case gkfs::preload::RelativizeStatus::external: return syscall_no_intercept(SYS_mkdirat, dirfd, resolved.c_str(), mode); - case RelativizeStatus::fd_unknown: + case gkfs::preload::RelativizeStatus::fd_unknown: return 
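Each *at() hook above follows the same four-way dispatch on gkfs::preload::RelativizeStatus: unknown or external paths are forwarded to the kernel, a dirfd that is not a directory yields -ENOTDIR, and only internal paths reach a gkfs_* implementation. A reduced sketch of that control flow; the enum is copied for illustration and both handlers are placeholders:

#include <cerrno>
#include <string>

enum class RelativizeStatus { internal, external, fd_unknown, fd_not_a_dir };

long forward_to_kernel(const std::string&) { return 0; }   // placeholder
int  handle_internally(const std::string&) { return 0; }   // placeholder

long dispatch(RelativizeStatus status, const std::string& resolved) {
    switch (status) {
        case RelativizeStatus::fd_unknown:     // fd not tracked by the client
        case RelativizeStatus::external:       // path lies outside the mount
            return forward_to_kernel(resolved);
        case RelativizeStatus::fd_not_a_dir:   // dirfd is not a directory
            return -ENOTDIR;
        case RelativizeStatus::internal:       // GekkoFS handles the path
            return handle_internally(resolved);
    }
    return -ENOSYS;                            // unreachable with a valid enum
}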
syscall_no_intercept(SYS_mkdirat, dirfd, cpath, mode); - case RelativizeStatus::fd_not_a_dir: + case gkfs::preload::RelativizeStatus::fd_not_a_dir: return -ENOTDIR; - case RelativizeStatus::internal: + case gkfs::preload::RelativizeStatus::internal: return with_errno(gkfs::syscall::gkfs_create(resolved, mode | S_IFDIR)); default: @@ -464,16 +472,16 @@ int hook_fchmodat(int dirfd, const char* cpath, mode_t mode) { std::string resolved; auto rstatus = CTX->relativize_fd_path(dirfd, cpath, resolved); switch (rstatus) { - case RelativizeStatus::fd_unknown: + case gkfs::preload::RelativizeStatus::fd_unknown: return syscall_no_intercept(SYS_fchmodat, dirfd, cpath, mode); - case RelativizeStatus::external: + case gkfs::preload::RelativizeStatus::external: return syscall_no_intercept(SYS_fchmodat, dirfd, resolved.c_str(), mode); - case RelativizeStatus::fd_not_a_dir: + case gkfs::preload::RelativizeStatus::fd_not_a_dir: return -ENOTDIR; - case RelativizeStatus::internal: + case gkfs::preload::RelativizeStatus::internal: LOG(WARNING, "{}() operation not supported", __func__); return -ENOTSUP; @@ -588,16 +596,16 @@ int hook_readlinkat(int dirfd, const char* cpath, char* buf, int bufsiz) { std::string resolved; auto rstatus = CTX->relativize_fd_path(dirfd, cpath, resolved, false); switch (rstatus) { - case RelativizeStatus::fd_unknown: + case gkfs::preload::RelativizeStatus::fd_unknown: return syscall_no_intercept(SYS_readlinkat, dirfd, cpath, buf, bufsiz); - case RelativizeStatus::external: + case gkfs::preload::RelativizeStatus::external: return syscall_no_intercept(SYS_readlinkat, dirfd, resolved.c_str(), buf, bufsiz); - case RelativizeStatus::fd_not_a_dir: + case gkfs::preload::RelativizeStatus::fd_not_a_dir: return -ENOTDIR; - case RelativizeStatus::internal: + case gkfs::preload::RelativizeStatus::internal: LOG(WARNING, "{}() not supported", __func__); return -ENOTSUP; @@ -683,18 +691,18 @@ int hook_renameat(int olddfd, const char* oldname, std::string oldpath_resolved; auto oldpath_status = CTX->relativize_fd_path(olddfd, oldname, oldpath_resolved); switch (oldpath_status) { - case RelativizeStatus::fd_unknown: + case gkfs::preload::RelativizeStatus::fd_unknown: oldpath_pass = oldname; break; - case RelativizeStatus::external: + case gkfs::preload::RelativizeStatus::external: oldpath_pass = oldpath_resolved.c_str(); break; - case RelativizeStatus::fd_not_a_dir: + case gkfs::preload::RelativizeStatus::fd_not_a_dir: return -ENOTDIR; - case RelativizeStatus::internal: + case gkfs::preload::RelativizeStatus::internal: LOG(WARNING, "{}() not supported", __func__); return -ENOTSUP; @@ -707,18 +715,18 @@ int hook_renameat(int olddfd, const char* oldname, std::string newpath_resolved; auto newpath_status = CTX->relativize_fd_path(newdfd, newname, newpath_resolved); switch (newpath_status) { - case RelativizeStatus::fd_unknown: + case gkfs::preload::RelativizeStatus::fd_unknown: newpath_pass = newname; break; - case RelativizeStatus::external: + case gkfs::preload::RelativizeStatus::external: newpath_pass = newpath_resolved.c_str(); break; - case RelativizeStatus::fd_not_a_dir: + case gkfs::preload::RelativizeStatus::fd_not_a_dir: return -ENOTDIR; - case RelativizeStatus::internal: + case gkfs::preload::RelativizeStatus::internal: LOG(WARNING, "{}() not supported", __func__); return -ENOTSUP; @@ -752,3 +760,6 @@ int hook_fstatfs(unsigned int fd, struct statfs* buf) { } return syscall_no_intercept(SYS_fstatfs, fd, buf); } + +} // namespace hook +} // namespace gkfs \ No newline at end of file diff --git 
a/src/client/intercept.cpp b/src/client/intercept.cpp index 1c71e21aa..56471c712 100644 --- a/src/client/intercept.cpp +++ b/src/client/intercept.cpp @@ -17,6 +17,7 @@ #include #include +#include #include @@ -26,25 +27,22 @@ extern "C" { #include #include #include -#include } -static thread_local bool reentrance_guard_flag; -static thread_local gkfs::syscall::info saved_syscall_info; +namespace { +thread_local bool reentrance_guard_flag; +thread_local gkfs::syscall::info saved_syscall_info; -static constexpr void -save_current_syscall_info(gkfs::syscall::info info) { +constexpr void save_current_syscall_info(gkfs::syscall::info info) { saved_syscall_info = info; } -static constexpr void -reset_current_syscall_info() { +constexpr void reset_current_syscall_info() { saved_syscall_info = gkfs::syscall::no_info; } -static inline gkfs::syscall::info -get_current_syscall_info() { +inline gkfs::syscall::info get_current_syscall_info() { return saved_syscall_info; } @@ -59,7 +57,7 @@ get_current_syscall_info() { * We forward syscalls to the kernel but we keep track of any syscalls that may * create or destroy a file descriptor so that we can mark them as 'internal'. */ -static inline int +inline int hook_internal(long syscall_number, long arg0, long arg1, long arg2, long arg3, long arg4, long arg5, @@ -402,7 +400,7 @@ hook_internal(long syscall_number, * * This hook is used to implement any application filesystem-related syscalls. */ -static inline +inline int hook(long syscall_number, long arg0, long arg1, long arg2, long arg3, long arg4, long arg5, @@ -439,269 +437,269 @@ int hook(long syscall_number, #endif case SYS_open: - *result = hook_openat(AT_FDCWD, - reinterpret_cast(arg0), - static_cast(arg1), - static_cast(arg2)); + *result = gkfs::hook::hook_openat(AT_FDCWD, + reinterpret_cast(arg0), + static_cast(arg1), + static_cast(arg2)); break; case SYS_creat: - *result = hook_openat(AT_FDCWD, - reinterpret_cast(arg0), + *result = gkfs::hook::hook_openat(AT_FDCWD, + reinterpret_cast(arg0), O_WRONLY | O_CREAT | O_TRUNC, - static_cast(arg1)); + static_cast(arg1)); break; case SYS_openat: - *result = hook_openat(static_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2), - static_cast(arg3)); + *result = gkfs::hook::hook_openat(static_cast(arg0), + reinterpret_cast(arg1), + static_cast(arg2), + static_cast(arg3)); break; case SYS_close: - *result = hook_close(static_cast(arg0)); + *result = gkfs::hook::hook_close(static_cast(arg0)); break; case SYS_stat: - *result = hook_stat(reinterpret_cast(arg0), - reinterpret_cast(arg1)); + *result = gkfs::hook::hook_stat(reinterpret_cast(arg0), + reinterpret_cast(arg1)); break; case SYS_lstat: - *result = hook_lstat(reinterpret_cast(arg0), - reinterpret_cast(arg1)); + *result = gkfs::hook::hook_lstat(reinterpret_cast(arg0), + reinterpret_cast(arg1)); break; case SYS_fstat: - *result = hook_fstat(static_cast(arg0), - reinterpret_cast(arg1)); + *result = gkfs::hook::hook_fstat(static_cast(arg0), + reinterpret_cast(arg1)); break; case SYS_newfstatat: - *result = hook_fstatat(static_cast(arg0), - reinterpret_cast(arg1), - reinterpret_cast(arg2), - static_cast(arg3)); + *result = gkfs::hook::hook_fstatat(static_cast(arg0), + reinterpret_cast(arg1), + reinterpret_cast(arg2), + static_cast(arg3)); break; case SYS_read: - *result = hook_read(static_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2)); + *result = gkfs::hook::hook_read(static_cast(arg0), + reinterpret_cast(arg1), + static_cast(arg2)); break; case SYS_pread64: - *result = 
hook_pread(static_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2), - static_cast(arg3)); + *result = gkfs::hook::hook_pread(static_cast(arg0), + reinterpret_cast(arg1), + static_cast(arg2), + static_cast(arg3)); break; case SYS_pwrite64: - *result = hook_pwrite(static_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2), - static_cast(arg3)); + *result = gkfs::hook::hook_pwrite(static_cast(arg0), + reinterpret_cast(arg1), + static_cast(arg2), + static_cast(arg3)); break; case SYS_write: - *result = hook_write(static_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2)); + *result = gkfs::hook::hook_write(static_cast(arg0), + reinterpret_cast(arg1), + static_cast(arg2)); break; case SYS_writev: - *result = hook_writev(static_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2)); + *result = gkfs::hook::hook_writev(static_cast(arg0), + reinterpret_cast(arg1), + static_cast(arg2)); break; case SYS_pwritev: - *result = hook_pwritev(static_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2), - static_cast(arg3), - static_cast(arg4)); + *result = gkfs::hook::hook_pwritev(static_cast(arg0), + reinterpret_cast(arg1), + static_cast(arg2), + static_cast(arg3), + static_cast(arg4)); break; case SYS_unlink: - *result = hook_unlinkat(AT_FDCWD, - reinterpret_cast(arg0), - 0); + *result = gkfs::hook::hook_unlinkat(AT_FDCWD, + reinterpret_cast(arg0), + 0); break; case SYS_unlinkat: - *result = hook_unlinkat(static_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2)); + *result = gkfs::hook::hook_unlinkat(static_cast(arg0), + reinterpret_cast(arg1), + static_cast(arg2)); break; case SYS_rmdir: - *result = hook_unlinkat(AT_FDCWD, - reinterpret_cast(arg0), - AT_REMOVEDIR); + *result = gkfs::hook::hook_unlinkat(AT_FDCWD, + reinterpret_cast(arg0), + AT_REMOVEDIR); break; case SYS_symlink: - *result = hook_symlinkat(reinterpret_cast(arg0), - AT_FDCWD, - reinterpret_cast(arg1)); + *result = gkfs::hook::hook_symlinkat(reinterpret_cast(arg0), + AT_FDCWD, + reinterpret_cast(arg1)); break; case SYS_symlinkat: - *result = hook_symlinkat(reinterpret_cast(arg0), - static_cast(arg1), - reinterpret_cast(arg2)); + *result = gkfs::hook::hook_symlinkat(reinterpret_cast(arg0), + static_cast(arg1), + reinterpret_cast(arg2)); break; case SYS_access: - *result = hook_access(reinterpret_cast(arg0), - static_cast(arg1)); + *result = gkfs::hook::hook_access(reinterpret_cast(arg0), + static_cast(arg1)); break; case SYS_faccessat: - *result = hook_faccessat(static_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2)); + *result = gkfs::hook::hook_faccessat(static_cast(arg0), + reinterpret_cast(arg1), + static_cast(arg2)); break; case SYS_lseek: - *result = hook_lseek(static_cast(arg0), - static_cast(arg1), - static_cast(arg2)); + *result = gkfs::hook::hook_lseek(static_cast(arg0), + static_cast(arg1), + static_cast(arg2)); break; case SYS_truncate: - *result = hook_truncate(reinterpret_cast(arg0), - static_cast(arg1)); + *result = gkfs::hook::hook_truncate(reinterpret_cast(arg0), + static_cast(arg1)); break; case SYS_ftruncate: - *result = hook_ftruncate(static_cast(arg0), - static_cast(arg1)); + *result = gkfs::hook::hook_ftruncate(static_cast(arg0), + static_cast(arg1)); break; case SYS_dup: - *result = hook_dup(static_cast(arg0)); + *result = gkfs::hook::hook_dup(static_cast(arg0)); break; case SYS_dup2: - *result = hook_dup2(static_cast(arg0), - static_cast(arg1)); + *result = gkfs::hook::hook_dup2(static_cast(arg0), + static_cast(arg1)); break; case SYS_dup3: - *result = 
hook_dup3(static_cast(arg0), - static_cast(arg1), - static_cast(arg2)); + *result = gkfs::hook::hook_dup3(static_cast(arg0), + static_cast(arg1), + static_cast(arg2)); break; case SYS_getdents: - *result = hook_getdents(static_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2)); + *result = gkfs::hook::hook_getdents(static_cast(arg0), + reinterpret_cast(arg1), + static_cast(arg2)); break; case SYS_getdents64: - *result = hook_getdents64(static_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2)); + *result = gkfs::hook::hook_getdents64(static_cast(arg0), + reinterpret_cast(arg1), + static_cast(arg2)); break; case SYS_mkdirat: - *result = hook_mkdirat(static_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2)); + *result = gkfs::hook::hook_mkdirat(static_cast(arg0), + reinterpret_cast(arg1), + static_cast(arg2)); break; case SYS_mkdir: - *result = hook_mkdirat(AT_FDCWD, - reinterpret_cast(arg0), - static_cast(arg1)); + *result = gkfs::hook::hook_mkdirat(AT_FDCWD, + reinterpret_cast(arg0), + static_cast(arg1)); break; case SYS_chmod: - *result = hook_fchmodat(AT_FDCWD, - reinterpret_cast(arg0), - static_cast(arg1)); + *result = gkfs::hook::hook_fchmodat(AT_FDCWD, + reinterpret_cast(arg0), + static_cast(arg1)); break; case SYS_fchmod: - *result = hook_fchmod(static_cast(arg0), - static_cast(arg1)); + *result = gkfs::hook::hook_fchmod(static_cast(arg0), + static_cast(arg1)); break; case SYS_fchmodat: - *result = hook_fchmodat(static_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2)); + *result = gkfs::hook::hook_fchmodat(static_cast(arg0), + reinterpret_cast(arg1), + static_cast(arg2)); break; case SYS_chdir: - *result = hook_chdir(reinterpret_cast(arg0)); + *result = gkfs::hook::hook_chdir(reinterpret_cast(arg0)); break; case SYS_fchdir: - *result = hook_fchdir(static_cast(arg0)); + *result = gkfs::hook::hook_fchdir(static_cast(arg0)); break; case SYS_getcwd: - *result = hook_getcwd(reinterpret_cast(arg0), - static_cast(arg1)); + *result = gkfs::hook::hook_getcwd(reinterpret_cast(arg0), + static_cast(arg1)); break; case SYS_readlink: - *result = hook_readlinkat(AT_FDCWD, - reinterpret_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2)); + *result = gkfs::hook::hook_readlinkat(AT_FDCWD, + reinterpret_cast(arg0), + reinterpret_cast(arg1), + static_cast(arg2)); break; case SYS_readlinkat: - *result = hook_readlinkat(static_cast(arg0), - reinterpret_cast(arg1), - reinterpret_cast(arg2), - static_cast(arg3)); + *result = gkfs::hook::hook_readlinkat(static_cast(arg0), + reinterpret_cast(arg1), + reinterpret_cast(arg2), + static_cast(arg3)); break; case SYS_fcntl: - *result = hook_fcntl(static_cast(arg0), - static_cast(arg1), - static_cast(arg2)); + *result = gkfs::hook::hook_fcntl(static_cast(arg0), + static_cast(arg1), + static_cast(arg2)); break; case SYS_rename: - *result = hook_renameat(AT_FDCWD, - reinterpret_cast(arg0), - AT_FDCWD, - reinterpret_cast(arg1), - 0); + *result = gkfs::hook::hook_renameat(AT_FDCWD, + reinterpret_cast(arg0), + AT_FDCWD, + reinterpret_cast(arg1), + 0); break; case SYS_renameat: - *result = hook_renameat(static_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2), - reinterpret_cast(arg3), - 0); + *result = gkfs::hook::hook_renameat(static_cast(arg0), + reinterpret_cast(arg1), + static_cast(arg2), + reinterpret_cast(arg3), + 0); break; case SYS_renameat2: - *result = hook_renameat(static_cast(arg0), - reinterpret_cast(arg1), - static_cast(arg2), - reinterpret_cast(arg3), - static_cast(arg4)); + *result = 
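Note that the angle-bracketed template arguments of the static_cast/reinterpret_cast calls in this switch were lost in this plain-text rendering of the patch. One case with the casts written out, with types inferred from the hook signatures earlier in the series; the stub hook body is not part of the patch:

#include <cerrno>
#include <sys/syscall.h>
#include <sys/types.h>

// Stub with the same signature as gkfs::hook::hook_openat, for illustration.
int hook_openat(int dirfd, const char* cpath, int flags, mode_t mode) {
    (void)dirfd; (void)cpath; (void)flags; (void)mode;
    return -ENOSYS;
}

// Decode the raw syscall arguments for SYS_openat; returns true if hooked.
bool dispatch_one(long syscall_number, long arg0, long arg1, long arg2,
                  long arg3, long* result) {
    switch (syscall_number) {
        case SYS_openat:
            *result = hook_openat(static_cast<int>(arg0),
                                  reinterpret_cast<const char*>(arg1),
                                  static_cast<int>(arg2),
                                  static_cast<mode_t>(arg3));
            return true;
        default:
            return false;   // not handled; caller forwards to the kernel
    }
}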
gkfs::hook::hook_renameat(static_cast(arg0), + reinterpret_cast(arg1), + static_cast(arg2), + reinterpret_cast(arg3), + static_cast(arg4)); break; case SYS_fstatfs: - *result = hook_fstatfs(static_cast(arg0), - reinterpret_cast(arg1)); + *result = gkfs::hook::hook_fstatfs(static_cast(arg0), + reinterpret_cast(arg1)); break; case SYS_statfs: - *result = hook_statfs(reinterpret_cast(arg0), - reinterpret_cast(arg1)); + *result = gkfs::hook::hook_statfs(reinterpret_cast(arg0), + reinterpret_cast(arg1)); break; default: @@ -722,7 +720,7 @@ int hook(long syscall_number, return gkfs::syscall::hooked; } -static void +void hook_forwarded_syscall(long syscall_number, long arg0, long arg1, long arg2, long arg3, long arg4, long arg5, @@ -746,7 +744,7 @@ hook_forwarded_syscall(long syscall_number, ::reset_current_syscall_info(); } -static void +void hook_clone_at_child(unsigned long flags, void* child_stack, int* ptid, @@ -773,7 +771,7 @@ hook_clone_at_child(unsigned long flags, reentrance_guard_flag = false; } -static void +void hook_clone_at_parent(unsigned long flags, void* child_stack, int* ptid, @@ -801,6 +799,10 @@ hook_clone_at_parent(unsigned long flags, reentrance_guard_flag = false; } +} // namespace + +namespace gkfs { +namespace preload { int internal_hook_guard_wrapper(long syscall_number, @@ -867,9 +869,9 @@ hook_guard_wrapper(long syscall_number, reentrance_guard_flag = true; int oerrno = errno; - was_hooked = hook(syscall_number, - arg0, arg1, arg2, arg3, arg4, arg5, - syscall_return_value); + was_hooked = ::hook(syscall_number, + arg0, arg1, arg2, arg3, arg4, arg5, + syscall_return_value); errno = oerrno; reentrance_guard_flag = false; @@ -910,3 +912,6 @@ void stop_interception() { intercept_hook_point_clone_child = nullptr; intercept_hook_point_clone_parent = nullptr; } + +} // namespace preload +} // namespace gkfs \ No newline at end of file diff --git a/src/client/preload.cpp b/src/client/preload.cpp index beb55b4ec..ad09cff32 100644 --- a/src/client/preload.cpp +++ b/src/client/preload.cpp @@ -30,12 +30,14 @@ extern "C" { using namespace std; +std::unique_ptr ld_network_service; // extern variable + +namespace { // make sure that things are only initialized once -static pthread_once_t init_env_thread = PTHREAD_ONCE_INIT; +pthread_once_t init_env_thread = PTHREAD_ONCE_INIT; -std::unique_ptr ld_network_service; -static inline void exit_error_msg(int errcode, const string& msg) { +inline void exit_error_msg(int errcode, const string& msg) { LOG_ERROR("{}", msg); gkfs::log::logger::log_message(stderr, "{}\n", msg); @@ -43,7 +45,7 @@ static inline void exit_error_msg(int errcode, const string& msg) { // if we don't disable interception before calling ::exit() // syscall hooks may find an inconsistent in shared state // (e.g. 
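hook_guard_wrapper() above relies on a thread_local flag so that syscalls issued from inside the client's own hook code are not intercepted a second time. The guard in isolation; run_hook stands in for the real hook() dispatcher:

// Thread-local reentrance guard, mirroring the flag used by the wrappers.
namespace {
thread_local bool reentrance_guard_flag = false;
} // namespace

// Returns true if the hook body ran, false if we were already inside a hook.
bool guarded(void (*run_hook)()) {
    if (reentrance_guard_flag) {
        return false;            // nested entry: let the kernel handle it
    }
    reentrance_guard_flag = true;
    run_hook();                  // nested syscalls from here on are ignored
    reentrance_guard_flag = false;
    return true;
}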
the logger) and thus, crash - stop_interception(); + gkfs::preload::stop_interception(); CTX->disable_interception(); ::exit(errcode); } @@ -109,10 +111,6 @@ void init_ld_environment_() { LOG(INFO, "Environment initialization successful."); } -void init_ld_env_if_needed() { - pthread_once(&init_env_thread, init_ld_environment_); -} - void log_prog_name() { std::string line; std::ifstream cmdline("/proc/self/cmdline"); @@ -129,13 +127,25 @@ void log_prog_name() { cmdline.close(); } +} // namespace + +namespace gkfs { +namespace preload { + +void init_ld_env_if_needed() { + pthread_once(&init_env_thread, init_ld_environment_); +} + +} // namespace preload +} // namespace gkfs + /** * Called initially ONCE when preload library is used with the LD_PRELOAD environment variable */ void init_preload() { CTX->enable_interception(); - start_self_interception(); + gkfs::preload::start_self_interception(); CTX->init_logging(); // from here ownwards it is safe to print messages @@ -155,12 +165,12 @@ void init_preload() { gkfs::path::init_cwd(); LOG(DEBUG, "Current working directory: '{}'", CTX->cwd()); - init_ld_env_if_needed(); + gkfs::preload::init_ld_env_if_needed(); CTX->enable_interception(); CTX->unprotect_user_fds(); - start_interception(); + gkfs::preload::start_interception(); } /** @@ -174,7 +184,7 @@ void destroy_preload() { ld_network_service.reset(); LOG(DEBUG, "RPC subsystem shut down"); - stop_interception(); + gkfs::preload::stop_interception(); CTX->disable_interception(); LOG(DEBUG, "Syscall interception stopped"); diff --git a/src/client/preload_context.cpp b/src/client/preload_context.cpp index 2f449cafd..ce696e74e 100644 --- a/src/client/preload_context.cpp +++ b/src/client/preload_context.cpp @@ -10,9 +10,6 @@ SPDX-License-Identifier: MIT */ - - - #include #include #include @@ -33,6 +30,9 @@ extern "C" { #include } +namespace gkfs { +namespace preload { + decltype(PreloadContext::MIN_INTERNAL_FD) constexpr PreloadContext::MIN_INTERNAL_FD; decltype(PreloadContext::MAX_USER_FDS) constexpr @@ -323,7 +323,7 @@ PreloadContext::protect_user_fds() { const auto fd_is_open = [](int fd) -> bool { const int ret = ::syscall_no_intercept(SYS_fcntl, fd, F_GETFD); return ::syscall_error_code(ret) == 0 || - ::syscall_error_code(ret) != EBADF; + ::syscall_error_code(ret) != EBADF; }; for (int fd = 0; fd < MAX_USER_FDS; ++fd) { @@ -358,3 +358,6 @@ PreloadContext::unprotect_user_fds() { internal_fds_must_relocate_ = true; } + +} // namespace preload +} // namespace gkfs \ No newline at end of file diff --git a/src/client/rpc/forward_metadata.cpp b/src/client/rpc/forward_metadata.cpp index 58ecc5e27..f66a09001 100644 --- a/src/client/rpc/forward_metadata.cpp +++ b/src/client/rpc/forward_metadata.cpp @@ -236,7 +236,7 @@ int forward_decr_size(const std::string& path, size_t length) { } int forward_update_metadentry(const string& path, const gkfs::metadata::Metadata& md, - const MetadentryUpdateFlags& md_flags) { + const gkfs::metadata::MetadentryUpdateFlags& md_flags) { auto endp = CTX->hosts().at(CTX->distributor()->locate_file_metadata(path)); -- GitLab From f05f1b46437e351ebb1846d198e1e960e5b2c320 Mon Sep 17 00:00:00 2001 From: Marc Vef Date: Tue, 25 Feb 2020 01:17:57 +0100 Subject: [PATCH 25/25] Adding gkfs::daemon namespace, cleanup --- include/daemon/classes/fs_data.hpp | 13 +- include/daemon/classes/rpc_data.hpp | 7 +- include/daemon/daemon.hpp | 14 +- src/daemon/classes/fs_data.cpp | 6 +- src/daemon/classes/rpc_data.cpp | 6 + src/daemon/daemon.cpp | 206 ++++++++++++++-------------- 
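init_ld_env_if_needed() above wraps the environment setup in pthread_once so that concurrent first calls from different threads initialize it exactly once. The same idiom reduced to a runnable example; do_init is a placeholder for init_ld_environment_():

#include <pthread.h>
#include <cstdio>

namespace {
pthread_once_t init_once_control = PTHREAD_ONCE_INIT;

void do_init() {
    std::puts("initialized");    // runs exactly once, regardless of callers
}
} // namespace

void init_if_needed() {
    pthread_once(&init_once_control, do_init);
}

int main() {
    init_if_needed();
    init_if_needed();            // second call is a no-op
    return 0;
}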
src/daemon/util.cpp | 10 +- 7 files changed, 138 insertions(+), 124 deletions(-) diff --git a/include/daemon/classes/fs_data.hpp b/include/daemon/classes/fs_data.hpp index 78333283d..8f96f1121 100644 --- a/include/daemon/classes/fs_data.hpp +++ b/include/daemon/classes/fs_data.hpp @@ -17,6 +17,10 @@ #include +#include +#include +#include //std::hash + /* Forward declarations */ namespace gkfs { namespace metadata { @@ -26,12 +30,8 @@ class MetadataDB; namespace data { class ChunkStorage; } -} - -#include -#include -#include //std::hash +namespace daemon { class FsData { @@ -129,4 +129,7 @@ public: }; +} // namespace daemon +} // namespace gkfs + #endif //LFS_FS_DATA_H diff --git a/include/daemon/classes/rpc_data.hpp b/include/daemon/classes/rpc_data.hpp index ce1917794..203330e51 100644 --- a/include/daemon/classes/rpc_data.hpp +++ b/include/daemon/classes/rpc_data.hpp @@ -17,6 +17,9 @@ #include +namespace gkfs { +namespace daemon { + class RPCData { private: @@ -59,7 +62,9 @@ public: void self_addr_str(const std::string& addr_str); - }; +} // namespace daemon +} // namespace gkfs + #endif //LFS_RPC_DATA_HPP diff --git a/include/daemon/daemon.hpp b/include/daemon/daemon.hpp index 8fc4cfb9d..16cf7f7bf 100644 --- a/include/daemon/daemon.hpp +++ b/include/daemon/daemon.hpp @@ -30,17 +30,7 @@ extern "C" { #include #include -#define GKFS_DATA (static_cast(FsData::getInstance())) -#define RPC_DATA (static_cast(RPCData::getInstance())) - -void init_environment(); - -void destroy_enviroment(); - -void init_io_tasklet_pool(); - -void init_rpc_server(const std::string& protocol); - -void register_server_rpcs(margo_instance_id mid); +#define GKFS_DATA (static_cast(gkfs::daemon::FsData::getInstance())) +#define RPC_DATA (static_cast(gkfs::daemon::RPCData::getInstance())) #endif // GKFS_DAEMON_DAEMON_HPP diff --git a/src/daemon/classes/fs_data.cpp b/src/daemon/classes/fs_data.cpp index 3b519c4e1..6422ab645 100644 --- a/src/daemon/classes/fs_data.cpp +++ b/src/daemon/classes/fs_data.cpp @@ -15,6 +15,9 @@ #include +namespace gkfs { +namespace daemon { + // getter/setter const std::shared_ptr& FsData::spdlogger() const { @@ -125,7 +128,8 @@ void FsData::blocks_state(bool blocks_state) { FsData::blocks_state_ = blocks_state; } - +} // namespace daemon +} // namespace gkfs diff --git a/src/daemon/classes/rpc_data.cpp b/src/daemon/classes/rpc_data.cpp index 41076f127..3f8064917 100644 --- a/src/daemon/classes/rpc_data.cpp +++ b/src/daemon/classes/rpc_data.cpp @@ -16,6 +16,9 @@ using namespace std; +namespace gkfs { +namespace daemon { + // Getter/Setter margo_instance* RPCData::server_rpc_mid() { @@ -49,3 +52,6 @@ const std::string& RPCData::self_addr_str() const { void RPCData::self_addr_str(const std::string& addr_str) { self_addr_str_ = addr_str; } + +} // namespace daemon +} // namespace gkfs \ No newline at end of file diff --git a/src/daemon/daemon.cpp b/src/daemon/daemon.cpp index 5a198dbe4..9fbf1d3f7 100644 --- a/src/daemon/daemon.cpp +++ b/src/daemon/daemon.cpp @@ -42,6 +42,108 @@ namespace bfs = boost::filesystem; static condition_variable shutdown_please; static mutex mtx; +void init_io_tasklet_pool() { + assert(gkfs::config::rpc::daemon_io_xstreams >= 0); + unsigned int xstreams_num = gkfs::config::rpc::daemon_io_xstreams; + + //retrieve the pool of the just created scheduler + ABT_pool pool; + auto ret = ABT_pool_create_basic(ABT_POOL_FIFO_WAIT, ABT_POOL_ACCESS_MPMC, ABT_TRUE, &pool); + if (ret != ABT_SUCCESS) { + throw runtime_error("Failed to create I/O tasks pool"); + } + + //create all 
subsequent xstream and the associated scheduler, all tapping into the same pool + vector xstreams(xstreams_num); + for (unsigned int i = 0; i < xstreams_num; ++i) { + ret = ABT_xstream_create_basic(ABT_SCHED_BASIC_WAIT, 1, &pool, + ABT_SCHED_CONFIG_NULL, &xstreams[i]); + if (ret != ABT_SUCCESS) { + throw runtime_error("Failed to create task execution streams for I/O operations"); + } + } + + RPC_DATA->io_streams(xstreams); + RPC_DATA->io_pool(pool); +} + +/** + * Registers RPC handlers to Margo instance + * @param hg_class + */ +void register_server_rpcs(margo_instance_id mid) { + MARGO_REGISTER(mid, gkfs::rpc::tag::fs_config, void, rpc_config_out_t, rpc_srv_get_fs_config); + MARGO_REGISTER(mid, gkfs::rpc::tag::create, rpc_mk_node_in_t, rpc_err_out_t, rpc_srv_create); + MARGO_REGISTER(mid, gkfs::rpc::tag::stat, rpc_path_only_in_t, rpc_stat_out_t, rpc_srv_stat); + MARGO_REGISTER(mid, gkfs::rpc::tag::decr_size, rpc_trunc_in_t, rpc_err_out_t, rpc_srv_decr_size); + MARGO_REGISTER(mid, gkfs::rpc::tag::remove, rpc_rm_node_in_t, rpc_err_out_t, rpc_srv_remove); + MARGO_REGISTER(mid, gkfs::rpc::tag::update_metadentry, rpc_update_metadentry_in_t, rpc_err_out_t, + rpc_srv_update_metadentry); + MARGO_REGISTER(mid, gkfs::rpc::tag::get_metadentry_size, rpc_path_only_in_t, rpc_get_metadentry_size_out_t, + rpc_srv_get_metadentry_size); + MARGO_REGISTER(mid, gkfs::rpc::tag::update_metadentry_size, rpc_update_metadentry_size_in_t, + rpc_update_metadentry_size_out_t, rpc_srv_update_metadentry_size); + MARGO_REGISTER(mid, gkfs::rpc::tag::get_dirents, rpc_get_dirents_in_t, rpc_get_dirents_out_t, + rpc_srv_get_dirents); +#ifdef HAS_SYMLINKS + MARGO_REGISTER(mid, gkfs::rpc::tag::mk_symlink, rpc_mk_symlink_in_t, rpc_err_out_t, rpc_srv_mk_symlink); +#endif + MARGO_REGISTER(mid, gkfs::rpc::tag::write, rpc_write_data_in_t, rpc_data_out_t, rpc_srv_write); + MARGO_REGISTER(mid, gkfs::rpc::tag::read, rpc_read_data_in_t, rpc_data_out_t, rpc_srv_read); + MARGO_REGISTER(mid, gkfs::rpc::tag::truncate, rpc_trunc_in_t, rpc_err_out_t, rpc_srv_truncate); + MARGO_REGISTER(mid, gkfs::rpc::tag::get_chunk_stat, rpc_chunk_stat_in_t, rpc_chunk_stat_out_t, + rpc_srv_get_chunk_stat); +} + +void init_rpc_server(const string& protocol_port) { + hg_addr_t addr_self; + hg_size_t addr_self_cstring_sz = 128; + char addr_self_cstring[128]; + // IMPORTANT: this struct needs to be zeroed before use + struct hg_init_info hg_options = {}; +#if USE_SHM + hg_options.auto_sm = HG_TRUE; +#else + hg_options.auto_sm = HG_FALSE; +#endif + hg_options.stats = HG_FALSE; + hg_options.na_class = nullptr; + // Start Margo (this will also initialize Argobots and Mercury internally) + auto mid = margo_init_opt(protocol_port.c_str(), + MARGO_SERVER_MODE, + &hg_options, + HG_TRUE, + gkfs::config::rpc::daemon_handler_xstreams); + if (mid == MARGO_INSTANCE_NULL) { + throw runtime_error("Failed to initialize the Margo RPC server"); + } + // Figure out what address this server is listening on (must be freed when finished) + auto hret = margo_addr_self(mid, &addr_self); + if (hret != HG_SUCCESS) { + margo_finalize(mid); + throw runtime_error("Failed to retrieve server RPC address"); + } + // Convert the address to a cstring (with \0 terminator). 
+ hret = margo_addr_to_string(mid, addr_self_cstring, &addr_self_cstring_sz, addr_self); + if (hret != HG_SUCCESS) { + margo_addr_free(mid, addr_self); + margo_finalize(mid); + throw runtime_error("Failed to convert server RPC address to string"); + } + margo_addr_free(mid, addr_self); + + std::string addr_self_str(addr_self_cstring); + RPC_DATA->self_addr_str(addr_self_str); + + GKFS_DATA->spdlogger()->info("{}() Accepting RPCs on address {}", __func__, addr_self_cstring); + + // Put context and class into RPC_data object + RPC_DATA->server_rpc_mid(mid); + + // register RPCs + register_server_rpcs(mid); +} + void init_environment() { // Initialize metadata db std::string metadata_path = GKFS_DATA->metadir() + "/rocksdb"s; @@ -135,108 +237,6 @@ void destroy_enviroment() { GKFS_DATA->close_mdb(); } -void init_io_tasklet_pool() { - assert(gkfs::config::rpc::daemon_io_xstreams >= 0); - unsigned int xstreams_num = gkfs::config::rpc::daemon_io_xstreams; - - //retrieve the pool of the just created scheduler - ABT_pool pool; - auto ret = ABT_pool_create_basic(ABT_POOL_FIFO_WAIT, ABT_POOL_ACCESS_MPMC, ABT_TRUE, &pool); - if (ret != ABT_SUCCESS) { - throw runtime_error("Failed to create I/O tasks pool"); - } - - //create all subsequent xstream and the associated scheduler, all tapping into the same pool - vector xstreams(xstreams_num); - for (unsigned int i = 0; i < xstreams_num; ++i) { - ret = ABT_xstream_create_basic(ABT_SCHED_BASIC_WAIT, 1, &pool, - ABT_SCHED_CONFIG_NULL, &xstreams[i]); - if (ret != ABT_SUCCESS) { - throw runtime_error("Failed to create task execution streams for I/O operations"); - } - } - - RPC_DATA->io_streams(xstreams); - RPC_DATA->io_pool(pool); -} - -void init_rpc_server(const string& protocol_port) { - hg_addr_t addr_self; - hg_size_t addr_self_cstring_sz = 128; - char addr_self_cstring[128]; - // IMPORTANT: this struct needs to be zeroed before use - struct hg_init_info hg_options = {}; -#if USE_SHM - hg_options.auto_sm = HG_TRUE; -#else - hg_options.auto_sm = HG_FALSE; -#endif - hg_options.stats = HG_FALSE; - hg_options.na_class = nullptr; - // Start Margo (this will also initialize Argobots and Mercury internally) - auto mid = margo_init_opt(protocol_port.c_str(), - MARGO_SERVER_MODE, - &hg_options, - HG_TRUE, - gkfs::config::rpc::daemon_handler_xstreams); - if (mid == MARGO_INSTANCE_NULL) { - throw runtime_error("Failed to initialize the Margo RPC server"); - } - // Figure out what address this server is listening on (must be freed when finished) - auto hret = margo_addr_self(mid, &addr_self); - if (hret != HG_SUCCESS) { - margo_finalize(mid); - throw runtime_error("Failed to retrieve server RPC address"); - } - // Convert the address to a cstring (with \0 terminator). 
- hret = margo_addr_to_string(mid, addr_self_cstring, &addr_self_cstring_sz, addr_self); - if (hret != HG_SUCCESS) { - margo_addr_free(mid, addr_self); - margo_finalize(mid); - throw runtime_error("Failed to convert server RPC address to string"); - } - margo_addr_free(mid, addr_self); - - std::string addr_self_str(addr_self_cstring); - RPC_DATA->self_addr_str(addr_self_str); - - GKFS_DATA->spdlogger()->info("{}() Accepting RPCs on address {}", __func__, addr_self_cstring); - - // Put context and class into RPC_data object - RPC_DATA->server_rpc_mid(mid); - - // register RPCs - register_server_rpcs(mid); -} - -/** - * Registers RPC handlers to Margo instance - * @param hg_class - */ -void register_server_rpcs(margo_instance_id mid) { - MARGO_REGISTER(mid, gkfs::rpc::tag::fs_config, void, rpc_config_out_t, rpc_srv_get_fs_config); - MARGO_REGISTER(mid, gkfs::rpc::tag::create, rpc_mk_node_in_t, rpc_err_out_t, rpc_srv_create); - MARGO_REGISTER(mid, gkfs::rpc::tag::stat, rpc_path_only_in_t, rpc_stat_out_t, rpc_srv_stat); - MARGO_REGISTER(mid, gkfs::rpc::tag::decr_size, rpc_trunc_in_t, rpc_err_out_t, rpc_srv_decr_size); - MARGO_REGISTER(mid, gkfs::rpc::tag::remove, rpc_rm_node_in_t, rpc_err_out_t, rpc_srv_remove); - MARGO_REGISTER(mid, gkfs::rpc::tag::update_metadentry, rpc_update_metadentry_in_t, rpc_err_out_t, - rpc_srv_update_metadentry); - MARGO_REGISTER(mid, gkfs::rpc::tag::get_metadentry_size, rpc_path_only_in_t, rpc_get_metadentry_size_out_t, - rpc_srv_get_metadentry_size); - MARGO_REGISTER(mid, gkfs::rpc::tag::update_metadentry_size, rpc_update_metadentry_size_in_t, - rpc_update_metadentry_size_out_t, rpc_srv_update_metadentry_size); - MARGO_REGISTER(mid, gkfs::rpc::tag::get_dirents, rpc_get_dirents_in_t, rpc_get_dirents_out_t, - rpc_srv_get_dirents); -#ifdef HAS_SYMLINKS - MARGO_REGISTER(mid, gkfs::rpc::tag::mk_symlink, rpc_mk_symlink_in_t, rpc_err_out_t, rpc_srv_mk_symlink); -#endif - MARGO_REGISTER(mid, gkfs::rpc::tag::write, rpc_write_data_in_t, rpc_data_out_t, rpc_srv_write); - MARGO_REGISTER(mid, gkfs::rpc::tag::read, rpc_read_data_in_t, rpc_data_out_t, rpc_srv_read); - MARGO_REGISTER(mid, gkfs::rpc::tag::truncate, rpc_trunc_in_t, rpc_err_out_t, rpc_srv_truncate); - MARGO_REGISTER(mid, gkfs::rpc::tag::get_chunk_stat, rpc_chunk_stat_in_t, rpc_chunk_stat_out_t, - rpc_srv_get_chunk_stat); -} - void shutdown_handler(int dummy) { GKFS_DATA->spdlogger()->info("{}() Received signal: '{}'", __func__, strsignal(dummy)); shutdown_please.notify_all(); @@ -389,4 +389,4 @@ int main(int argc, const char* argv[]) { destroy_enviroment(); GKFS_DATA->spdlogger()->info("{}() Complete. Exiting...", __func__); return 0; -} +} \ No newline at end of file diff --git a/src/daemon/util.cpp b/src/daemon/util.cpp index 4090aa933..92fa06221 100644 --- a/src/daemon/util.cpp +++ b/src/daemon/util.cpp @@ -20,7 +20,10 @@ using namespace std; -void gkfs::util::populate_hosts_file() { +namespace gkfs { +namespace util { + +void populate_hosts_file() { const auto& hosts_file = GKFS_DATA->hosts_file(); GKFS_DATA->spdlogger()->debug("{}() Populating hosts file: '{}'", __func__, hosts_file); ofstream lfstream(hosts_file, ios::out | ios::app); @@ -36,6 +39,9 @@ void gkfs::util::populate_hosts_file() { lfstream.close(); } -void gkfs::util::destroy_hosts_file() { +void destroy_hosts_file() { std::remove(GKFS_DATA->hosts_file().c_str()); } + +} // namespace util +} // namespace gkfs -- GitLab