diff --git a/CHANGELOG.md b/CHANGELOG.md
index 068962c31386d40b09fbe553d0db3a9fdcb3ce4c..08875e4a7fa5df2a6a1e9b1c0d0a1502760d6b76 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,19 @@ All notable changes to GekkoFS project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [Unreleased]
+### New
+ - Directory optimization with compression and reattempt ([!270](https://storage.bsc.es/gitlab/hpc/gekkofs/-/merge_requests/270))
+ - Refactored sfind so it can use SLURM_ environment variables to query different servers.
+ - Added a sample bash script that gathers all the per-rank results (map->reduce).
+ - Compress directory data with zstd.
+ - Added a new config.hpp option to control the compression.
+ - If the directory buffer is not large enough, the request is reattempted with the exact required size.
+
+### Changed
+
+### Fixed
+
 ## [0.9.5] - 2025-08
 ### New
 - Added cppcheck code checking capabilities ([!214](https://storage.bsc.es/gitlab/hpc/gekkofs/-/merge_requests/214))
diff --git a/CMake/FindZStd.cmake b/CMake/FindZStd.cmake
index 8756c5e5abca17aaa8fb22cb48343a015a9f3b26..4f05f584af61f1b0d37887116bf9cfd4822c7652 100644
--- a/CMake/FindZStd.cmake
+++ b/CMake/FindZStd.cmake
@@ -26,32 +26,49 @@
 # SPDX-License-Identifier: GPL-3.0-or-later                                    #
 ################################################################################
-#
-# - Try to find Facebook zstd library
-# This will define
-# ZStd_FOUND
-# ZStd_INCLUDE_DIR
-# ZStd_LIBRARIES
-#
+
+
+# Standard names to search for
+set(ZStd_NAMES zstd zstd_static)
 
 find_path(ZStd_INCLUDE_DIR
-    NAMES zstd.h
-)
+    NAMES zstd.h
+    PATH_SUFFIXES include)
+
+# Allow ZStd_LIBRARY to be set manually, as the location of the zstd library
+if(NOT ZStd_LIBRARY)
+    find_library(ZStd_LIBRARY_RELEASE
+        NAMES ${ZStd_NAMES}
+        PATH_SUFFIXES lib)
+
-find_library(ZStd_LIBRARY
-    NAMES zstd
-)
+    include(SelectLibraryConfigurations)
+    select_library_configurations(ZStd)
+endif()
 
-set(ZStd_LIBRARIES ${ZStd_LIBRARY})
-set(ZStd_INCLUDE_DIRS ${ZStd_INCLUDE_DIR})
+unset(ZStd_NAMES)
+
+mark_as_advanced(ZStd_INCLUDE_DIR)
 
 include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(ZStd
+    REQUIRED_VARS ZStd_LIBRARY ZStd_INCLUDE_DIR
+    VERSION_VAR ZStd_VERSION_STRING)
+
+if(ZStd_FOUND)
+    set(ZStd_INCLUDE_DIRS ${ZStd_INCLUDE_DIR})
+
+    if(NOT ZStd_LIBRARIES)
+        set(ZStd_LIBRARIES ${ZStd_LIBRARY})
+    endif()
 
-find_package_handle_standard_args(ZStd
-    DEFAULT_MSG ZStd_LIBRARY ZStd_INCLUDE_DIR
-)
+    if(NOT TARGET ZStd::ZStd)
+        add_library(ZStd::ZStd UNKNOWN IMPORTED)
+        set_target_properties(ZStd::ZStd PROPERTIES
+            INTERFACE_INCLUDE_DIRECTORIES "${ZStd_INCLUDE_DIRS}")
+
+        set_target_properties(ZStd::ZStd PROPERTIES
+            IMPORTED_LOCATION "${ZStd_LIBRARY}")
 
-mark_as_advanced(
-    ZStd_LIBRARY
-    ZStd_INCLUDE_DIR
-)
\ No newline at end of file
+    endif()
+endif()
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 98fc0776b593c1111b6dbe95213c26121f06f4d4..fbbe38282744282185452931af7cfcbf30671c09 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -161,6 +161,9 @@ find_package(Margo 0.14.0 REQUIRED)
 message(STATUS "[${PROJECT_NAME}] Checking for syscall_intercept")
 find_package(Syscall_intercept REQUIRED)
 
+message(STATUS "[${PROJECT_NAME}] Checking for Zstd")
+find_package(ZStd REQUIRED)
+
 ### AGIOS: required for scheduling I/O requests
 if (GKFS_ENABLE_AGIOS)
     message(STATUS "[${PROJECT_NAME}] Checking for Agios")
diff --git a/README.md b/README.md
index 88b0b86d3a600a4a367b622f7014e1693400fe35..d4232a88a70b7343914c16fe0a04b33d39212c6a 100644
--- a/README.md
+++ b/README.md
@@ -587,6 +587,9 @@ Client-metrics require the CMake argument `-DGKFS_ENABLE_CLIENT_METRICS=ON` (see
 - `LIBGKFS_METRICS_IP_PORT` - Enable flushing to a set ZeroMQ server (replaces `LIBGKFS_METRICS_PATH`).
 - `LIBGKFS_PROXY_PID_FILE` - Path to the proxy pid file (when using the GekkoFS proxy).
 - `LIBGKFS_NUM_REPL` - Number of replicas for data.
+#### Directory optimizations
+Set the `use_dirents_compression` variable in `include/config.hpp` to `true` to transfer directory entries compressed with zstd.
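+For example (a minimal sketch of the knob this MR adds to the `rpc` config
+namespace in `include/config.hpp`):
+```cpp
+constexpr auto use_dirents_compression = true; // compress dirent transfers with zstd
+```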
+
 #### Caching
 ##### Dentry cache
 Improves performance for `ls -l` type operations by caching file metadata for subsequent `stat()` operations during
diff --git a/examples/gfind/CMakeLists.txt b/examples/gfind/CMakeLists.txt
index e73bb47286abc95c986bf7ed7310883f261c68f4..38156271f6f4044e11f1d090086003a45ca90c3d 100644
--- a/examples/gfind/CMakeLists.txt
+++ b/examples/gfind/CMakeLists.txt
@@ -29,6 +29,7 @@
 set (CMAKE_CXX_STANDARD 17)
 
 add_executable(sfind sfind.cpp)
+target_link_libraries(sfind PRIVATE ZStd::ZStd)
 set_property(TARGET sfind PROPERTY POSITION_INDEPENDENT_CODE ON)
 if(GKFS_INSTALL_TESTS)
     install(TARGETS sfind
diff --git a/examples/gfind/gfind.cpp b/examples/gfind/gfind.cpp
index eb6665299046badab6805346508b3847c83d8607..1d64c5328f8ab0a950c999d710273e8d9d43231a 100644
--- a/examples/gfind/gfind.cpp
+++ b/examples/gfind/gfind.cpp
@@ -36,9 +36,6 @@
   SPDX-License-Identifier: GPL-3.0-or-later
 */
 
-/* Based on pfind from ior500 */
-/* https://github.com/VI4IO/pfind/ */
-
 #include
 #include
 #include
@@ -58,12 +55,11 @@
 #include
 #include
 #include
-#include
-#include
+#include <mpi.h> // MPI header
+
 using namespace std;
 
 /* Minimal struct needed for io500 find */
-/* We could also do the filtering on the server */
 struct dirent_extended {
     size_t size;
     time_t ctime;
@@ -72,14 +68,12 @@
     char d_name[1];
 };
 
-/* Function exported from GekkoFS LD_PRELOAD, code needs to be compiled with
- * -fPIC, if not will segfault */
+/* Function exported from GekkoFS LD_PRELOAD */
 extern "C" int
-gkfs_getsingleserverdir(const char* path, struct dirent_extended* dirp,
-                        unsigned int count, int server) __attribute__((weak));
+gkfs_getsingleserverdir(const char* path, struct dirent_extended** dirp,
+                        int server) __attribute__((weak));
 
-/* PFIND OPTIONS EXTENDED We need to add the GekkoFS mount dir and the number of
- * servers */
+/* PFIND OPTIONS EXTENDED */
 typedef struct {
     string workdir;
     bool just_count = false;
@@ -108,22 +102,21 @@
 typedef struct {
     uint64_t ctime_min = 0;
-    double stonewall_endtime = 0.0;
-    FILE* logfile = nullptr;
-    bool needs_stat = false;
 } pfind_runtime_options_t;
 
 static pfind_runtime_options_t runtime;
-
 int pfind_rank = 0;
 int pfind_size = 1;
-
 static pfind_options_t* opt;
 
 [[noreturn]] void
 pfind_abort(const string& str) {
-    cerr << str << endl;
-    exit(1);
+    if(pfind_rank == 0) {
+        cerr << "ERROR: " << str << endl;
+    }
+    // Use MPI_Abort for a clean shutdown in an MPI environment
+    MPI_Abort(MPI_COMM_WORLD, 1);
+    exit(1); // MPI_Abort should terminate, but exit is a fallback
 }
 
@@ -144,13 +137,12 @@ pfind_print_help(const pfind_options_t* res) {
            res->name_pattern.c_str(), res->num_servers, res->mountdir.c_str());
 }
 
-MPI_Comm pfind_com;
 pfind_options_t*
-pfind_parse_args(int argc, char** argv, int force_print_help, MPI_Comm com) {
-    MPI_Comm_rank(com, &pfind_rank);
-    MPI_Comm_size(com, &pfind_size);
-    pfind_com = com;
+pfind_parse_args(int argc, char** argv, bool force_print_help) {
+
     auto res = new pfind_options_t();
 
@@ -353,11 +345,6 @@ pfind_parse_args(int argc, char** argv, int force_print_help, MPI_Comm com) {
     if(print_help) {
         if(pfind_rank == 0)
             pfind_print_help(res);
-        int init;
-        MPI_Initialized(&init);
-        if(init) {
-            MPI_Finalize();
-        }
         exit(0);
     }
 
@@ -368,221 +355,148 @@ pfind_parse_args(int argc, char** argv, int force_print_help, MPI_Comm com) {
     return res;
 }
 
-/* Master send a new path to the workers */
+
+// Helper to broadcast a C++ string
 void
-send_newPath(string path) {
-    auto count = path.size() + 1;
-    MPI_Bcast(&count, 1, MPI_INT, 0, MPI_COMM_WORLD);
-    MPI_Bcast((void*) path.c_str(), count, MPI_CHAR, 0, MPI_COMM_WORLD);
+bcast_string(string& s, int root) {
+    int len = 0;
+    if(pfind_rank == root) {
+        len = s.length();
+    }
+    MPI_Bcast(&len, 1, MPI_INT, root, MPI_COMM_WORLD);
+    s.resize(len);
+    MPI_Bcast(&s[0], len, MPI_CHAR, root, MPI_COMM_WORLD);
 }
 
-/* Clients get a new path, getting a "0" size char means there is no new path*/
-string
-recv_newPath() {
-    int count;
-    MPI_Bcast(&count, 1, MPI_INT, 0, MPI_COMM_WORLD);
-    if(count == 0)
-        return "Terminate";
-    std::vector<char> buf(count);
-    MPI_Bcast(buf.data(), count, MPI_CHAR, 0, MPI_COMM_WORLD);
-    return std::string(buf.begin(), buf.end());
+// Broadcast all options from rank 0 to other processes
+void
+bcast_options(pfind_options_t* opt) {
+    // Broadcast simple POD types
+    MPI_Bcast(&opt->just_count, 1, MPI_CXX_BOOL, 0, MPI_COMM_WORLD);
+    MPI_Bcast(&opt->size, 1, MPI_UINT64_T, 0, MPI_COMM_WORLD);
+    MPI_Bcast(&opt->num_servers, 1, MPI_INT, 0, MPI_COMM_WORLD);
+    MPI_Bcast(&opt->verbosity, 1, MPI_INT, 0, MPI_COMM_WORLD);
+
+    // Broadcast complex types (strings)
+    bcast_string(opt->workdir, 0);
+    bcast_string(opt->timestamp_file, 0);
+    bcast_string(opt->name_pattern, 0);
+    bcast_string(opt->mountdir, 0);
+
+    // All processes construct the regex from the broadcasted pattern
+    if(pfind_rank != 0 && !opt->name_pattern.empty()) {
+        try {
+            opt->name_regex = regex(opt->name_pattern);
+        } catch(const regex_error& e) {
+            pfind_abort("Invalid regex for name given: " + string(e.what()));
+        }
+    }
 }
 
-/* Client Processing a path.
- * We increment local checked/found based on the filters
- * Each client sends the request to a subset of GekkoFS servers.
- * We use 102400 (plus space from 255 chars paths) so it is nearly 1M files per
- * server, which is enough for most cases
- *
- */
 void
-dirProcess(const string path, unsigned long long& checked,
-           unsigned long long& found, queue<string>& dirs,
-           unsigned int world_rank, unsigned int world_size,
-           const pfind_options_t* opt) {
-    const size_t buffer_size =
-            (sizeof(struct dirent_extended) + 255) * 1024 * 100;
-    unique_ptr<struct dirent_extended[]> getdir(
-            new struct dirent_extended
-                    [buffer_size / (sizeof(struct dirent_extended) + 255)]{});
-
-
-    int servers_per_node = ceil(opt->num_servers / (world_size - 1));
-    if(servers_per_node == 0)
-        servers_per_node++;
-    for(int it = 0; it < servers_per_node; it++) {
-        auto server = (world_rank - 1) * servers_per_node + it;
-        if(server >= (unsigned int) opt->num_servers)
-            break;
+dirProcess(const string& path, unsigned long long& checked,
+           unsigned long long& found, const pfind_options_t* opt) {
+
+    // --- PARALLELIZATION LOGIC ---
+    // Each process calculates its own range of servers to query.
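+    // Worked example (hypothetical numbers, not from the source): with
+    // num_servers = 10 and pfind_size = 4, servers_per_proc = 2 and
+    // remainder = 2, so the ranks cover [0,3), [3,6), [6,8) and [8,10);
+    // the first `remainder` ranks take one extra server each.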
+    int servers_per_proc = opt->num_servers / pfind_size;
+    int remainder = opt->num_servers % pfind_size;
+    int start_server =
+            pfind_rank * servers_per_proc + min(pfind_rank, remainder);
+    int end_server =
+            start_server + servers_per_proc + (pfind_rank < remainder ? 1 : 0);
+
+    if(opt->verbosity > 0) {
+        cout << "[Rank " << pfind_rank << "] Processing servers "
+             << start_server << " to " << end_server - 1 << endl;
+    }
 
-        unsigned long long total_size = 0;
-        long unsigned int n = gkfs_getsingleserverdir(
-                path.c_str(), getdir.get(), buffer_size, server);
+    // Each process loops ONLY over its assigned servers
+    for(int server = start_server; server < end_server; server++) {
+        struct dirent_extended* entries = nullptr;
+        int n = gkfs_getsingleserverdir(path.c_str(), &entries, server);
 
-        struct dirent_extended* temp = getdir.get();
+        if(n <= 0) { // Handle empty or error cases
+            if(entries)
+                free(entries);
+            continue;
+        }
 
-        while(total_size < n) {
-            if(strlen(temp->d_name) == 0)
+        char* ptr = reinterpret_cast<char*>(entries);
+        int bytes_processed = 0;
+        while(bytes_processed < n) {
+            struct dirent_extended* temp =
+                    reinterpret_cast<struct dirent_extended*>(ptr);
+            if(temp->d_reclen == 0)
                 break;
-            total_size += temp->d_reclen;
-
-            /* Queue directory to process */
-            if(temp->d_type == 1) {
-                string slash;
-                if(path.back() != '/')
-                    slash = "/";
-                checked++;
-                dirs.push(path + slash + temp->d_name);
-                temp = reinterpret_cast<struct dirent_extended*>(
-                        reinterpret_cast<char*>(temp) + temp->d_reclen);
-                continue;
-            }
+            if(temp->d_type != 1) {
+                bool timeOK = opt->timestamp_file.empty() ||
+                              ((uint64_t) temp->ctime >= runtime.ctime_min);
+                bool sizeOK =
+                        (opt->size == std::numeric_limits<uint64_t>::max() ||
+                         temp->size == opt->size);
+                bool nameOK = opt->name_pattern.empty() ||
+                              regex_search(temp->d_name, opt->name_regex);
 
-            /* Find filtering */
-            bool timeOK = true;
-            if(!opt->timestamp_file.empty()) {
-                if((uint64_t) temp->ctime < runtime.ctime_min)
-                    timeOK = false;
-            }
-
-            if(timeOK && (temp->size == opt->size ||
-                          opt->size == std::numeric_limits<uint64_t>::max())) {
-                if(opt->name_pattern.empty() ||
-                   regex_search(temp->d_name, opt->name_regex)) {
+                if(timeOK && sizeOK && nameOK)
                     found++;
-                }
             }
-            checked++;
-            temp = reinterpret_cast<struct dirent_extended*>(
-                    reinterpret_cast<char*>(temp) + temp->d_reclen);
+            checked++; // count every entry we examined
+            bytes_processed += temp->d_reclen;
+            ptr += temp->d_reclen;
         }
+        free(entries);
     }
 }
 
 int
-process(char* processor_name, int world_rank, int world_size,
-        const pfind_options_t* opt) {
-    // Print off a hello world message
+process_parallel(const pfind_options_t* opt) {
+    unsigned long long local_found = 0;
+    unsigned long long local_checked = 0;
+    runtime = {}; // Initialize runtime options
 
-    // INIT PFIND
-    runtime = {};
-    /* Get timestamp file */
+    /* Get timestamp file, broadcast from rank 0 */
     if(!opt->timestamp_file.empty()) {
         if(pfind_rank == 0) {
             struct stat timer_file;
             if(lstat(opt->timestamp_file.c_str(), &timer_file) != 0) {
-                printf("Could not open: \"%s\", error: %s",
-                       opt->timestamp_file.c_str(), strerror(errno));
-                pfind_abort("\n");
+                cerr << "Could not open: \"" << opt->timestamp_file
+                     << "\", error: " << strerror(errno) << endl;
+                MPI_Abort(MPI_COMM_WORLD, 1);
             }
             runtime.ctime_min = timer_file.st_ctime;
         }
-        MPI_Bcast(&runtime.ctime_min, 1, MPI_INT, 0, pfind_com);
+        // Broadcast the timestamp to all processes
+        MPI_Bcast(&runtime.ctime_min, 1, MPI_UINT64_T, 0, MPI_COMM_WORLD);
     }
 
-    if(world_rank == 0) {
-        queue<string> dirs;
-        string workdir = opt->workdir;
-        if(workdir.rfind(opt->mountdir, 0) == 0) {
-            workdir = workdir.substr(opt->mountdir.length());
-        }
-        if(workdir.empty()) {
-            workdir = "/";
-        }
-        dirs.push(workdir);
-
-        do {
-            std::string processpath = dirs.front();
-            dirs.pop();
-            send_newPath(processpath);
-
-            auto received_strings = true;
-
-            for(auto i = 1; i < world_size; i++) {
-                received_strings = true;
-                while(received_strings) {
-                    received_strings = false;
-
-                    MPI_Status mpistatus;
-                    MPI_Probe(i, 0, MPI_COMM_WORLD, &mpistatus);
-
-                    int count;
-                    MPI_Get_count(&mpistatus, MPI_CHAR, &count);
-
-                    std::vector<char> buf(count);
-                    MPI_Recv(buf.data(), count, MPI_CHAR, i, 0, MPI_COMM_WORLD,
-                             &mpistatus);
-
-                    if(count == 0) {
-                        continue;
-                    }
-                    std::string s(buf.begin(), buf.end());
-                    dirs.push(s);
-                    received_strings = true;
-                }
-            }
-        } while(!dirs.empty());
-
-
-        auto count = 0;
-        MPI_Bcast(&count, 1, MPI_INT, 0, MPI_COMM_WORLD);
-
-        MPI_Barrier(MPI_COMM_WORLD);
+    string workdir = opt->workdir;
+    if(workdir.rfind(opt->mountdir, 0) == 0) {
+        workdir = workdir.substr(opt->mountdir.length());
+    }
+    if(workdir.empty()) {
+        workdir = "/";
+    }
 
-        unsigned long long* Array_checked = (unsigned long long*) malloc(
-                sizeof(unsigned long long) * world_size);
-        unsigned long long* Array_found = (unsigned long long*) malloc(
-                sizeof(unsigned long long) * world_size);
-        unsigned long long checked = 0;
-        unsigned long long found = 0;
+    // Each process calls dirProcess, which will handle its assigned subset of
+    // servers
+    dirProcess(workdir, local_checked, local_found, opt);
 
-        MPI_Gather(&checked, 1, MPI_UNSIGNED_LONG_LONG, Array_checked, 1,
-                   MPI_UNSIGNED_LONG_LONG, 0, MPI_COMM_WORLD);
-        MPI_Gather(&found, 1, MPI_UNSIGNED_LONG_LONG, Array_found, 1,
-                   MPI_UNSIGNED_LONG_LONG, 0, MPI_COMM_WORLD);
-        for(int i = 0; i < world_size; i++) {
-            checked += Array_checked[i];
-            found += Array_found[i];
-        }
+    unsigned long long global_found = 0;
+    unsigned long long global_checked = 0;
 
-        cout << "MATCHED " << found << "/" << checked << endl;
-    }
-    else {
-        unsigned long long checked = 0;
-        unsigned long long found = 0;
-        while(1) {
+    MPI_Reduce(&local_found, &global_found, 1, MPI_UNSIGNED_LONG_LONG, MPI_SUM,
+               0, MPI_COMM_WORLD);
+    MPI_Reduce(&local_checked, &global_checked, 1, MPI_UNSIGNED_LONG_LONG,
+               MPI_SUM, 0, MPI_COMM_WORLD);
 
-            string toProcess = recv_newPath();
-            if(toProcess == "Terminate") {
-                break;
-            }
-            // cout << "REceived " << toProcess << " --- " << world_rank <<
-            // endl;
-            queue<string> dirs;
-
-            dirProcess(toProcess, checked, found, dirs, world_rank, world_size,
-                       opt);
-            // Send NEW DIRS to master
-            while(!dirs.empty()) {
-                string s = dirs.front();
-                dirs.pop();
-                // cout << world_rank << " --> Sending " << s << endl;
-                MPI_Send((void*) s.c_str(), (s.size() + 1), MPI_CHAR, 0, 0,
-                         MPI_COMM_WORLD);
-            }
-            // cout << world_rank << " --> Sending 0 " << endl;
-            MPI_Send((void*) 0, 0, MPI_CHAR, 0, 0, MPI_COMM_WORLD);
-        }
-        MPI_Barrier(MPI_COMM_WORLD);
-        MPI_Gather(&checked, 1, MPI_UNSIGNED_LONG_LONG, nullptr, 1,
-                   MPI_UNSIGNED_LONG_LONG, 0, MPI_COMM_WORLD);
-        MPI_Gather(&found, 1, MPI_UNSIGNED_LONG_LONG, nullptr, 1,
-                   MPI_UNSIGNED_LONG_LONG, 0, MPI_COMM_WORLD);
+    if(pfind_rank == 0) {
+        cout << "MATCHED " << global_found << "/" << global_checked << endl;
     }
 
     return 0;
@@ -590,37 +504,44 @@ process(char* processor_name, int world_rank, int world_size,
 
 int
 main(int argc, char** argv) {
+    // --- MPI INITIALIZATION ---
+    MPI_Init(&argc, &argv);
+    MPI_Comm_rank(MPI_COMM_WORLD, &pfind_rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &pfind_size);
 
-    for(int i = 0; i < argc; i++) {
+    for(int i = 0; i < argc; ++i) {
         if(strcmp(argv[i], "--help") == 0) {
-            argv[i][0] = 0;
-            pfind_rank = 0;
-            opt = pfind_parse_args(argc, argv, 1, MPI_COMM_SELF);
-            delete opt;
+            if(pfind_rank == 0) {
+                // pfind_parse_args handles printing help
+                pfind_parse_args(argc, argv, true);
+            }
+            MPI_Finalize();
             return 0;
         }
     }
 
-    // Initialize the MPI environment
-    MPI_Init(&argc, &argv);
+    if(pfind_rank == 0) {
+        opt = pfind_parse_args(argc, argv, false);
+    } else {
+        // Non-root ranks get a default-constructed options struct and
+        // receive the real values via bcast_options() below
+        opt = new pfind_options_t();
+    }
 
-    // Get the number of processes
-    int world_size;
-    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
+    // --- BROADCAST CONFIGURATION ---
+    bcast_options(opt);
 
-    // Get the rank of the process
-    int world_rank;
-    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
+    // Check if the GekkoFS function is available (e.g., via LD_PRELOAD)
+    if(gkfs_getsingleserverdir == nullptr) {
+        if(pfind_rank == 0)
+            cerr << "Error: GekkoFS functions not available. Is the library preloaded?"
+                 << endl;
+        MPI_Abort(MPI_COMM_WORLD, 1);
+    }
 
-    opt = pfind_parse_args(argc, argv, 0, MPI_COMM_WORLD);
-    // cout << opt->num_servers << " -- " << opt->mountdir << endl;
-    // Get the name of the processor
-    char processor_name[MPI_MAX_PROCESSOR_NAME];
-    int name_len;
-    MPI_Get_processor_name(processor_name, &name_len);
+    // --- RUN PARALLEL PROCESSING ---
+    int result = process_parallel(opt);
 
-    process(processor_name, world_rank, world_size, opt);
     delete opt;
-    // Finalize the MPI environment.
+
+    // --- MPI FINALIZATION ---
     MPI_Finalize();
-}
+    return result;
+}
\ No newline at end of file
diff --git a/examples/gfind/pfind.sh b/examples/gfind/pfind.sh
index 90618182bad9195a18fc7491512a6cd7e1d362bd..03a2dcf4b0225aa6778c7b3ba3c74920767f5281 100755
--- a/examples/gfind/pfind.sh
+++ b/examples/gfind/pfind.sh
@@ -27,10 +27,47 @@
 # SPDX-License-Identifier: GPL-3.0-or-later                                    #
 ################################################################################
-# optimal $GKFS_FIND_PROCESS is $GKFS_SERVERS+1 as we align servers and find processes if the $NUM_NODES are the same
-# Output is saved to a file, so it can be processed by io500
-srun --nvram-options=1LM:1980 -N $NUM_NODES -n $GKFS_FIND_PROCESS --export=ALL,PSM2_DEVICES=self,hfi,shm,PSM2_MULTIRAIL=1,PSM2_MULTI_EP=0,LD_PRELOAD=${GKFS_PRLD} -o find_${SLURM_JOB_ID}.txt $GKFS_FIND $@ -M $GKFS_MNT -S $GKFS_SERVERS
-tail -n1 find_${SLURM_JOB_ID}.txt
+
+
+# Sample values; adapt them to your job. GKFS_PRLD (the GekkoFS client
+# interception library) and GKFS_MNT are expected from the environment.
+NUM_NODES=10
+GKFS_FIND_PROCESS=10
+GKFS_SERVERS=200
+GKFS_FIND=~/ADMIRE/iodeps/bin/sfind
+
+srun -N $NUM_NODES -n $GKFS_FIND_PROCESS --overlap --overcommit --mem=0 --oversubscribe --export=ALL,LD_PRELOAD=${GKFS_PRLD} $GKFS_FIND "$@" -M $GKFS_MNT -S $GKFS_SERVERS
+
+
+# Initialize total counters
+total_found=0
+total_checked=0
+
+# Check if any result files exist
+if ! ls gfind_results.rank-*.txt 1> /dev/null 2>&1; then
+    echo "No result files found (gfind_results.rank-*.txt)."
+    exit 1
+fi
+
+# Loop through all result files
+for file in gfind_results.rank-*.txt; do
+    # Read the line "MATCHED found/checked" from the file
+    # and extract the numbers.
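+    # Each per-rank file holds a single line of the form
+    #   MATCHED <found>/<checked>
+    # e.g. "MATCHED 42/1000" (illustrative numbers), so the second
+    # whitespace-separated field carries both counters.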
+    read -r _ counts < "$file"
+
+    # Split the "found/checked" field on '/'
+    found=$(echo "$counts" | cut -d'/' -f1)
+    checked=$(echo "$counts" | cut -d'/' -f2)
+
+    # Bash arithmetic to add to totals
+    total_found=$((total_found + found))
+    total_checked=$((total_checked + checked))
+done
+
+# Print the final aggregated result
+echo "MATCHED ${total_found}/${total_checked}"
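+
+# The loop above is equivalent to this one-pass awk aggregation (an
+# illustrative alternative, splitting fields on spaces and '/'):
+#   awk -F'[ /]' '{f += $2; c += $3} END {print "MATCHED " f "/" c}' gfind_results.rank-*.txt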
+
+# Clean up the intermediate result files now that they have been aggregated
+rm gfind_results.rank-*.txt
 
 exit 0
diff --git a/examples/gfind/sfind.cpp b/examples/gfind/sfind.cpp
index 8e5dfaa4a0eefffcbd9cc20a64029d6ed5925137..520b18333547101cff90e209ed35c9538c0f439e 100644
--- a/examples/gfind/sfind.cpp
+++ b/examples/gfind/sfind.cpp
@@ -1,44 +1,3 @@
-/*
-  Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain
-  Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany
-
-  This software was partially supported by the
-  EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu).
-
-  This software was partially supported by the
-  ADA-FS project under the SPPEXA project funded by the DFG.
-
-  This software was partially supported by the
-  the European Union’s Horizon 2020 JTI-EuroHPC research and
-  innovation programme, by the project ADMIRE (Project ID: 956748,
-  admire-eurohpc.eu)
-
-  This project was partially promoted by the Ministry for Digital Transformation
-  and the Civil Service, within the framework of the Recovery,
-  Transformation and Resilience Plan - Funded by the European Union -
-  NextGenerationEU.
-
-  This file is part of GekkoFS.
-
-  GekkoFS is free software: you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation, either version 3 of the License, or
-  (at your option) any later version.
-
-  GekkoFS is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with GekkoFS. If not, see <https://www.gnu.org/licenses/>.
-
-  SPDX-License-Identifier: GPL-3.0-or-later
-*/
-
-/* Based on pfind from ior500 */
-/* https://github.com/VI4IO/pfind/ */
-
 #include
 #include
 #include
@@ -58,11 +17,14 @@
 #include
 #include
 #include
+#include <cstdlib>   // For getenv
+#include <pthread.h> // Pthreads header
+#include <atomic>    // For atomic counters
+#include <fstream>   // For the per-rank log/result files
 
 using namespace std;
 
+#pragma region structs
 /* Minimal struct needed for io500 find */
-/* We could also do the filtering on the server */
 struct dirent_extended {
     size_t size;
     time_t ctime;
@@ -71,14 +33,12 @@
     char d_name[1];
 };
 
-/* Function exported from GekkoFS LD_PRELOAD, code needs to be compiled with
- * -fPIC */
+/* Function exported from GekkoFS LD_PRELOAD */
 extern "C" int
-gkfs_getsingleserverdir(const char* path, struct dirent_extended* dirp,
-                        unsigned int count, int server) __attribute__((weak));
+gkfs_getsingleserverdir(const char* path, struct dirent_extended** dirp,
+                        int server) __attribute__((weak));
 
-/* PFIND OPTIONS EXTENDED We need to add the GekkoFS mount dir and the number of
- * servers */
+/* PFIND OPTIONS EXTENDED */
 typedef struct {
     string workdir;
     bool just_count = false;
@@ -86,45 +46,34 @@
     string results_dir;
     int stonewall_timer = 0;
     bool print_rates = false;
-
     string timestamp_file;
     string name_pattern;
     regex name_regex;
    uint64_t size = std::numeric_limits<uint64_t>::max();
-
     int num_servers = 0;
     string mountdir;
-    // optimizing parameters NOT USED
-    int queue_length = 100000;
-    int max_entries_per_iter = 1000;
-    bool steal_from_next = false; // if true, then steal from the next process
-    int parallel_single_dir_access = 0; // if 1, use hashing to parallelize
-                                        // single directory access, if 2
-                                        // sequential increment
-
     int verbosity = 0;
 } pfind_options_t;
 
 typedef struct {
     uint64_t ctime_min = 0;
-    double stonewall_endtime = 0.0;
-    FILE* logfile = nullptr;
-    bool needs_stat = false;
 } pfind_runtime_options_t;
+#pragma endregion
 
 static pfind_runtime_options_t runtime;
-
-int pfind_rank = 0;
-int pfind_size = 1;
-
 static pfind_options_t* opt;
 
+// Global rank/size for logging and work distribution
+int sfind_rank = 0;
+int sfind_size = 1;
+
 [[noreturn]] void
 pfind_abort(const string& str) {
-    cerr << str << endl;
+    cerr << "ERROR [Rank " << sfind_rank << "]: " << str << endl;
     exit(1);
 }
 
+#pragma region parsing
 static void
 pfind_print_help(const pfind_options_t* res) {
     printf("pfind <workdir>\nSynopsis:\n"
@@ -144,146 +93,85 @@
 
 pfind_options_t*
 pfind_parse_args(int argc, char** argv, bool force_print_help) {
-
-    pfind_rank = 0;
-    pfind_size = 1;
-
     auto res = new pfind_options_t();
-
     bool print_help = force_print_help;
     vector<char*> modified_argv(argv, argv + argc);
-
-    // when we find special args, we process them
-    // but we need to replace them with 0 so that getopt will ignore them
-    // and getopt will continue to process beyond them
-    for(int i = 1; i < argc - 1; ++i) {
-        if(strcmp(argv[i], "-newer") == 0) {
+    for(int i = 1; i < argc; ++i) {
+        if(strcmp(argv[i], "-newer") == 0 && i + 1 < argc) {
             res->timestamp_file = argv[i + 1];
             modified_argv[i][0] = 0;
             modified_argv[i + 1][0] = 0;
             ++i;
-        } else if(strcmp(argv[i], "-size") == 0) {
+        } else if(strcmp(argv[i], "-size") == 0 && i + 1 < argc) {
            char* str = argv[i + 1];
            char extension = str[strlen(str) - 1];
            str[strlen(str) - 1] = 0;
            try {
                res->size = stoull(str);
-            } catch(const invalid_argument& e) {
-                pfind_abort("Invalid size argument: " + string(str) + "\n");
-            } catch(const out_of_range& e) {
-                pfind_abort("Size argument out of range: " + string(str) +
-                            "\n");
-            }
-
-            switch(extension) {
-                case 'c':
-                    break;
-                default:
-                    pfind_abort("Unsupported extension for -size\n");
+            } catch(...) {
+                pfind_abort("Invalid size: " + string(str));
             }
+            if(extension != 'c')
+                pfind_abort(
+                        "Unsupported extension for -size, only 'c' is supported");
 
             modified_argv[i][0] = 0;
             modified_argv[i + 1][0] = 0;
             ++i;
-        } else if(strcmp(argv[i], "-name") == 0) {
+        } else if((strcmp(argv[i], "-name") == 0 ||
+                   strcmp(argv[i], "-regex") == 0) &&
+                  i + 1 < argc) {
+            bool is_name = (strcmp(argv[i], "-name") == 0);
             string pattern = argv[i + 1];
-            res->name_pattern.clear();
-            res->name_pattern.reserve(pattern.length() * 4 +
-                                      100); // pre-allocate for expansion
-
-            for(char c : pattern) {
-                switch(c) {
-                    case '*':
+            if(is_name) {
+                res->name_pattern.clear();
+                for(char c : pattern) {
+                    if(c == '*')
                         res->name_pattern += ".*";
-                        break;
-                    case '.':
+                    else if(c == '.')
                         res->name_pattern += "[.]";
-                        break;
-                    case '"':
-                    case '\'':
-                        // erase quotes
-                        break;
-                    default:
+                    else if(c != '"' && c != '\'')
                         res->name_pattern += c;
-                        break;
                 }
+            } else {
+                res->name_pattern = pattern;
             }
-
-            try {
-                res->name_regex = regex(res->name_pattern);
-            } catch(const regex_error& e) {
-                pfind_abort("Invalid regex for name given: " +
-                            string(e.what()) + "\n");
-            }
-            modified_argv[i][0] = 0;
-            modified_argv[i + 1][0] = 0;
-            ++i;
-        } else if(strcmp(argv[i], "-regex") == 0) {
-            res->name_pattern = argv[i + 1];
             try {
                 res->name_regex = regex(res->name_pattern);
             } catch(const regex_error& e) {
-                pfind_abort("Invalid regex for name given: " +
-                            string(e.what()) + "\n");
+                pfind_abort("Invalid regex: " + string(e.what()));
             }
             modified_argv[i][0] = 0;
             modified_argv[i + 1][0] = 0;
             ++i;
-        } else if(strcmp(argv[i], "-M") == 0) {
+        } else if(strcmp(argv[i], "-M") == 0 && i + 1 < argc) {
             res->mountdir = argv[i + 1];
             modified_argv[i][0] = 0;
             modified_argv[i + 1][0] = 0;
             ++i;
-        } else if(strcmp(argv[i], "-S") == 0) {
+        } else if(strcmp(argv[i], "-S") == 0 && i + 1 < argc) {
             try {
                 res->num_servers = stoi(argv[i + 1]);
-            } catch(const invalid_argument& e) {
-                pfind_abort("Invalid number of servers: " +
-                            string(argv[i + 1]) + "\n");
-            } catch(const out_of_range& e) {
-                pfind_abort("Number of servers out of range: " +
-                            string(argv[i + 1]) + "\n");
+            } catch(...) {
+                pfind_abort("Invalid server count: " + string(argv[i + 1]));
             }
             modified_argv[i][0] = 0;
             modified_argv[i + 1][0] = 0;
             ++i;
-        } else if(res->workdir.empty()) {
+        } else if(res->workdir.empty() && argv[i][0] != '-') {
             res->workdir = argv[i];
             modified_argv[i][0] = 0;
         }
     }
-
     if(argc == 2 && res->workdir.empty()) {
         res->workdir = argv[1];
     }
-
     const char* optstring = "CPs:r:vhD:xq:H:NM:S:";
     int c;
-    optind = 1; // Reset getopt's internal index for repeated calls.
+    optind = 1;
     while((c = getopt(argc, modified_argv.data(), optstring)) != -1) {
-        if(c == -1) {
-            break;
-        }
-
         switch(c) {
-            case 'H':
-                try {
-                    res->parallel_single_dir_access = stoi(optarg);
-                } catch(const invalid_argument& e) {
-                    pfind_abort("Invalid parallel_single_dir_access: " +
-                                string(optarg) + "\n");
-                } catch(const out_of_range& e) {
-                    pfind_abort("parallel_single_dir_access out of range: " +
-                                string(optarg) + "\n");
-                }
-                break;
-            case 'N':
-                res->steal_from_next = true;
-                break;
-            case 'x':
-                /* ignore fake arg that we added when we processed the extra
-                 * args */
+            case 'h':
+                print_help = true;
                 break;
             case 'P':
                 res->print_by_process = true;
@@ -291,203 +179,250 @@
             case 'C':
                 res->just_count = true;
                 break;
-            case 'D':
-                if(strcmp(optarg, "rates") == 0) {
-                    res->print_rates = true;
-                } else {
-                    pfind_abort("Unsupported debug flag\n");
-                }
-                break;
-            case 'h':
-                print_help = true;
-                break;
-            case 'r':
-                res->results_dir = optarg;
-                break;
-            case 'q':
-                try {
-                    res->queue_length = stoi(optarg);
-                } catch(const invalid_argument& e) {
-                    pfind_abort("Invalid queue length: " + string(optarg) +
-                                "\n");
-                } catch(const out_of_range& e) {
-                    pfind_abort("Queue length out of range: " + string(optarg) +
-                                "\n");
-                }
-                if(res->queue_length < 10) {
-                    pfind_abort("Queue must be at least 10 elements!\n");
-                }
-                break;
-            case 's':
-                try {
-                    res->stonewall_timer = stoi(optarg);
-                } catch(const invalid_argument& e) {
-                    pfind_abort("Invalid stonewall timer: " + string(optarg) +
-                                "\n");
-                } catch(const out_of_range& e) {
-                    pfind_abort("Stonewall timer out of range: " +
-                                string(optarg) + "\n");
-                }
-                break;
-            case 'v':
-                res->verbosity++;
-                break;
-            case 0:
-                break;
             case '?':
-                // getopt already prints an error message
                 exit(1);
             default:
-                pfind_abort("Unhandled option: " + string(1, (char) c) + "\n");
+                break;
         }
     }
-
-    if(res->verbosity > 2 && pfind_rank == 0) {
-        printf("Regex: %s\n", res->name_pattern.c_str());
-    }
-
     if(print_help) {
-        if(pfind_rank == 0)
+        if(sfind_rank == 0)
             pfind_print_help(res);
         exit(0);
     }
-
     if(res->workdir.empty()) {
-        pfind_abort("Error: pfind <workdir>\n");
+        pfind_abort("pfind <workdir> is required");
+    }
+    if(res->num_servers == 0) {
+        pfind_abort("-S is required");
     }
-
     return res;
 }
+#pragma endregion
+
+struct ThreadData {
+    int thread_id;
+    const pfind_options_t* opt;
+    const string* workdir;
+    queue<int>* server_queue;
+    pthread_mutex_t* queue_mutex;
+    atomic<unsigned long long>* total_found;
+    atomic<unsigned long long>* total_checked;
+};
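+// A sketch of the consumption pattern used below: each worker pops one
+// server id at a time under queue_mutex (the queue itself is not
+// thread-safe), scans that server's entries with thread-local counters,
+// and folds them into the shared atomics exactly once before exiting.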
-/* Client Processing a path.
- * We increment local checked/found based on the filters
- * Each client sends the request to a subset of GekkoFS servers.
- * We use 102400 (plus space from 255 chars paths) so it is nearly 1M files per
- * server, which is enough for most cases
- *
- */
-void
-dirProcess(const string& path, unsigned long long& checked,
-           unsigned long long& found, queue<string>& dirs,
-           unsigned int world_rank, unsigned int world_size,
-           const pfind_options_t* opt) {
-    const size_t buffer_size =
-            (sizeof(struct dirent_extended) + 255) * 1024 * 100;
-    unique_ptr<struct dirent_extended[]> getdir(
-            new struct dirent_extended
-                    [buffer_size / (sizeof(struct dirent_extended) + 255)]{});
-
-    // cout << "PROCESSING " << world_rank << "/"<< world_size << " = " << path
-    // << endl;
-
-    for(int server = 0; server < opt->num_servers; server++) {
-        unsigned long long total_size = 0;
-        long unsigned int n = gkfs_getsingleserverdir(
-                path.c_str(), getdir.get(), buffer_size, server);
-
-        struct dirent_extended* temp = getdir.get();
-
-        while(total_size < n) {
-            if(strlen(temp->d_name) == 0)
-                break;
+// The function each worker thread will execute
+void*
+worker_routine(void* arg) {
+    ThreadData* data = static_cast<ThreadData*>(arg);
+    unsigned long long local_found = 0;
+    unsigned long long local_checked = 0;
+
+    while(true) {
+        int server_id = -1;
+        pthread_mutex_lock(data->queue_mutex);
+        if(!data->server_queue->empty()) {
+            server_id = data->server_queue->front();
+            data->server_queue->pop();
+        }
+        pthread_mutex_unlock(data->queue_mutex);
 
-            total_size += temp->d_reclen;
-
-            /* Queue directory to process */
-            if(temp->d_type == 1) {
-                string slash;
-                if(path.back() != '/')
-                    slash = "/";
-                checked++;
-                dirs.push(path + slash + temp->d_name);
-                temp = reinterpret_cast<struct dirent_extended*>(
-                        reinterpret_cast<char*>(temp) + temp->d_reclen);
-                continue;
-            }
+        if(server_id == -1)
+            break;
 
-            /* Find filtering */
-            bool timeOK = true;
-            if(!opt->timestamp_file.empty()) {
-                if((uint64_t) temp->ctime < runtime.ctime_min)
-                    timeOK = false;
-            }
+        struct dirent_extended* entries = nullptr;
+        long n = gkfs_getsingleserverdir(data->workdir->c_str(), &entries,
+                                         server_id);
 
-            if(timeOK && (temp->size == opt->size ||
-                          opt->size == std::numeric_limits<uint64_t>::max())) {
-                if(opt->name_pattern.empty() ||
-                   regex_search(temp->d_name, opt->name_regex)) {
-                    found++;
-                }
+        if(n <= 0) {
+            if(n < 0) {
+                cerr << "Warning: Rank " << sfind_rank << " Thread "
+                     << data->thread_id << " received error from server "
+                     << server_id << endl;
             }
+            if(entries)
+                free(entries);
+            continue;
+        }
 
-            checked++;
-            temp = reinterpret_cast<struct dirent_extended*>(
-                    reinterpret_cast<char*>(temp) + temp->d_reclen);
+        char* ptr = reinterpret_cast<char*>(entries);
+        int bytes_processed = 0;
+        while(bytes_processed < n) {
+            struct dirent_extended* temp =
+                    reinterpret_cast<struct dirent_extended*>(ptr);
+            if(temp->d_reclen == 0)
+                break;
+
+            if(temp->d_type != 1) {
+                bool timeOK = data->opt->timestamp_file.empty() ||
+                              ((uint64_t) temp->ctime >= runtime.ctime_min);
+                bool sizeOK = (data->opt->size ==
+                                       std::numeric_limits<uint64_t>::max() ||
+                               temp->size == data->opt->size);
+                bool nameOK = data->opt->name_pattern.empty() ||
+                              regex_search(temp->d_name, data->opt->name_regex);
+
+                if(timeOK && sizeOK && nameOK)
+                    local_found++;
+            }
+            local_checked++;
+            bytes_processed += temp->d_reclen;
+            ptr += temp->d_reclen;
         }
+        free(entries);
     }
+
+    // Atomically add local results to the global counters
+    data->total_found->fetch_add(local_found);
+    data->total_checked->fetch_add(local_checked);
+
+    return nullptr;
 }
 
 int
-process(const pfind_options_t* opt) {
-    // Print off a hello world message
-    unsigned long long found = 0;
-    unsigned long long checked = 0;
-    runtime = {}; // Initialize runtime options
+process_parallel_pthreads(const pfind_options_t* opt) {
+    atomic<unsigned long long> global_found(0);
+    atomic<unsigned long long> global_checked(0);
+    runtime = {};
 
-    /* Get timestamp file */
     if(!opt->timestamp_file.empty()) {
-        if(pfind_rank == 0) {
-            struct stat timer_file;
-            if(lstat(opt->timestamp_file.c_str(), &timer_file) != 0) {
-                printf("Could not open: \"%s\", error: %s",
-                       opt->timestamp_file.c_str(), strerror(errno));
-                pfind_abort("\n");
-            }
-            runtime.ctime_min = timer_file.st_ctime;
+        struct stat timer_file;
+        if(lstat(opt->timestamp_file.c_str(), &timer_file) != 0) {
+            pfind_abort("Could not open timestamp file: " +
+                        opt->timestamp_file);
         }
+        runtime.ctime_min = timer_file.st_ctime;
     }
 
-    queue<string> dirs;
     string workdir = opt->workdir;
     if(workdir.rfind(opt->mountdir, 0) == 0) {
         workdir = workdir.substr(opt->mountdir.length());
    }
-    if(workdir.empty()) {
+    if(workdir.empty())
         workdir = "/";
+
+    // --- 1. Calculate this process's subset of servers ---
+    int servers_per_proc = opt->num_servers / sfind_size;
+    int remainder = opt->num_servers % sfind_size;
+    int start_server =
+            sfind_rank * servers_per_proc + min(sfind_rank, remainder);
+    int end_server =
+            start_server + servers_per_proc + (sfind_rank < remainder ? 1 : 0);
+    int num_servers_for_this_rank = end_server - start_server;
+
+    if(num_servers_for_this_rank <= 0) {
+        cout << "[Rank " << sfind_rank << "] No servers to process. Exiting."
+             << endl;
+        return 0;
     }
-    dirs.push(workdir);
 
-    while(!dirs.empty()) {
-        string processpath = dirs.front();
-        dirs.pop();
+    // --- 2. Populate the local work queue with this rank's servers ---
+    queue<int> server_queue;
+    for(int i = start_server; i < end_server; ++i) {
+        server_queue.push(i);
+    }
 
-        dirProcess(processpath, checked, found, dirs, 0, 1, opt);
-        // cout << "NO more paths " << dirs.size() << endl;
+    // --- Pthreads Setup ---
+    int num_threads = 20; // Default number of parallel requests
+    const char* env_threads = getenv("SFIND_NUM_THREADS");
+    if(env_threads) {
+        try {
+            num_threads = stoi(env_threads);
+        } catch(...) { /* ignore */
+        }
     }
+    num_threads = min(num_threads, num_servers_for_this_rank);
 
-    cout << "MATCHED " << found << "/" << checked << endl;
+    cout << "[Rank " << sfind_rank << "] Processing servers " << start_server
+         << "-" << end_server - 1 << " using " << num_threads << " threads."
+         << endl;
+
+    // --- WRITE LOG TO A UNIQUE FILE ---
+    string output_log = "gfind_log.rank-" + to_string(sfind_rank) + ".txt";
+    ofstream output_file2(output_log);
+    if(!output_file2.is_open()) {
+        pfind_abort("Failed to open output file: " + output_log);
+    }
+    output_file2 << "[Rank " << sfind_rank << "] Processing servers "
+                 << start_server << "-" << end_server - 1 << " using "
+                 << num_threads << " threads." << endl;
+    output_file2.close();
+
+    pthread_mutex_t queue_mutex;
+    pthread_mutex_init(&queue_mutex, nullptr);
+    vector<pthread_t> threads(num_threads);
+    vector<ThreadData> thread_args(num_threads);
+
+    for(int i = 0; i < num_threads; ++i) {
+        thread_args[i] = {i,
+                          opt,
+                          &workdir,
+                          &server_queue,
+                          &queue_mutex,
+                          &global_found,
+                          &global_checked};
+        pthread_create(&threads[i], nullptr, worker_routine, &thread_args[i]);
+    }
+
+    for(int i = 0; i < num_threads; ++i) {
+        pthread_join(threads[i], nullptr);
+    }
+    pthread_mutex_destroy(&queue_mutex);
+
+    // Results are already aggregated via atomics. Each process prints its own
+    // sub-total; the final aggregation must be done externally (e.g., with the
+    // pfind.sh script), using the result files or stdout.
+    cout << "MATCHED " << global_found << "/" << global_checked << endl;
+
+    // --- WRITE LOCAL RESULTS TO A UNIQUE FILE ---
+    string output_filename =
+            "gfind_results.rank-" + to_string(sfind_rank) + ".txt";
+    ofstream output_file(output_filename);
+    if(!output_file.is_open()) {
+        pfind_abort("Failed to open output file: " + output_filename);
+    }
+    output_file << "MATCHED " << global_found << "/" << global_checked << endl;
+    output_file.close();
 
     return 0;
 }
 
 int
 main(int argc, char** argv) {
-
-    for(int i = 0; i < argc; ++i) {
-        if(strcmp(argv[i], "--help") == 0) {
-            argv[i][0] = 0;
-            pfind_rank = 0;
-            opt = pfind_parse_args(argc, argv, true);
-            delete opt;
-            return 0;
+    // --- Get Rank/Size from SLURM Environment Variables ---
+    const char* env_rank = getenv("SLURM_PROCID");
+    const char* env_size = getenv("SLURM_NPROCS");
+
+    if(env_rank && env_size) {
+        try {
+            sfind_rank = stoi(env_rank);
+            sfind_size = stoi(env_size);
+        } catch(const std::exception& e) {
+            cerr << "Could not parse SLURM environment variables: " << e.what()
+                 << ". Falling back to sequential mode." << endl;
+            sfind_rank = 0;
+            sfind_size = 1;
+        }
+    } else {
+        if(getenv("SLURM_JOB_ID")) {
+            cerr << "Warning: SLURM_JOB_ID is set, but SLURM_PROCID/SLURM_NPROCS are not. Are you running with `srun`?"
+                 << endl;
         }
+        cout << "SLURM variables not found. Running in sequential mode (rank 0 of 1)."
+             << endl;
+        sfind_rank = 0;
+        sfind_size = 1;
     }
 
+    // Each process parses its own arguments. This is simple and correct.
     opt = pfind_parse_args(argc, argv, false);
 
-    int result = process(opt);
+    if(gkfs_getsingleserverdir == nullptr) {
+        pfind_abort(
+                "GekkoFS functions not available. Is the library preloaded?");
+    }
 
-    delete opt;
+    int result = process_parallel_pthreads(opt);
 
+    delete opt;
     return result;
 }
\ No newline at end of file
diff --git a/include/client/gkfs_functions.hpp b/include/client/gkfs_functions.hpp
index 5d14bb90a5fe21ad4b51c77ff156de55f4df1000..98799e11900fe1e09e9f41a7fb39b6900dfe4d0e 100644
--- a/include/client/gkfs_functions.hpp
+++ b/include/client/gkfs_functions.hpp
@@ -198,7 +198,7 @@ gkfs_msync(void* addr, size_t length, int flags);
 
 // gkfs_getsingleserverdir is using extern "C" to demangle it for C usage
 extern "C" int
-gkfs_getsingleserverdir(const char* path, struct dirent_extended* dirp,
-                        unsigned int count, int server);
+gkfs_getsingleserverdir(const char* path, struct dirent_extended** dirp,
+                        int server);
 
 #endif // GEKKOFS_GKFS_FUNCTIONS_HPP
diff --git a/include/config.hpp b/include/config.hpp
index c1ba919ca9c9eddeec6882f285b1f65ffd67c1f9..6f2f7cfb5672c3067f5de3c21e752481fcee8ad1 100644
--- a/include/config.hpp
+++ b/include/config.hpp
@@ -159,8 +159,8 @@ constexpr auto fwd_io_count_threshold = 0;
 namespace rpc {
 constexpr auto chunksize = 524288; // in bytes (e.g., 524288 == 512KB)
 // size of preallocated buffer to hold directory entries in rpc call
-constexpr auto dirents_buff_size = (8 * 1024 * 1024);         // 8 mega
-constexpr auto dirents_buff_size_proxy = (128 * 1024 * 1024); // 8 mega
+constexpr auto dirents_buff_size = (8 * 1024 * 1024);       // 8 mega
+constexpr auto dirents_buff_size_proxy = (8 * 1024 * 1024); // 8 mega
 /*
  * Indicates the number of concurrent progress to drive I/O operations of chunk
  * files to and from local file systems The value is directly mapped to created
@@ -171,6 +171,8 @@
 constexpr auto daemon_io_xstreams = 8;
 constexpr auto daemon_handler_xstreams = 4;
 // Number of threads used for RPC handlers at the proxy
 constexpr auto proxy_handler_xstreams = 3;
+// Enable compression for directory entries transfer
+constexpr auto use_dirents_compression = false;
 } // namespace rpc
 
 namespace rocksdb {
diff --git a/include/daemon/backend/metadata/db.hpp b/include/daemon/backend/metadata/db.hpp
index 1abaa2bc91b3d519574e293a4f4c2375c9c8a304..72fe2f9b2291cd194d7dfa87aa5f71392eee7391 100644
--- a/include/daemon/backend/metadata/db.hpp
+++ b/include/daemon/backend/metadata/db.hpp
@@ -170,6 +170,18 @@
     [[nodiscard]] std::vector<std::tuple<std::string, bool, size_t, time_t>>
     get_dirents_extended(const std::string& dir) const;
 
+
+    /**
+     * @brief Return all file names and modes for all the entries of the
+     * given directory, including their sizes and creation times.
+     * @param dir directory prefix string
+     * @return vector of tuples <name, is_dir, size, ctime>,
+     * where name is the name of the entry and is_dir
+     * is true in the case the entry is a directory.
+     */
+    [[nodiscard]] std::vector<std::tuple<std::string, bool, size_t, time_t>>
+    get_all_dirents_extended(const std::string& dir) const;
+
     /**
      * @brief Iterate over complete database, note ONLY used for debugging and
      * is therefore unused.
diff --git a/include/daemon/backend/metadata/metadata_backend.hpp b/include/daemon/backend/metadata/metadata_backend.hpp
index 0c1840a5af305e0aa449c20030ed5352fa61fe44..bdafa5d7c4e85d02e891a945aefe961c08dfac73 100644
--- a/include/daemon/backend/metadata/metadata_backend.hpp
+++ b/include/daemon/backend/metadata/metadata_backend.hpp
@@ -83,6 +83,9 @@
     virtual std::vector<std::tuple<std::string, bool, size_t, time_t>>
     get_dirents_extended(const std::string& dir) const = 0;
 
+    virtual std::vector<std::tuple<std::string, bool, size_t, time_t>>
+    get_all_dirents_extended(const std::string& dir) const = 0;
+
     virtual void*
     iterate_all() const = 0;
 
@@ -150,6 +153,11 @@
         return static_cast<const T&>(*this).get_dirents_extended_impl(dir);
     }
 
+    std::vector<std::tuple<std::string, bool, size_t, time_t>>
+    get_all_dirents_extended(const std::string& dir) const {
+        return static_cast<const T&>(*this).get_all_dirents_extended_impl(dir);
+    }
+
     void*
     iterate_all() const {
         return static_cast<const T&>(*this).iterate_all_impl();
diff --git a/include/daemon/backend/metadata/parallax_backend.hpp b/include/daemon/backend/metadata/parallax_backend.hpp
index 11a6de55c3c52b4aab7940f1b7fed15a1783de89..ced3522eabb0a138f5c90f6784b51dec80a3d0f8 100644
--- a/include/daemon/backend/metadata/parallax_backend.hpp
+++ b/include/daemon/backend/metadata/parallax_backend.hpp
@@ -196,6 +196,9 @@
     std::vector<std::tuple<std::string, bool, size_t, time_t>>
     get_dirents_extended_impl(const std::string& dir) const;
 
+    std::vector<std::tuple<std::string, bool, size_t, time_t>>
+    get_all_dirents_extended_impl(const std::string& root_path) const;
+
     /**
      * Code example for iterating all entries in KV store. This is for debug
      * only as it is too expensive
diff --git a/include/daemon/backend/metadata/rocksdb_backend.hpp b/include/daemon/backend/metadata/rocksdb_backend.hpp
index da33cdded237761298924dfc6cf367d73e2b9658..1b0a1fd006276343948eca288546dad000ff294c 100644
--- a/include/daemon/backend/metadata/rocksdb_backend.hpp
+++ b/include/daemon/backend/metadata/rocksdb_backend.hpp
@@ -180,9 +180,13 @@
     std::vector<std::tuple<std::string, bool, size_t, time_t>>
     get_dirents_extended_impl(const std::string& dir) const;
 
+
+    std::vector<std::tuple<std::string, bool, size_t, time_t>>
+    get_all_dirents_extended_impl(const std::string& root_path) const;
+
     /**
-     * Code example for iterating all entries in KV store. This is for debug
-     * only as it is too expensive
+     * Code example for iterating all entries in KV store. This is for
This is for + * debug only as it is too expensive */ void* iterate_all_impl() const; diff --git a/include/daemon/ops/metadentry.hpp b/include/daemon/ops/metadentry.hpp index a3dc84a89c25818713d11693998ecf472b6b22ea..22868ea6b94b05058f456ab311b49aefbcb30fe0 100644 --- a/include/daemon/ops/metadentry.hpp +++ b/include/daemon/ops/metadentry.hpp @@ -88,6 +88,16 @@ get_dirents(const std::string& dir); std::vector> get_dirents_extended(const std::string& dir); + +/** + * @brief Returns a vector of directory entries for given directory (extended + * version, all) + * @param dir + * @return + */ +std::vector> +get_all_dirents_extended(const std::string& dir); + /** * @brief Creates metadata (if required) and dentry at the same time * @param path diff --git a/include/proxy/rpc/forward_metadata.hpp b/include/proxy/rpc/forward_metadata.hpp index 01dc8bf2eee037861ac5383fbe104474dea71db1..c447197d2c619f2e9876952ec9c9f0b6385556b3 100644 --- a/include/proxy/rpc/forward_metadata.hpp +++ b/include/proxy/rpc/forward_metadata.hpp @@ -47,9 +47,8 @@ std::pair forward_update_metadentry_size(const std::string& path, const size_t size, const off64_t offset, const bool append_flag); -std::pair -forward_get_dirents_single(const std::string& path, int server, void* buf, - const size_t bulk_size); +std::pair> +forward_get_dirents_single(const std::string& path, int server); } // namespace gkfs::rpc diff --git a/scripts/compile_dep.sh b/scripts/compile_dep.sh index f0d8488b4c0645b421b41bc880eb8450984d9e52..edff9eec0693e87d190961ad9c57302df22e1b4e 100755 --- a/scripts/compile_dep.sh +++ b/scripts/compile_dep.sh @@ -301,7 +301,7 @@ determine_compiler() { # We honor the CXX environment variable if defined. # Otherwise, we try to find the compiler by using `command -v`. - if [[ -n "${CXX}" && ! "${CXX}" =~ ^.*(g\+\+|clang)$ ]]; then + if [[ -n "${CXX}" && ! "${CXX}" =~ (g\+\+|clang) ]]; then echo "ERROR: Unknown compiler '${CXX}'" exit 1 fi diff --git a/scripts/profiles/0.9.6-dev/agios.specs b/scripts/profiles/0.9.6-dev/agios.specs new file mode 100644 index 0000000000000000000000000000000000000000..8e1755f3bb6bfd18178fbcaac89ed4e2ff58a3d9 --- /dev/null +++ b/scripts/profiles/0.9.6-dev/agios.specs @@ -0,0 +1,76 @@ +################################################################################ +# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. # +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see . 
+#                                                                              #
+# SPDX-License-Identifier: GPL-3.0-or-later                                    #
+################################################################################
+
+# vi: ft=bash
+
+# Variables to be imported into the scripts
+declare -A wgetdeps clonedeps clonedeps_args clonedeps_patches extra_install_args
+declare -a order
+
+# Comment that should be displayed when printing the profile
+comment="All dependencies (except transport-specific and experimental)"
+
+# Dependencies that must be downloaded directly
+wgetdeps=(
+  ["zstd"]="1.5.7"
+  ["lz4"]="1.9.4"
+  ["capstone"]="6.0.0-Alpha1"
+  ["argobots"]="1.2"
+  ["rocksdb"]="10.4.2"
+  ["json-c"]="0.17-20230812"
+)
+
+# Dependencies that must be cloned
+clonedeps=(
+  ["libfabric"]="HEAD@v2.2.0"
+  ["mercury"]="v2.4.1rc1"
+  ["margo"]="v0.18.3"
+  ["syscall_intercept"]="d8b2a69961921ed123625c79a609331fc56a8931"
+  ["agios"]="c26a6544200f823ebb8f890dd94e653d148bf226@development"
+)
+
+# Extra arguments for git clone
+clonedeps_args=(
+  ["mercury"]="--recurse-submodules"
+)
+
+# Ordering that MUST be followed when downloading
+order=(
+  "lz4" "zstd" "capstone" "json-c" "libfabric" "mercury" "argobots" "margo" "rocksdb" "syscall_intercept" "agios"
+)
+
+# Extra arguments passed to the installation script. As such, they can
+# reference the following variables:
+# - CMAKE: a variable that expands to the cmake binary
+# - SOURCE_DIR: the directory where the sources for the package were
+#   downloaded
+# - INSTALL_DIR: the directory where the package should be installed
+# - CORES: the number of cores to use when building
+# - PERFORM_TEST: whether tests for the package should be executed
+extra_install_args=(
+)
diff --git a/scripts/profiles/0.9.6-dev/arm.specs b/scripts/profiles/0.9.6-dev/arm.specs
new file mode 100644
index 0000000000000000000000000000000000000000..573ff4268b8497831ccc5d97375effd0bcabca29
--- /dev/null
+++ b/scripts/profiles/0.9.6-dev/arm.specs
@@ -0,0 +1,78 @@
+################################################################################
+# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain            #
+# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany          #
+#                                                                              #
+# This software was partially supported by the                                 #
+# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu).    #
+#                                                                              #
+# This software was partially supported by the                                 #
+# ADA-FS project under the SPPEXA project funded by the DFG.                   #
+#                                                                              #
+# This file is part of GekkoFS.                                                #
+#                                                                              #
+# GekkoFS is free software: you can redistribute it and/or modify              #
+# it under the terms of the GNU General Public License as published by         #
+# the Free Software Foundation, either version 3 of the License, or            #
+# (at your option) any later version.                                          #
+#                                                                              #
+# GekkoFS is distributed in the hope that it will be useful,                   #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of               #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the                 #
+# GNU General Public License for more details.                                 #
+#                                                                              #
+# You should have received a copy of the GNU General Public License            #
+# along with GekkoFS. If not, see <https://www.gnu.org/licenses/>.             #
+#                                                                              #
+# SPDX-License-Identifier: GPL-3.0-or-later                                    #
+################################################################################
+
+# vi: ft=bash
+
+# Variables to be imported into the scripts
+declare -A wgetdeps clonedeps clonedeps_args clonedeps_patches extra_install_args
+declare -a order
+
+# Comment that should be displayed when printing the profile
+comment="Dependencies for ARM supercomputers"
+
+# Dependencies that must be downloaded directly
+wgetdeps=(
+  ["zstd"]="1.5.7"
+  ["lz4"]="1.9.4"
+  ["capstone"]="6.0.0-Alpha1"
+  ["argobots"]="1.2"
+  ["rocksdb"]="8.10.0"
+  ["json-c"]="0.17-20230812"
+  ["psm2"]="11.2.185"
+)
+
+# Dependencies that must be cloned
+clonedeps=(
+  ["libfabric"]="HEAD@v1.20.1"
+  ["mercury"]="v2.4.0"
+  ["margo"]="v0.18.3"
+  ["syscall_intercept"]="88043bdbbc60801d4fbe0076962ed1a766ba4800"
+)
+
+# Extra arguments for git clone
+clonedeps_args=(
+  ["mercury"]="--recurse-submodules"
+)
+
+# Ordering that MUST be followed when downloading
+order=(
+  "lz4" "zstd" "capstone" "json-c" "psm2" "libfabric" "mercury" "argobots" "margo" "rocksdb" "syscall_intercept"
+)
+
+# Extra arguments passed to the installation script. As such, they can
+# reference the following variables:
+# - CMAKE: a variable that expands to the cmake binary
+# - SOURCE_DIR: the directory where the sources for the package were
+#   downloaded
+# - INSTALL_DIR: the directory where the package should be installed
+# - CORES: the number of cores to use when building
+# - PERFORM_TEST: whether tests for the package should be executed
+extra_install_args=(
+  ["libfabric"]="--enable-psm2=no --enable-sockets=yes"
+  ["syscall_intercept"]="ARM64"
+)
diff --git a/scripts/profiles/0.9.6-dev/ci.specs b/scripts/profiles/0.9.6-dev/ci.specs
new file mode 100644
index 0000000000000000000000000000000000000000..7c07b5009140897eb0d5f82ea55cb239faa69049
--- /dev/null
+++ b/scripts/profiles/0.9.6-dev/ci.specs
@@ -0,0 +1,76 @@
+################################################################################
+# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain            #
+# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany          #
+#                                                                              #
+# This software was partially supported by the                                 #
+# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu).    #
+#                                                                              #
+# This software was partially supported by the                                 #
+# ADA-FS project under the SPPEXA project funded by the DFG.                   #
+#                                                                              #
+# This file is part of GekkoFS.                                                #
+#                                                                              #
+# GekkoFS is free software: you can redistribute it and/or modify              #
+# it under the terms of the GNU General Public License as published by         #
+# the Free Software Foundation, either version 3 of the License, or            #
+# (at your option) any later version.                                          #
+#                                                                              #
+# GekkoFS is distributed in the hope that it will be useful,                   #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of               #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the                 #
+# GNU General Public License for more details.                                 #
+#                                                                              #
+# You should have received a copy of the GNU General Public License            #
+# along with GekkoFS. If not, see <https://www.gnu.org/licenses/>.             #
+#                                                                              #
+# SPDX-License-Identifier: GPL-3.0-or-later                                    #
+################################################################################
+
+# vi: ft=bash
+
+# Variables to be imported into the scripts
+declare -A wgetdeps clonedeps clonedeps_args clonedeps_patches extra_install_args
+declare -a order
+
+# Comment that should be displayed when printing the profile
+comment="Dependencies required by the CI"
+
+# Dependencies that must be downloaded directly
+wgetdeps=(
+  ["argobots"]="1.2"
+  ["rocksdb"]="10.4.2"
+  ["prometheus-cpp"]="v1.0.0"
+  ["capstone"]="6.0.0-Alpha1"
+)
+
+# Dependencies that must be cloned
+clonedeps=(
+  ["libfabric"]="HEAD@v2.2.0"
+  ["mercury"]="v2.4.1rc1"
+  ["margo"]="v0.18.3"
+  ["syscall_intercept"]="d8b2a69961921ed123625c79a609331fc56a8931"
+  ["agios"]="c26a6544200f823ebb8f890dd94e653d148bf226@development"
+  ["parallax"]="ffdea6e820f5c4c2d33e60d9a4b15ef9e6bbcfdd"
+)
+
+# Extra arguments for git clone
+clonedeps_args=(
+  ["mercury"]="--recurse-submodules"
+)
+
+# Ordering that MUST be followed when downloading
+order=(
+  "capstone" "libfabric" "mercury" "argobots" "margo" "rocksdb" "syscall_intercept"
+  "agios" "parallax" "prometheus-cpp"
+)
+
+# Extra arguments passed to the installation script. As such, they can
+# reference the following variables:
+# - CMAKE: a variable that expands to the cmake binary
+# - SOURCE_DIR: the directory where the sources for the package were
+#   downloaded
+# - INSTALL_DIR: the directory where the package should be installed
+# - CORES: the number of cores to use when building
+# - PERFORM_TEST: whether tests for the package should be executed
+extra_install_args=(
+)
diff --git a/scripts/profiles/0.9.6-dev/default.specs b/scripts/profiles/0.9.6-dev/default.specs
new file mode 100644
index 0000000000000000000000000000000000000000..c4e54c5d90a4996f7cc3d154421c369d956953d8
--- /dev/null
+++ b/scripts/profiles/0.9.6-dev/default.specs
@@ -0,0 +1,75 @@
+################################################################################
+# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain            #
+# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany          #
+#                                                                              #
+# This software was partially supported by the                                 #
+# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu).    #
+#                                                                              #
+# This software was partially supported by the                                 #
+# ADA-FS project under the SPPEXA project funded by the DFG.                   #
+#                                                                              #
+# This file is part of GekkoFS.                                                #
+#                                                                              #
+# GekkoFS is free software: you can redistribute it and/or modify              #
+# it under the terms of the GNU General Public License as published by         #
+# the Free Software Foundation, either version 3 of the License, or            #
+# (at your option) any later version.                                          #
+#                                                                              #
+# GekkoFS is distributed in the hope that it will be useful,                   #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of               #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the                 #
+# GNU General Public License for more details.                                 #
+#                                                                              #
+# You should have received a copy of the GNU General Public License            #
+# along with GekkoFS. If not, see <https://www.gnu.org/licenses/>.             #
# +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ + +# vi: ft=bash + +# Variables to be imported into the scripts +declare -A wgetdeps clonedeps clonedeps_args clonedeps_patches extra_install_args +declare -a order + +# Comment that should be displayed when printing the profile +comment="All dependencies" + +# Dependencies that must be downloaded directly +wgetdeps=( + ["zstd"]="1.5.7" + ["lz4"]="1.9.4" + ["capstone"]="6.0.0-Alpha1" + ["argobots"]="1.2" + ["rocksdb"]="10.4.2" + ["json-c"]="0.17-20230812" +) + +# Dependencies that must be cloned. +clonedeps=( + ["libfabric"]="HEAD@v2.2.0" + ["mercury"]="v2.4.1rc1" + ["margo"]="v0.18.3" + ["syscall_intercept"]="d8b2a69961921ed123625c79a609331fc56a8931" +) + +# Extra arguments for git clone +clonedeps_args=( + ["mercury"]="--recurse-submodules" +) + +# Ordering that MUST be followed when downloading +order=( + "lz4" "zstd" "capstone" "json-c" "libfabric" "mercury" "argobots" "margo" "rocksdb" "syscall_intercept" +) + +# Extra arguments passed to the installation script. As such, they can +# reference the following variables: +# - CMAKE: a variable that expands to the cmake binary +# - SOURCE_DIR: the directory where the sources for the package were +# downloaded +# - INSTALL_DIR: the directory where the package should be installed +# - CORES: the number of cores to use when building +# - PERFORM_TEST: whether tests for the package should be executed +extra_install_args=( +) diff --git a/scripts/profiles/0.9.6-dev/default_zmq.specs b/scripts/profiles/0.9.6-dev/default_zmq.specs new file mode 100644 index 0000000000000000000000000000000000000000..b9a58a7b34744603f52fb49ecb057146e3a3a52b --- /dev/null +++ b/scripts/profiles/0.9.6-dev/default_zmq.specs @@ -0,0 +1,77 @@ +################################################################################ +# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. # +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see . 
# +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ + +# vi: ft=bash + +# Variables to be imported into the scripts +declare -A wgetdeps clonedeps clonedeps_args clonedeps_patches extra_install_args +declare -a order + +# Comment that should be displayed when printing the profile +comment="All dependencies" + +# Dependencies that must be downloaded directly +wgetdeps=( + ["zstd"]="1.5.7" + ["lz4"]="1.9.4" + ["capstone"]="6.0.0-Alpha1" + ["argobots"]="1.2" + ["rocksdb"]="10.4.2" + ["json-c"]="0.17-20230812" + ["libzmq"]="4.3.5" + ["cppzmq"]="4.10.0" +) + +# Dependencies that must be cloned. +clonedeps=( + ["libfabric"]="HEAD@v2.2.0" + ["mercury"]="v2.4.1rc1" + ["margo"]="v0.18.3" + ["syscall_intercept"]="d8b2a69961921ed123625c79a609331fc56a8931" +) + +# Extra arguments for git clone +clonedeps_args=( + ["mercury"]="--recurse-submodules" +) + +# Ordering that MUST be followed when downloading +order=( + "lz4" "zstd" "capstone" "json-c" "libfabric" "mercury" "argobots" "margo" "rocksdb" "syscall_intercept" "libzmq" "cppzmq" +) + +# Extra arguments passed to the installation script. As such, they can +# reference the following variables: +# - CMAKE: a variable that expands to the cmake binary +# - SOURCE_DIR: the directory where the sources for the package were +# downloaded +# - INSTALL_DIR: the directory where the package should be installed +# - CORES: the number of cores to use when building +# - PERFORM_TEST: whether tests for the package should be executed +extra_install_args=( +) diff --git a/scripts/profiles/0.9.6-dev/infiniband_verbs.specs b/scripts/profiles/0.9.6-dev/infiniband_verbs.specs new file mode 100644 index 0000000000000000000000000000000000000000..c3da68052b33437e533b6ab7e63ee5298cb91c59 --- /dev/null +++ b/scripts/profiles/0.9.6-dev/infiniband_verbs.specs @@ -0,0 +1,77 @@ +################################################################################ +# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. # +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see . 
# +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ + +# vi: ft=bash + +# Variables to be imported into the scripts +declare -A wgetdeps clonedeps clonedeps_args clonedeps_patches extra_install_args +declare -a order + +# Comment that should be displayed when printing the profile +comment="Dependencies for Infiniband supercomputer" + +# Dependencies that must be downloaded directly +wgetdeps=( + ["zstd"]="1.5.7" + ["lz4"]="1.9.4" + ["capstone"]="6.0.0-Alpha1" + ["argobots"]="1.2" + ["rocksdb"]="10.4.2" + ["json-c"]="0.17-20230812" +) + +# Dependencies that must be cloned +clonedeps=( + ["libfabric%verbs"]="HEAD@v2.2.0" + ["mercury"]="v2.4.1rc1" + ["margo"]="v0.18.3" + ["syscall_intercept"]="d8b2a69961921ed123625c79a609331fc56a8931" +) + +# Extra arguments for git clone +clonedeps_args=( + ["mercury"]="--recurse-submodules" +) + +# Ordering that MUST be followed when downloading/installing +order=( + "lz4" "zstd" "capstone" "json-c" "libfabric%verbs" "mercury" "argobots" "margo" "rocksdb" + "syscall_intercept" +) + +# Extra arguments passed to the installation script. As such, they can +# reference the following variables: +# - CMAKE: a variable that expands to the cmake binary +# - SOURCE_DIR: the directory where the sources for the package were +# downloaded +# - INSTALL_DIR: the directory where the package should be installed +# - CORES: the number of cores to use when building +# - PERFORM_TEST: whether tests for the package should be executed +extra_install_args=( + ["libfabric%verbs"]="--enable-verbs=yes" +) diff --git a/scripts/profiles/0.9.6-dev/install/agios.install b/scripts/profiles/0.9.6-dev/install/agios.install new file mode 100644 index 0000000000000000000000000000000000000000..e24a89fce157fadcca6f8279ae361acdab83d3c5 --- /dev/null +++ b/scripts/profiles/0.9.6-dev/install/agios.install @@ -0,0 +1,57 @@ +################################################################################ +# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. # +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see . # +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ +# vi: ft=bash + +################################################################################ +## The installation script must define both a pkg_install function and +## pkg_check function that, as their name implies, must specify how +## a dependency package should be installed and tested. 
## ## The following +## variables can be used in the installation script: +## - CMAKE: a variable that expands to the cmake binary +## - SOURCE_DIR: the directory where the sources for the package were +## downloaded +## - INSTALL_DIR: the directory where the package should be installed +## - CORES: the number of cores to use when building +## - COMPILER_NAME: the name of the compiler being used (e.g. g++, clang, etc.) +## - COMPILER_FULL_VERSION: the compiler's full version (e.g. 9.3.0) +## - COMPILER_MAJOR_VERSION: the compiler's major version (e.g. 9) +## - PERFORM_TEST: whether tests for the package should be executed +################################################################################ + +pkg_install() { + ID="agios" + CURR="${SOURCE_DIR}/${ID}" + prepare_build_dir "${CURR}" + cd "${CURR}/build" + ${CMAKE} -DCMAKE_INSTALL_PREFIX="${INSTALL_DIR}" .. + make install +} + +pkg_check() { + : +} diff --git a/scripts/profiles/0.9.6-dev/install/argobots.install b/scripts/profiles/0.9.6-dev/install/argobots.install new file mode 100644 index 0000000000000000000000000000000000000000..f50e57301d561362590ae6089c4f92da6237926c --- /dev/null +++ b/scripts/profiles/0.9.6-dev/install/argobots.install @@ -0,0 +1,60 @@ +################################################################################ +# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. # +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see . # +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ +# vi: ft=bash + +################################################################################ +## The installation script must define both a pkg_install function and +## pkg_check function that, as their name implies, must specify how +## a dependency package should be installed and tested. ## ## The following +## variables can be used in the installation script: +## - CMAKE: a variable that expands to the cmake binary +## - SOURCE_DIR: the directory where the sources for the package were +## downloaded +## - INSTALL_DIR: the directory where the package should be installed +## - CORES: the number of cores to use when building +## - COMPILER_NAME: the name of the compiler being used (e.g. g++, clang, etc.) +## - COMPILER_FULL_VERSION: the compiler's full version (e.g. 9.3.0) +## - COMPILER_MAJOR_VERSION: the compiler's major version (e.g. 
9) +## - PERFORM_TEST: whether tests for the package should be executed +################################################################################ + +pkg_install() { + ID="argobots" + CURR="${SOURCE_DIR}/${ID}" + prepare_build_dir "${CURR}" + cd "${CURR}" + ./autogen.sh + cd "${CURR}/build" + ../configure --prefix="${INSTALL_DIR}" --enable-perf-opt --disable-checks + make -j"${CORES}" + make install +} + +pkg_check() { + make check +} diff --git a/scripts/profiles/0.9.6-dev/install/capstone.install b/scripts/profiles/0.9.6-dev/install/capstone.install new file mode 100644 index 0000000000000000000000000000000000000000..78bdb81f6b53cae29145b04b0c6671c4cc712f09 --- /dev/null +++ b/scripts/profiles/0.9.6-dev/install/capstone.install @@ -0,0 +1,57 @@ +################################################################################ +# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. # +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see . # +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ +# vi: ft=bash + +################################################################################ +## The installation script must define both a pkg_install function and +## pkg_check function that, as their name implies, must specify how +## a dependency package should be installed and tested. ## ## The following +## variables can be used in the installation script: +## - CMAKE: a variable that expands to the cmake binary +## - SOURCE_DIR: the directory where the sources for the package were +## downloaded +## - INSTALL_DIR: the directory where the package should be installed +## - CORES: the number of cores to use when building +## - COMPILER_NAME: the name of the compiler being used (e.g. g++, clang, etc.) +## - COMPILER_FULL_VERSION: the compiler's full version (e.g. 9.3.0) +## - COMPILER_MAJOR_VERSION: the compiler's major version (e.g. 9) +## - PERFORM_TEST: whether tests for the package should be executed +################################################################################ + +pkg_install() { + ID="capstone" + CURR="${SOURCE_DIR}/${ID}" + prepare_build_dir "${CURR}" + cd "${CURR}/build" + ${CMAKE} -DCMAKE_INSTALL_PREFIX="${INSTALL_DIR}" -DCMAKE_BUILD_TYPE:STRING=Release -DBUILD_SHARED_LIBS=ON .. 
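+ # capstone provides the disassembler engine used by syscall_intercept; build the shared library and install it in a single pass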
+ make -j"${CORES}" install +} + +pkg_check() { + : +} diff --git a/scripts/profiles/0.9.6-dev/install/cppzmq.install b/scripts/profiles/0.9.6-dev/install/cppzmq.install new file mode 100644 index 0000000000000000000000000000000000000000..eb0e5d8531ce0d1ab42ed4eb4b8292998719b58d --- /dev/null +++ b/scripts/profiles/0.9.6-dev/install/cppzmq.install @@ -0,0 +1,62 @@ +################################################################################ +# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. # +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see . # +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ +# vi: ft=bash + +################################################################################ +## The installation script must define both a pkg_install function and +## pkg_check function that, as their name implies, must specify how +## a dependency package should be installed and tested. ## ## The following +## variables can be used in the installation script: +## - CMAKE: a variable that expands to the cmake binary +## - SOURCE_DIR: the directory where the sources for the package were +## downloaded +## - INSTALL_DIR: the directory where the package should be installed +## - CORES: the number of cores to use when building +## - COMPILER_NAME: the name of the compiler being used (e.g. g++, clang, etc.) +## - COMPILER_FULL_VERSION: the compiler's full version (e.g. 9.3.0) +## - COMPILER_MAJOR_VERSION: the compiler's major version (e.g. 9) +## - PERFORM_TEST: whether tests for the package should be executed +################################################################################ + +pkg_install() { + ID="cppzmq" + CURR="${SOURCE_DIR}/${ID}" + prepare_build_dir "${CURR}" + cd "${CURR}/build" + $CMAKE \ + -DCMAKE_PREFIX_PATH=${INSTALL_DIR} \ + -DCMAKE_BUILD_TYPE:STRING=Release \ + -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} \ + .. 
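+ # cppzmq is a header-only wrapper around libzmq, so this step mainly installs headers and CMake package files into INSTALL_DIR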
+ make -j"${CORES}" + make install +} + +pkg_check() { + : +} diff --git a/scripts/profiles/0.9.6-dev/install/curl.install b/scripts/profiles/0.9.6-dev/install/curl.install new file mode 100644 index 0000000000000000000000000000000000000000..76bf9511d3413d19e8f46518b2e77710fff460b4 --- /dev/null +++ b/scripts/profiles/0.9.6-dev/install/curl.install @@ -0,0 +1,58 @@ +################################################################################ +# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. # +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see . # +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ +# vi: ft=bash + +################################################################################ +## The installation script must define both a pkg_install function and +## pkg_check function that, as their name implies, must specify how +## a dependency package should be installed and tested. ## ## The following +## variables can be used in the installation script: +## - CMAKE: a variable that expands to the cmake binary +## - SOURCE_DIR: the directory where the sources for the package were +## downloaded +## - INSTALL_DIR: the directory where the package should be installed +## - CORES: the number of cores to use when building +## - COMPILER_NAME: the name of the compiler being used (e.g. g++, clang, etc.) +## - COMPILER_FULL_VERSION: the compiler's full version (e.g. 9.3.0) +## - COMPILER_MAJOR_VERSION: the compiler's major version (e.g. 9) +## - PERFORM_TEST: whether tests for the package should be executed +################################################################################ + +pkg_install() { + ID="curl" + CURR="${SOURCE_DIR}/${ID}" + cd "${CURR}" + autoreconf -fi + ./configure --prefix="${INSTALL_DIR}" --without-ssl + make -j"${CORES}" + make install +} + +pkg_check() { + : +} diff --git a/scripts/profiles/0.9.6-dev/install/json-c.install b/scripts/profiles/0.9.6-dev/install/json-c.install new file mode 100644 index 0000000000000000000000000000000000000000..2c9b1f0eb28a754a2e4d515b732da41c9770ab3e --- /dev/null +++ b/scripts/profiles/0.9.6-dev/install/json-c.install @@ -0,0 +1,60 @@ +################################################################################ +# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). 
# +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. # +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see <https://www.gnu.org/licenses/>. # +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ +# vi: ft=bash + +################################################################################ +## The installation script must define both a pkg_install function and +## pkg_check function that, as their name implies, must specify how +## a dependency package should be installed and tested. +## +## The following +## variables can be used in the installation script: +## - CMAKE: a variable that expands to the cmake binary +## - SOURCE_DIR: the directory where the sources for the package were +## downloaded +## - INSTALL_DIR: the directory where the package should be installed +## - CORES: the number of cores to use when building +## - COMPILER_NAME: the name of the compiler being used (e.g. g++, clang, etc.) +## - COMPILER_FULL_VERSION: the compiler's full version (e.g. 9.3.0) +## - COMPILER_MAJOR_VERSION: the compiler's major version (e.g. 9) +## - PERFORM_TEST: whether tests for the package should be executed +################################################################################ + +pkg_install() { + CURR="${SOURCE_DIR}/json-c" + prepare_build_dir "${CURR}" + cd "${CURR}/build" + ${CMAKE} -DCMAKE_INSTALL_PREFIX="${INSTALL_DIR}" -DCMAKE_BUILD_TYPE:STRING=Release -DCMAKE_POLICY_VERSION_MINIMUM=3.5 .. + make -j"${CORES}" install + # Margo's pkg-config lookup does not search both lib and lib64, so make json-c.pc available in both + if [[ -f "${INSTALL_DIR}/lib64/pkgconfig/json-c.pc" ]]; then + cp ${INSTALL_DIR}/lib64/pkgconfig/json-c.pc ${INSTALL_DIR}/lib/pkgconfig/ + fi +} + +pkg_check() { + : +} diff --git a/scripts/profiles/0.9.6-dev/install/libfabric%verbs.install b/scripts/profiles/0.9.6-dev/install/libfabric%verbs.install new file mode 100644 index 0000000000000000000000000000000000000000..49306dae06bae7078048ce1a3868c109a01d3ea3 --- /dev/null +++ b/scripts/profiles/0.9.6-dev/install/libfabric%verbs.install @@ -0,0 +1,67 @@ +################################################################################ +# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. 
# +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see . # +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ +# vi: ft=bash + +################################################################################ +## The installation script must define both a pkg_install function and +## pkg_check function that, as their name implies, must specify how +## a dependency package should be installed and tested. ## ## The following +## variables can be used in the installation script: +## - CMAKE: a variable that expands to the cmake binary +## - SOURCE_DIR: the directory where the sources for the package were +## downloaded +## - INSTALL_DIR: the directory where the package should be installed +## - CORES: the number of cores to use when building +## - COMPILER_NAME: the name of the compiler being used (e.g. g++, clang, etc.) +## - COMPILER_FULL_VERSION: the compiler's full version (e.g. 9.3.0) +## - COMPILER_MAJOR_VERSION: the compiler's major version (e.g. 9) +## - PERFORM_TEST: whether tests for the package should be executed +################################################################################ + +pkg_install() { + ID="libfabric%verbs" + CURR="${SOURCE_DIR}/${ID}" + prepare_build_dir "${CURR}" + cd "${CURR}" + ./autogen.sh + cd "${CURR}/build" + OFI_CONFIG="../configure --prefix=${INSTALL_DIR} --enable-tcp=yes" + + EXTRA_INSTALL_ARGS="${PROFILE_EXTRA_INSTALL_ARGS[${ID}]}" + + if [[ -n "${EXTRA_INSTALL_ARGS}" ]]; then + OFI_CONFIG="${OFI_CONFIG} ${EXTRA_INSTALL_ARGS}" + fi + + ${OFI_CONFIG} + make -j"${CORES}" install +} + +pkg_check() { + make check +} diff --git a/scripts/profiles/0.9.6-dev/install/libfabric.install b/scripts/profiles/0.9.6-dev/install/libfabric.install new file mode 100644 index 0000000000000000000000000000000000000000..72ffd9d32357b5d57a02fa7ed7244fcf69a9e575 --- /dev/null +++ b/scripts/profiles/0.9.6-dev/install/libfabric.install @@ -0,0 +1,67 @@ +################################################################################ +# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. # +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see . # +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ +# vi: ft=bash + +################################################################################ +## The installation script must define both a pkg_install function and +## pkg_check function that, as their name implies, must specify how +## a dependency package should be installed and tested. ## ## The following +## variables can be used in the installation script: +## - CMAKE: a variable that expands to the cmake binary +## - SOURCE_DIR: the directory where the sources for the package were +## downloaded +## - INSTALL_DIR: the directory where the package should be installed +## - CORES: the number of cores to use when building +## - COMPILER_NAME: the name of the compiler being used (e.g. g++, clang, etc.) +## - COMPILER_FULL_VERSION: the compiler's full version (e.g. 9.3.0) +## - COMPILER_MAJOR_VERSION: the compiler's major version (e.g. 9) +## - PERFORM_TEST: whether tests for the package should be executed +################################################################################ + +pkg_install() { + ID=libfabric + CURR="${SOURCE_DIR}/${ID}" + prepare_build_dir "${CURR}" + cd "${CURR}" + ./autogen.sh + cd "${CURR}/build" + OFI_CONFIG="../configure --prefix=${INSTALL_DIR} --enable-tcp=yes" + + EXTRA_INSTALL_ARGS="${PROFILE_EXTRA_INSTALL_ARGS[${ID}]}" + + if [[ -n "${EXTRA_INSTALL_ARGS}" ]]; then + OFI_CONFIG="${OFI_CONFIG} ${EXTRA_INSTALL_ARGS}" + fi + + ${OFI_CONFIG} + make -j"${CORES}" install +} + +pkg_check() { + make check +} diff --git a/scripts/profiles/0.9.6-dev/install/libzmq.install b/scripts/profiles/0.9.6-dev/install/libzmq.install new file mode 100644 index 0000000000000000000000000000000000000000..4eaa1b207fdbc282f92a4f0964a94b65c9a0cf33 --- /dev/null +++ b/scripts/profiles/0.9.6-dev/install/libzmq.install @@ -0,0 +1,59 @@ +################################################################################ +# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. # +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see . 
# +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ +# vi: ft=bash + +################################################################################ +## The installation script must define both a pkg_install function and +## pkg_check function that, as their name implies, must specify how +## a dependency package should be installed and tested. ## ## The following +## variables can be used in the installation script: +## - CMAKE: a variable that expands to the cmake binary +## - SOURCE_DIR: the directory where the sources for the package were +## downloaded +## - INSTALL_DIR: the directory where the package should be installed +## - CORES: the number of cores to use when building +## - COMPILER_NAME: the name of the compiler being used (e.g. g++, clang, etc.) +## - COMPILER_FULL_VERSION: the compiler's full version (e.g. 9.3.0) +## - COMPILER_MAJOR_VERSION: the compiler's major version (e.g. 9) +## - PERFORM_TEST: whether tests for the package should be executed +################################################################################ + +pkg_install() { + ID="libzmq" + CURR="${SOURCE_DIR}/${ID}" + prepare_build_dir "${CURR}" + cd "${CURR}" + ./autogen.sh + cd "${CURR}/build" + ../configure --prefix="${INSTALL_DIR}" CFLAGS="${CFLAGS} -Wall -O3" + make -j"${CORES}" install +} + +pkg_check() { + : +} diff --git a/scripts/profiles/0.9.6-dev/install/lz4.install b/scripts/profiles/0.9.6-dev/install/lz4.install new file mode 100644 index 0000000000000000000000000000000000000000..32d424bd4552f86a6d9496abd4fac08c7269b903 --- /dev/null +++ b/scripts/profiles/0.9.6-dev/install/lz4.install @@ -0,0 +1,61 @@ +################################################################################ +# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. # +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see . # +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ +# vi: ft=bash + +################################################################################ +## The installation script must define both a pkg_install function and +## pkg_check function that, as their name implies, must specify how +## a dependency package should be installed and tested. 
+## +## The following +## variables can be used in the installation script: +## - CMAKE: a variable that expands to the cmake binary +## - SOURCE_DIR: the directory where the sources for the package were +## downloaded +## - INSTALL_DIR: the directory where the package should be installed +## - CORES: the number of cores to use when building +## - COMPILER_NAME: the name of the compiler being used (e.g. g++, clang, etc.) +## - COMPILER_FULL_VERSION: the compiler's full version (e.g. 9.3.0) +## - COMPILER_MAJOR_VERSION: the compiler's major version (e.g. 9) +## - PERFORM_TEST: whether tests for the package should be executed +################################################################################ + +pkg_install() { + ID="lz4" + CURR="${SOURCE_DIR}/${ID}" + cd "${CURR}" + # try to remove binaries first in case they already exist. Otherwise install fails. + LZ4_BINS=("${INSTALL_DIR}"/bin/lz4c "${INSTALL_DIR}"/bin/lz4cat "${INSTALL_DIR}"/bin/unlz4 "${INSTALL_DIR}"/bin/lz4) + for LZ4_BIN in "${LZ4_BINS[@]}"; do + [ -e "$LZ4_BIN" ] && rm "$LZ4_BIN" + done + make -j"${CORES}" + make DESTDIR="${INSTALL_DIR}" PREFIX="" install +} + +pkg_check() { + : +} diff --git a/scripts/profiles/0.9.6-dev/install/margo.install b/scripts/profiles/0.9.6-dev/install/margo.install new file mode 100644 index 0000000000000000000000000000000000000000..6fbfbe0a5703e9e416a1923baae42f46965ab9e7 --- /dev/null +++ b/scripts/profiles/0.9.6-dev/install/margo.install @@ -0,0 +1,59 @@ +################################################################################ +# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. # +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see <https://www.gnu.org/licenses/>. # +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ +# vi: ft=bash + +################################################################################ +## The installation script must define both a pkg_install function and +## pkg_check function that, as their name implies, must specify how +## a dependency package should be installed and tested. +## +## The following +## variables can be used in the installation script: +## - CMAKE: a variable that expands to the cmake binary +## - SOURCE_DIR: the directory where the sources for the package were +## downloaded +## - INSTALL_DIR: the directory where the package should be installed +## - CORES: the number of cores to use when building +## - COMPILER_NAME: the name of the compiler being used (e.g. g++, clang, etc.) +## - COMPILER_FULL_VERSION: the compiler's full version (e.g. 
9.3.0) +## - COMPILER_MAJOR_VERSION: the compiler's major version (e.g. 9) +## - PERFORM_TEST: whether tests for the package should be executed +################################################################################ + +pkg_install() { + ID="margo" + CURR="${SOURCE_DIR}/${ID}" + prepare_build_dir "${CURR}" + cd "${CURR}" + ./prepare.sh + cd "${CURR}/build" + ../configure --prefix="${INSTALL_DIR}" PKG_CONFIG_PATH="${INSTALL_DIR}/lib/pkgconfig" CFLAGS="${CFLAGS} -Wall -O3" + make -j"${CORES}" install +} + +pkg_check() { + make check +} diff --git a/scripts/profiles/0.9.6-dev/install/mercury.install b/scripts/profiles/0.9.6-dev/install/mercury.install new file mode 100644 index 0000000000000000000000000000000000000000..1f770a53693da5ed2d453db669471ad39264fc2e --- /dev/null +++ b/scripts/profiles/0.9.6-dev/install/mercury.install @@ -0,0 +1,82 @@ +################################################################################ +# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. # +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see . # +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ +# vi: ft=bash + +################################################################################ +## The installation script must define both a pkg_install function and +## pkg_check function that, as their name implies, must specify how +## a dependency package should be installed and tested. ## ## The following +## variables can be used in the installation script: +## - CMAKE: a variable that expands to the cmake binary +## - SOURCE_DIR: the directory where the sources for the package were +## downloaded +## - INSTALL_DIR: the directory where the package should be installed +## - CORES: the number of cores to use when building +## - COMPILER_NAME: the name of the compiler being used (e.g. g++, clang, etc.) +## - COMPILER_FULL_VERSION: the compiler's full version (e.g. 9.3.0) +## - COMPILER_MAJOR_VERSION: the compiler's major version (e.g. 
9) +## - PERFORM_TEST: whether tests for the package should be executed +################################################################################ + +pkg_install() { + # if the profile compiles bmi, enable it + if [[ -n "${PROFILE_DEP_NAMES['bmi']}" ]]; then + USE_BMI="-DNA_USE_BMI:BOOL=ON" + else + USE_BMI="-DNA_USE_BMI:BOOL=OFF" + fi + + # if the profile provides any flavour of libfabric, enable it + if profile_has_dependency "^libfabric.*$"; then + USE_OFI="-DNA_USE_OFI:BOOL=ON" + else + USE_OFI="-DNA_USE_OFI:BOOL=OFF" + fi + + ID="mercury" + CURR="${SOURCE_DIR}/${ID}" + prepare_build_dir "${CURR}" + cd "${CURR}/build" + $CMAKE \ + -DCMAKE_PREFIX_PATH=${INSTALL_DIR} \ + -DCMAKE_BUILD_TYPE:STRING=Release \ + -DBUILD_TESTING:BOOL=ON \ + -DMERCURY_USE_CHECKSUMS:BOOL=OFF \ + -DMERCURY_USE_BOOST_PP:BOOL=ON \ + -DBUILD_SHARED_LIBS:BOOL=ON \ + -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} \ + ${USE_BMI} ${USE_OFI} \ + .. + #-DNA_USE_OFI:BOOL=ON -DNA_USE_UCX:BOOL=ON \ + make -j"${CORES}" + make install +} + +pkg_check() { + : +} diff --git a/scripts/profiles/0.9.6-dev/install/parallax.install b/scripts/profiles/0.9.6-dev/install/parallax.install new file mode 100644 index 0000000000000000000000000000000000000000..905d4a1e86fb4a4858476245e5373a2362a8701c --- /dev/null +++ b/scripts/profiles/0.9.6-dev/install/parallax.install @@ -0,0 +1,64 @@ +################################################################################ +# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. # +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see . # +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ +# vi: ft=bash + +################################################################################ +## The installation script must define both a pkg_install function and +## pkg_check function that, as their name implies, must specify how +## a dependency package should be installed and tested. 
+## +## The following +## variables can be used in the installation script: +## - CMAKE: a variable that expands to the cmake binary +## - SOURCE_DIR: the directory where the sources for the package were +## downloaded +## - INSTALL_DIR: the directory where the package should be installed +## - CORES: the number of cores to use when building +## - PERFORM_TEST: whether tests for the package should be executed +################################################################################ + +pkg_install() { + + CURR="${SOURCE_DIR}/parallax" + # sed -i -e 's/KEY_SIZE (256)/KEY_SIZE (4096)/g' ${CURR}/lib/btree/conf.h + prepare_build_dir "${CURR}" + cd "${CURR}/build" + PKG_CONFIG_PATH="${INSTALL_DIR}/lib/pkgconfig" $CMAKE \ + -DBUILD_SHARED_LIBS:BOOL=ON \ + -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} \ + -DCMAKE_BUILD_TYPE="Release" \ + -DCMAKE_CXX_FLAGS_RELEASE="-Wno-error=unused-result" \ + -DDISABLE_LOGGING:BOOL=ON \ + .. + make -j"${CORES}" + make install + # We need to copy this file as it is not installed + cp ${CURR}/lib/include/parallax/structures.h ${INSTALL_DIR}/include/ +} + +pkg_check() { + : +} diff --git a/scripts/profiles/0.9.6-dev/install/prometheus-cpp.install b/scripts/profiles/0.9.6-dev/install/prometheus-cpp.install new file mode 100644 index 0000000000000000000000000000000000000000..0dc3c52877dfd7d136e97d901d7ada576dacf2a4 --- /dev/null +++ b/scripts/profiles/0.9.6-dev/install/prometheus-cpp.install @@ -0,0 +1,61 @@ +################################################################################ +# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. # +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see <https://www.gnu.org/licenses/>. # +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ +# vi: ft=bash + +################################################################################ +## The installation script must define both a pkg_install function and +## pkg_check function that, as their name implies, must specify how +## a dependency package should be installed and tested. +## +## The following +## variables can be used in the installation script: +## - CMAKE: a variable that expands to the cmake binary +## - SOURCE_DIR: the directory where the sources for the package were +## downloaded +## - INSTALL_DIR: the directory where the package should be installed +## - CORES: the number of cores to use when building +## - COMPILER_NAME: the name of the compiler being used (e.g. g++, clang, etc.) +## - COMPILER_FULL_VERSION: the compiler's full version (e.g. 
9.3.0) +## - COMPILER_MAJOR_VERSION: the compiler's major version (e.g. 9) +## - PERFORM_TEST: whether tests for the package should be executed +################################################################################ + +pkg_install() { + ID="prometheus-cpp" + CURR="${SOURCE_DIR}/${ID}" + prepare_build_dir "${CURR}" + cd "${CURR}/build" + ${CMAKE} \ + -DCMAKE_BUILD_TYPE:STRING=Release \ + -DCMAKE_INSTALL_PREFIX="${INSTALL_DIR}" \ + -DBUILD_SHARED_LIBS:BOOL=ON \ + .. + make -j"${CORES}" install +} + +pkg_check() { + : +} diff --git a/scripts/profiles/0.9.6-dev/install/rocksdb.install b/scripts/profiles/0.9.6-dev/install/rocksdb.install new file mode 100644 index 0000000000000000000000000000000000000000..dfdf6091e3c7a0ce349b62d7a1feea194cfb08d3 --- /dev/null +++ b/scripts/profiles/0.9.6-dev/install/rocksdb.install @@ -0,0 +1,79 @@ +################################################################################ +# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. # +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see . # +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ +# vi: ft=bash + +################################################################################ +## The installation script must define both a pkg_install function and +## pkg_check function that, as their name implies, must specify how +## a dependency package should be installed and tested. ## ## The following +## variables can be used in the installation script: +## - CMAKE: a variable that expands to the cmake binary +## - SOURCE_DIR: the directory where the sources for the package were +## downloaded +## - INSTALL_DIR: the directory where the package should be installed +## - CORES: the number of cores to use when building +## - COMPILER_NAME: the name of the compiler being used (e.g. g++, clang, etc.) +## - COMPILER_FULL_VERSION: the compiler's full version (e.g. 9.3.0) +## - COMPILER_MAJOR_VERSION: the compiler's major version (e.g. 9) +## - PERFORM_TEST: whether tests for the package should be executed +################################################################################ + +pkg_install() { + CXXFLAGS='' + # gcc 9 and clang 8 need -Wno-error=deprecated-copy -Wno-error=pessimizing-move + if [[ ("${COMPILER_NAME}" == "g++" && "${COMPILER_MAJOR_VERSION}" -ge 9) || + ("${COMPILER_NAME}" == "clang" && "${COMPILER_MAJOR_VERSION}" -ge 8) ]]; then + CXXFLAGS='-Wno-error=deprecated-copy -Wno-error=pessimizing-move -Wno-error=maybe-uninitialized' + fi + + # TODO use SSE? 
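+ # Build RocksDB as a static library with LZ4 compression enabled; PORTABLE=1 avoids host-specific CPU flags so the result also runs on older nodes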
+ CURR="${SOURCE_DIR}/rocksdb" + prepare_build_dir "${CURR}" + cd "${CURR}/build" + PKG_CONFIG_PATH="${INSTALL_DIR}/lib/pkgconfig" $CMAKE \ + -DCMAKE_CXX_FLAGS="${CXXFLAGS}" \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_PREFIX_PATH="${INSTALL_DIR}" \ + -DCMAKE_INSTALL_LIBDIR="${INSTALL_DIR}/lib" \ + -DCMAKE_INSTALL_INCLUDEDIR="${INSTALL_DIR}/include" \ + -DROCKSDB_BUILD_SHARED=OFF \ + -DWITH_LZ4=ON \ + -DWITH_GFLAGS=OFF \ + -DUSE_RTTI=1 \ + -DPORTABLE=1 \ + -DWITH_ALL_TESTS=OFF \ + -DWITH_BENCHMARK_TOOLS=OFF \ + -DWITH_TOOLS=OFF .. + make -j"${CORES}" install + + +} + +pkg_check() { + : +} diff --git a/scripts/profiles/0.9.6-dev/install/syscall_intercept.install b/scripts/profiles/0.9.6-dev/install/syscall_intercept.install new file mode 100644 index 0000000000000000000000000000000000000000..ad3969c467c271a77fb93c875b54a3358f3edd84 --- /dev/null +++ b/scripts/profiles/0.9.6-dev/install/syscall_intercept.install @@ -0,0 +1,78 @@ +################################################################################ +# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. # +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see . # +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ +# vi: ft=bash + +################################################################################ +## The installation script must define both a pkg_install function and +## pkg_check function that, as their name implies, must specify how +## a dependency package should be installed and tested. ## ## The following +## variables can be used in the installation script: +## - CMAKE: a variable that expands to the cmake binary +## - SOURCE_DIR: the directory where the sources for the package were +## downloaded +## - INSTALL_DIR: the directory where the package should be installed +## - CORES: the number of cores to use when building +## - COMPILER_NAME: the name of the compiler being used (e.g. g++, clang, etc.) +## - COMPILER_FULL_VERSION: the compiler's full version (e.g. 9.3.0) +## - COMPILER_MAJOR_VERSION: the compiler's major version (e.g. 
9) +## - PERFORM_TEST: whether tests for the package should be executed +################################################################################ + +pkg_install() { + ID="syscall_intercept" + CURR="${SOURCE_DIR}/${ID}" + EXTRA_INSTALL_ARGS="${PROFILE_EXTRA_INSTALL_ARGS[${ID}]}" + prepare_build_dir "${CURR}" + + if [[ ${EXTRA_INSTALL_ARGS} == "ARM64" ]]; then + cd "${CURR}"/arch/aarch64/ + mkdir -p build + cd build + else + cd "${CURR}"/build + fi + $CMAKE -DCMAKE_PREFIX_PATH="${INSTALL_DIR}" \ + -DCMAKE_INSTALL_PREFIX="${INSTALL_DIR}" \ + -DCMAKE_BUILD_TYPE:STRING=Debug \ + -DBUILD_EXAMPLES:BOOL=OFF \ + -DSTATIC_CAPSTONE:BOOL=OFF \ + -DBUILD_TESTS:BOOL=OFF -DCMAKE_POLICY_VERSION_MINIMUM=3.5 .. + make -j"${CORES}" install + if [[ ${EXTRA_INSTALL_ARGS} == "ARM64" ]]; then + cp "${CURR}"/arch/aarch64/include/libsyscall_intercept_hook_point.h ${INSTALL_DIR}/include + fi + + # patch the riscv syscall_intercept header so it matches the other implementations: + # rewrite line 89 only if it still contains the riscv-specific struct wrapper_ret + sed -i '89s/struct wrapper_ret/long/g' \ + ${INSTALL_DIR}/include/libsyscall_intercept_hook_point.h +} + +pkg_check() { + : +} diff --git a/scripts/profiles/0.9.6-dev/install/ucx.install b/scripts/profiles/0.9.6-dev/install/ucx.install new file mode 100644 index 0000000000000000000000000000000000000000..fd90d6d3f62dd7552becf46bb02ee8c9fdc7a82c --- /dev/null +++ b/scripts/profiles/0.9.6-dev/install/ucx.install @@ -0,0 +1,61 @@ +################################################################################ +# Copyright 2018-2022, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2022, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. # +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see <https://www.gnu.org/licenses/>. # +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ +# vi: ft=bash + +################################################################################ +## The installation script must define both a pkg_install function and +## pkg_check function that, as their name implies, must specify how +## a dependency package should be installed and tested. +## +## The following +## variables can be used in the installation script: +## - CMAKE: a variable that expands to the cmake binary +## - SOURCE_DIR: the directory where the sources for the package were +## downloaded +## - INSTALL_DIR: the directory where the package should be installed +## - CORES: the number of cores to use when building +## - COMPILER_NAME: the name of the compiler being used (e.g. g++, clang, etc.) +## - COMPILER_FULL_VERSION: the compiler's full version (e.g. 
+## - COMPILER_MAJOR_VERSION: the compiler's major version (e.g. 9)
+## - PERFORM_TEST: whether tests for the package should be executed
+################################################################################
+
+pkg_install() {
+
+    ID="ucx"
+    CURR="${SOURCE_DIR}/${ID}"
+    prepare_build_dir "${CURR}"
+    cd "${CURR}"
+    ./autogen.sh
+    cd "${CURR}/build"
+    ../contrib/configure-release --prefix="${INSTALL_DIR}"
+    make -j"${CORES}"
+    make install
+}
+
+pkg_check() {
+    :
+}
diff --git a/scripts/profiles/0.9.6-dev/install/zstd.install b/scripts/profiles/0.9.6-dev/install/zstd.install
new file mode 100644
index 0000000000000000000000000000000000000000..0152130593884aeb2bd6ed34d8ba4f0e80f11977
--- /dev/null
+++ b/scripts/profiles/0.9.6-dev/install/zstd.install
@@ -0,0 +1,70 @@
+################################################################################
+# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain            #
+# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany          #
+#                                                                              #
+# This software was partially supported by the                                 #
+# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu).    #
+#                                                                              #
+# This software was partially supported by the                                 #
+# ADA-FS project under the SPPEXA project funded by the DFG.                   #
+#                                                                              #
+# This file is part of GekkoFS.                                                #
+#                                                                              #
+# GekkoFS is free software: you can redistribute it and/or modify              #
+# it under the terms of the GNU General Public License as published by         #
+# the Free Software Foundation, either version 3 of the License, or            #
+# (at your option) any later version.                                          #
+#                                                                              #
+# GekkoFS is distributed in the hope that it will be useful,                   #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of               #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the                 #
+# GNU General Public License for more details.                                 #
+#                                                                              #
+# You should have received a copy of the GNU General Public License            #
+# along with GekkoFS. If not, see <https://www.gnu.org/licenses/>.             #
+#                                                                              #
+# SPDX-License-Identifier: GPL-3.0-or-later                                    #
+################################################################################
+# vi: ft=bash
+
+################################################################################
+## The installation script must define both a pkg_install function and
+## pkg_check function that, as their name implies, must specify how
+## a dependency package should be installed and tested.
+##
+## The following variables can be used in the installation script:
+## - CMAKE: a variable that expands to the cmake binary
+## - SOURCE_DIR: the directory where the sources for the package were
+## downloaded
+## - INSTALL_DIR: the directory where the package should be installed
+## - CORES: the number of cores to use when building
+## - COMPILER_NAME: the name of the compiler being used (e.g. g++, clang, etc.)
+## - COMPILER_FULL_VERSION: the compiler's full version (e.g. 9.3.0)
+## - COMPILER_MAJOR_VERSION: the compiler's major version (e.g. 9)
+## - PERFORM_TEST: whether tests for the package should be executed
+################################################################################
+
+pkg_install() {
+    ID="zstd"
+    CURR="${SOURCE_DIR}/${ID}/build/cmake"
+    prepare_build_dir "${CURR}"
+    cd "${CURR}/build"
+
+    $CMAKE -DCMAKE_INSTALL_PREFIX="${INSTALL_DIR}" \
+        -DCMAKE_BUILD_TYPE=Release \
+        -DCMAKE_PREFIX_PATH="${INSTALL_DIR}" \
+        -DZSTD_BUILD_PROGRAMS=OFF \
+        -DZSTD_BUILD_CONTRIB=OFF \
+        -DZSTD_BUILD_TESTS=OFF \
+        -DZSTD_LEGACY_SUPPORT=OFF \
+        -DCMAKE_POSITION_INDEPENDENT_CODE=ON ..
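+
+    # Note (rationale, added for clarity): -DCMAKE_POSITION_INDEPENDENT_CODE=ON
+    # above is not cosmetic. This profile builds a static libzstd.a that is
+    # later linked into the shared GekkoFS client library, which requires PIC
+    # objects to link successfully.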
+    make -j"${CORES}"
+    make install
+}
+
+pkg_check() {
+    :
+}
diff --git a/scripts/profiles/0.9.6-dev/marenostrum4.specs b/scripts/profiles/0.9.6-dev/marenostrum4.specs
new file mode 100644
index 0000000000000000000000000000000000000000..1b72ae749526113628b675262147dd8e50e57ee5
--- /dev/null
+++ b/scripts/profiles/0.9.6-dev/marenostrum4.specs
@@ -0,0 +1,79 @@
+################################################################################
+# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain            #
+# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany          #
+#                                                                              #
+# This software was partially supported by the                                 #
+# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu).    #
+#                                                                              #
+# This software was partially supported by the                                 #
+# ADA-FS project under the SPPEXA project funded by the DFG.                   #
+#                                                                              #
+# This file is part of GekkoFS.                                                #
+#                                                                              #
+# GekkoFS is free software: you can redistribute it and/or modify              #
+# it under the terms of the GNU General Public License as published by         #
+# the Free Software Foundation, either version 3 of the License, or            #
+# (at your option) any later version.                                          #
+#                                                                              #
+# GekkoFS is distributed in the hope that it will be useful,                   #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of               #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the                 #
+# GNU General Public License for more details.                                 #
+#                                                                              #
+# You should have received a copy of the GNU General Public License            #
+# along with GekkoFS. If not, see <https://www.gnu.org/licenses/>.             #
+#                                                                              #
+# SPDX-License-Identifier: GPL-3.0-or-later                                    #
+################################################################################
+
+# vi: ft=bash
+
+# Variables to be imported into the scripts
+declare -A wgetdeps clonedeps clonedeps_args clonedeps_patches extra_install_args
+declare -a order
+
+# Comment that should be displayed when printing the profile
+comment="Dependencies for Marenostrum 4 supercomputer"
+
+# Dependencies that must be downloaded directly
+wgetdeps=(
+    ["zstd"]="1.5.7"
+    ["lz4"]="1.9.4"
+    ["capstone"]="6.0.0-Alpha1"
+    ["argobots"]="1.2"
+    ["rocksdb"]="8.10.0"
+    ["json-c"]="0.17-20230812"
+    ["psm2"]="11.2.185"
+)
+
+# Dependencies that must be cloned
+clonedeps=(
+    ["libfabric"]="HEAD@v1.20.1"
+    ["mercury"]="v2.4.0"
+    ["margo"]="v0.18.3"
+    ["syscall_intercept"]="d8b2a69961921ed123625c79a609331fc56a8931"
+    ["date"]="e7e1482087f58913b80a20b04d5c58d9d6d90155"
+    ["parallax"]="c130decd7a71c60c20b98d6a23924f05f754c3cd"
+)
+
+# Extra arguments for git clone
+clonedeps_args=(
+    ["mercury"]="--recurse-submodules"
+)
+
+# Ordering that MUST be followed when downloading
+order=(
+    "lz4" "zstd" "capstone" "json-c" "psm2" "libfabric" "mercury" "argobots" "margo" "rocksdb" "syscall_intercept" "date" "parallax"
+)
+
+# Extra arguments passed to the installation script.
As such, they can +# reference the following variables: +# - CMAKE: a variable that expands to the cmake binary +# - SOURCE_DIR: the directory where the sources for the package were +# downloaded +# - INSTALL_DIR: the directory where the package should be installed +# - CORES: the number of cores to use when building +# - PERFORM_TEST: whether tests for the package should be executed +extra_install_args=( + ["libfabric"]="--enable-psm2=no --enable-sockets=yes" +) diff --git a/scripts/profiles/0.9.6-dev/mogon2.specs b/scripts/profiles/0.9.6-dev/mogon2.specs new file mode 100644 index 0000000000000000000000000000000000000000..3a127ba391b4d5cd0669c785ab140a1f5e649a8f --- /dev/null +++ b/scripts/profiles/0.9.6-dev/mogon2.specs @@ -0,0 +1,79 @@ +################################################################################ +# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. # +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see . # +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ + +# vi: ft=bash + +# Variables to be imported into the scripts +declare -A wgetdeps clonedeps clonedeps_args clonedeps_patches extra_install_args +declare -a order + +# Comment that should be displayed when printing the profile +comment="Dependencies for Mogon 2 supercomputer" + +# Dependencies that must be downloaded directly +wgetdeps=( + ["zstd"]="1.5.7" + ["lz4"]="1.9.4" + ["capstone"]="6.0.0-Alpha1" + ["argobots"]="1.2" + ["rocksdb"]="10.4.2" + ["psm2"]="11.2.185" + ["json-c"]="0.17-20230812" +) + +# Dependencies that must be cloned +clonedeps=( + ["libfabric"]="HEAD@v2.2.0" + ["mercury"]="v2.4.1rc1" + ["margo"]="v0.18.3" + ["syscall_intercept"]="d8b2a69961921ed123625c79a609331fc56a8931" + ["date"]="e7e1482087f58913b80a20b04d5c58d9d6d90155" +) + +# Extra arguments for git clone +clonedeps_args=( + ["mercury"]="--recurse-submodules" +) + +# Ordering that MUST be followed when downloading +order=( + "lz4" "zstd" "capstone" "json-c" "libfabric" "mercury" "argobots" "margo" "rocksdb" "syscall_intercept" "date" "psm2" +) + +# Extra arguments passed to the installation script. 
As such, they can
+# reference the following variables:
+# - CMAKE: a variable that expands to the cmake binary
+# - SOURCE_DIR: the directory where the sources for the package were
+# downloaded
+# - INSTALL_DIR: the directory where the package should be installed
+# - CORES: the number of cores to use when building
+# - PERFORM_TEST: whether tests for the package should be executed
+extra_install_args=(
+#    ["libfabric"]="--enable-psm2=yes --enable-opx=yes"
+    ["libfabric"]="--enable-psm2=yes --with-psm2-src=${SOURCE_DIR}/psm2"
+)
diff --git a/scripts/profiles/0.9.6-dev/mogon3.specs b/scripts/profiles/0.9.6-dev/mogon3.specs
new file mode 100644
index 0000000000000000000000000000000000000000..237f40cae9a19587f4fb836d3cacc934d638909f
--- /dev/null
+++ b/scripts/profiles/0.9.6-dev/mogon3.specs
@@ -0,0 +1,78 @@
+################################################################################
+# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain            #
+# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany          #
+#                                                                              #
+# This software was partially supported by the                                 #
+# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu).    #
+#                                                                              #
+# This software was partially supported by the                                 #
+# ADA-FS project under the SPPEXA project funded by the DFG.                   #
+#                                                                              #
+# This file is part of GekkoFS.                                                #
+#                                                                              #
+# GekkoFS is free software: you can redistribute it and/or modify              #
+# it under the terms of the GNU General Public License as published by         #
+# the Free Software Foundation, either version 3 of the License, or            #
+# (at your option) any later version.                                          #
+#                                                                              #
+# GekkoFS is distributed in the hope that it will be useful,                   #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of               #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the                 #
+# GNU General Public License for more details.                                 #
+#                                                                              #
+# You should have received a copy of the GNU General Public License            #
+# along with GekkoFS. If not, see <https://www.gnu.org/licenses/>.             #
+#                                                                              #
+# SPDX-License-Identifier: GPL-3.0-or-later                                    #
+################################################################################
+
+# vi: ft=bash
+
+# Variables to be imported into the scripts
+declare -A wgetdeps clonedeps clonedeps_args clonedeps_patches extra_install_args
+declare -a order
+
+# Comment that should be displayed when printing the profile
+comment="Dependencies for Mogon 3 supercomputer"
+
+# Dependencies that must be downloaded directly
+wgetdeps=(
+    ["zstd"]="1.5.7"
+    ["lz4"]="1.9.4"
+    ["capstone"]="6.0.0-Alpha1"
+    ["argobots"]="1.2"
+    ["rocksdb"]="10.4.2"
+    ["json-c"]="0.17-20230812"
+    ["libzmq"]="4.3.5"
+    ["cppzmq"]="4.10.0"
+)
+
+# Dependencies that must be cloned
+clonedeps=(
+#    ["libfabric"]="HEAD@v1.20.1"
+    ["mercury"]="v2.4.1rc1"
+    ["margo"]="v0.18.3"
+    ["syscall_intercept"]="d8b2a69961921ed123625c79a609331fc56a8931"
+)
+
+# Extra arguments for git clone
+clonedeps_args=(
+    ["mercury"]="--recurse-submodules"
+)
+
+# Ordering that MUST be followed when downloading
+order=(
+    "lz4" "zstd" "capstone" "json-c" "mercury" "argobots" "margo" "rocksdb" "syscall_intercept" "libzmq" "cppzmq"
+)
+
+# Extra arguments passed to the installation script.
As such, they can +# reference the following variables: +# - CMAKE: a variable that expands to the cmake binary +# - SOURCE_DIR: the directory where the sources for the package were +# downloaded +# - INSTALL_DIR: the directory where the package should be installed +# - CORES: the number of cores to use when building +# - PERFORM_TEST: whether tests for the package should be executed +extra_install_args=( +# ["libfabric"]="" +) diff --git a/scripts/profiles/0.9.6-dev/ngio.specs b/scripts/profiles/0.9.6-dev/ngio.specs new file mode 100644 index 0000000000000000000000000000000000000000..5ce5cbbfc89d53c4dcebdf6f0f10b43147a622c2 --- /dev/null +++ b/scripts/profiles/0.9.6-dev/ngio.specs @@ -0,0 +1,79 @@ +################################################################################ +# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. # +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see . # +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ + +# vi: ft=bash + +# Variables to be imported into the scripts +declare -A wgetdeps clonedeps clonedeps_args clonedeps_patches extra_install_args +declare -a order + +# Comment that should be displayed when printing the profile +comment="Dependencies for NEXTGenIO prototype cluster" + +# Dependencies that must be downloaded directly +wgetdeps=( + ["zstd"]="1.5.7" + ["lz4"]="1.9.4" + ["capstone"]="6.0.0-Alpha1" + ["argobots"]="1.2" + ["rocksdb"]="10.4.2" + ["json-c"]="0.17-20230812" + ["psm2"]="11.2.185" +) + +# Dependencies that must be cloned +clonedeps=( + ["libfabric"]="HEAD@v2.2.0" + ["mercury"]="v2.4.1rc1" + ["margo"]="v0.18.3" + ["syscall_intercept"]="d8b2a69961921ed123625c79a609331fc56a8931" + ["date"]="e7e1482087f58913b80a20b04d5c58d9d6d90155" + ["parallax"]="c130decd7a71c60c20b98d6a23924f05f754c3cd" +) + +# Extra arguments for git clone +clonedeps_args=( + ["mercury"]="--recurse-submodules" +) + +# Ordering that MUST be followed when downloading +order=( + "lz4" "zstd" "capstone" "json-c" "psm2" "libfabric" "mercury" "argobots" "margo" "rocksdb" "syscall_intercept" "date" "parallax" +) + +# Extra arguments passed to the installation script. 
As such, they can +# reference the following variables: +# - CMAKE: a variable that expands to the cmake binary +# - SOURCE_DIR: the directory where the sources for the package were +# downloaded +# - INSTALL_DIR: the directory where the package should be installed +# - CORES: the number of cores to use when building +# - PERFORM_TEST: whether tests for the package should be executed +extra_install_args=( + ["libfabric"]="--enable-psm2=yes --with-psm2-src=${SOURCE_DIR}/psm2" +) diff --git a/scripts/profiles/0.9.6-dev/omnipath_psm2.specs b/scripts/profiles/0.9.6-dev/omnipath_psm2.specs new file mode 100644 index 0000000000000000000000000000000000000000..ed11895e13e80778a507c58f801b787a95bffb68 --- /dev/null +++ b/scripts/profiles/0.9.6-dev/omnipath_psm2.specs @@ -0,0 +1,78 @@ +################################################################################ +# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. # +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see . # +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ + +# vi: ft=bash + +# Variables to be imported into the scripts +declare -A wgetdeps clonedeps clonedeps_args clonedeps_patches extra_install_args +declare -a order + +# Comment that should be displayed when printing the profile +comment="Dependencies for Omnipath supercomputer" + +# Dependencies that must be downloaded directly +wgetdeps=( + ["zstd"]="1.5.7" + ["lz4"]="1.9.4" + ["capstone"]="6.0.0-Alpha1" + ["argobots"]="1.2" + ["rocksdb"]="10.4.2" + ["json-c"]="0.17-20230812" + ["psm2"]="11.2.185" +) + +# Dependencies that must be cloned +clonedeps=( + ["libfabric"]="HEAD@v2.2.0" + ["mercury"]="v2.4.1rc1" + ["margo"]="v0.18.3" + ["syscall_intercept"]="d8b2a69961921ed123625c79a609331fc56a8931" + ["date"]="e7e1482087f58913b80a20b04d5c58d9d6d90155" +) + +# Extra arguments for git clone +clonedeps_args=( + ["mercury"]="--recurse-submodules" +) + +# Ordering that MUST be followed when downloading +order=( + "lz4" "zstd" "capstone" "json-c" "psm2" "libfabric" "mercury" "argobots" "margo" "rocksdb" "syscall_intercept" "date" +) + +# Extra arguments passed to the installation script. 
As such, they can +# reference the following variables: +# - CMAKE: a variable that expands to the cmake binary +# - SOURCE_DIR: the directory where the sources for the package were +# downloaded +# - INSTALL_DIR: the directory where the package should be installed +# - CORES: the number of cores to use when building +# - PERFORM_TEST: whether tests for the package should be executed +extra_install_args=( + ["libfabric"]="--enable-psm2=yes --with-psm2-src=${SOURCE_DIR}/psm2" +) diff --git a/scripts/profiles/0.9.6-dev/p9.specs b/scripts/profiles/0.9.6-dev/p9.specs new file mode 100644 index 0000000000000000000000000000000000000000..060296b9a906887a34f5e6f80c089334527d45eb --- /dev/null +++ b/scripts/profiles/0.9.6-dev/p9.specs @@ -0,0 +1,83 @@ +################################################################################ +# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. # +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see . # +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ + +# vi: ft=bash + +# Variables to be imported into the scripts +declare -A wgetdeps clonedeps clonedeps_args clonedeps_patches extra_install_args +declare -a order + +# Comment that should be displayed when printing the profile +comment="Dependencies for PowerPC supercomputer" + +# Dependencies that must be downloaded directly +wgetdeps=( + ["zstd"]="1.5.7" + ["lz4"]="1.9.4" + ["capstone"]="6.0.0-Alpha1" + ["argobots"]="1.2" + ["rocksdb"]="8.10.0" + ["json-c"]="0.17-20230812" + ["psm2"]="11.2.185" +) + +# Dependencies that must be cloned +clonedeps=( + ["libfabric"]="HEAD@v1.20.1" + ["mercury"]="v2.4.0" + ["margo"]="v0.18.3" + ["syscall_intercept"]="6eb27a9d2053bb2ac3bb9ce30e13b64ce055c19f" + ["date"]="e7e1482087f58913b80a20b04d5c58d9d6d90155" +) + +# Extra arguments for git clone +clonedeps_args=( + ["mercury"]="--recurse-submodules" +) + +# Patches that should be applied post-clone +clonedeps_patches=( + ["syscall_intercept"]="syscall_intercept.patch" +) + +# Ordering that MUST be followed when downloading +order=( + "lz4" "zstd" "capstone" "json-c" "psm2" "libfabric" "mercury" "argobots" "margo" "rocksdb" "syscall_intercept" "date" +) + +# Extra arguments passed to the installation script. 
As such, they can +# reference the following variables: +# - CMAKE: a variable that expands to the cmake binary +# - SOURCE_DIR: the directory where the sources for the package were +# downloaded +# - INSTALL_DIR: the directory where the package should be installed +# - CORES: the number of cores to use when building +# - PERFORM_TEST: whether tests for the package should be executed +extra_install_args=( + ["libfabric"]="--enable-psm2=no --enable-sockets=yes" +) diff --git a/scripts/profiles/0.9.6-dev/riscv.specs b/scripts/profiles/0.9.6-dev/riscv.specs new file mode 100644 index 0000000000000000000000000000000000000000..1f601171a7a4571e008d698d52fc68b9e21ad927 --- /dev/null +++ b/scripts/profiles/0.9.6-dev/riscv.specs @@ -0,0 +1,75 @@ +################################################################################ +# Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. # +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see . # +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ + +# vi: ft=bash + +# Variables to be imported into the scripts +declare -A wgetdeps clonedeps clonedeps_args clonedeps_patches extra_install_args +declare -a order + +# Comment that should be displayed when printing the profile +comment="All dependencies" + +# Dependencies that must be downloaded directly +wgetdeps=( + ["zstd"]="1.5.7" + ["lz4"]="1.9.4" + ["capstone"]="6.0.0-Alpha2" + ["argobots"]="1.2" + ["rocksdb"]="8.10.0" + ["json-c"]="0.17-20230812" +) + +# Dependencies that must be cloned. +clonedeps=( + ["libfabric"]="HEAD@v1.20.1" + ["mercury"]="v2.4.0" + ["margo"]="v0.18.3" + ["syscall_intercept"]="fcc6d7bd4a62c2fa0419ad65ecd8d5ed95b99f63" +) + +# Extra arguments for git clone +clonedeps_args=( + ["mercury"]="--recurse-submodules" +) + +# Ordering that MUST be followed when downloading +order=( + "lz4" "zstd" "capstone" "json-c" "libfabric" "mercury" "argobots" "margo" "rocksdb" "syscall_intercept" +) + +# Extra arguments passed to the installation script. 
As such, they can +# reference the following variables: +# - CMAKE: a variable that expands to the cmake binary +# - SOURCE_DIR: the directory where the sources for the package were +# downloaded +# - INSTALL_DIR: the directory where the package should be installed +# - CORES: the number of cores to use when building +# - PERFORM_TEST: whether tests for the package should be executed +extra_install_args=( +) diff --git a/scripts/profiles/latest b/scripts/profiles/latest index 03834411d1529ea7337b4a2c2a4a34b689d2a54e..7ed1cca5bae0f0e116f49fcfbe0a93a06bb95ca5 120000 --- a/scripts/profiles/latest +++ b/scripts/profiles/latest @@ -1 +1 @@ -0.9.5 \ No newline at end of file +0.9.6-dev \ No newline at end of file diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt index a7d0247f5f6065f16651f2d2cf7affee9d017233..7fa6272f8263d41eb55b22d43a12e7b3a24a09e9 100644 --- a/src/client/CMakeLists.txt +++ b/src/client/CMakeLists.txt @@ -61,6 +61,7 @@ target_link_libraries( fmt::fmt Threads::Threads Microsoft.GSL::GSL + ZStd::ZStd ) add_library(gkfs_intercept SHARED) diff --git a/src/client/gkfs_functions.cpp b/src/client/gkfs_functions.cpp index 95405e0cc1cd2b1134f2bc6730bf80f12355df14..962dee8ec856c0481b2f14015868a37ce9ecfdf9 100644 --- a/src/client/gkfs_functions.cpp +++ b/src/client/gkfs_functions.cpp @@ -1478,6 +1478,8 @@ gkfs_opendir(const std::string& path) { "{}() Sending async dirents for path '{}' to '{}' daemons ...", __func__, path, CTX->hosts().size()); // Launch RPC calls asynchronously + // We need to filter the results from the dentry cache as + // forward_get_dirents_single gathers all the files for(uint64_t i = 0; i < CTX->hosts().size(); i++) { dcache_futures.push_back(std::async(std::launch::async, [&, i]() { if(gkfs::config::proxy::fwd_get_dirents_single && @@ -1502,6 +1504,11 @@ gkfs_opendir(const std::string& path) { get<3>(dentry)); auto ftype = get<1>(dentry) ? gkfs::filemap::FileType::directory : gkfs::filemap::FileType::regular; + // if the name includes a / skip it (as it belongs to a + // subdirectory + if(get<0>(dentry).find('/') != std::string::npos) { + continue; + } // filename, is_dir, size, ctime ret.second->add(get<0>(dentry), ftype); CTX->dentry_cache()->insert( @@ -1561,6 +1568,7 @@ gkfs_rmdir(const std::string& path) { errno = ENOTEMPTY; return -1; } + #endif if(gkfs::config::proxy::fwd_remove && CTX->use_proxy()) { err = gkfs::rpc::forward_remove_proxy(path, true); @@ -1989,14 +1997,36 @@ gkfs_munmap(void* addr, size_t length) { } // namespace gkfs::syscall -/* This function defines an extension of the dirents prepared to do a - * find-like function The function only sends the request to the specified - * server +/** + * Retrieves all directory entries for a given path from a single server. + * + * This function allocates the memory required to hold all directory entries. + * The caller is responsible for freeing this memory using free() when it is + * no longer needed. + * + * @param path The directory path to query. + * @param dirp A pointer to a 'struct dirent_extended*' that will be updated + * to point to the newly allocated buffer. On success, this will + * not be NULL. On failure or if the directory is empty, it will + * be set to NULL. + * @param server The ID of the server to query. + * @return On success, returns the total number of bytes allocated and written. + * If the directory is empty, returns 0. + * On failure, returns -1 and sets errno appropriately. 
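+ *
+ * A minimal caller sketch (hypothetical usage; the mount path and server id
+ * below are placeholders, and the symbol must resolve from the preloaded
+ * GekkoFS client library):
+ * @code
+ *   struct dirent_extended* entries = nullptr;
+ *   int n = gkfs_getsingleserverdir("/mnt/gkfs/somedir", &entries, 0);
+ *   if(n > 0) {
+ *       char* p = reinterpret_cast<char*>(entries);
+ *       for(int off = 0; off < n;
+ *           off += reinterpret_cast<struct dirent_extended*>(p + off)->d_reclen) {
+ *           // inspect d_name, size, ctime of the entry at (p + off)
+ *       }
+ *       free(entries);
+ *   }
+ * @endcode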
 */
 extern "C" int
-gkfs_getsingleserverdir(const char* path, struct dirent_extended* dirp,
-                        unsigned int count, int server) {
+gkfs_getsingleserverdir(const char* path, struct dirent_extended** dirp,
+                        int server) {
+
+    // The caller must provide a valid pointer-to-a-pointer.
+    if(dirp == nullptr) {
+        errno = EINVAL;
+        return -1;
+    }
+    *dirp = nullptr;
+
+    // --- 1. Fetch the directory entries from the daemon via RPC ---
     pair<int, unique_ptr<vector<tuple<string, bool, size_t, time_t>>>>
             ret{};
@@ -2013,25 +2043,38 @@ gkfs_getsingleserverdir(const char* path, struct dirent_extended* dirp,
     }
     auto& open_dir = *ret.second;
+
+    if(open_dir.empty()) {
+        return 0; // Success, 0 bytes written, *dirp is already NULL.
+    }
+
+    // --- 2. First pass: calculate the exact total size required ---
+    size_t total_required_size = 0;
+    for(const auto& de : open_dir) {
+        total_required_size +=
+                ALIGN(offsetof(struct dirent_extended, d_name) +
+                              (get<0>(de)).size() + 1, // +1 for null terminator
+                      sizeof(uint64_t));
+    }
+
+    // --- 3. Allocate memory ---
+    // Use malloc because the C-style caller will use free().
+    *dirp = static_cast<struct dirent_extended*>(malloc(total_required_size));
+    if(*dirp == nullptr) {
+        errno = ENOMEM; // Memory allocation failed
+        return -1;
+    }
+
+    // --- 4. Second pass: populate the newly allocated buffer ---
+    char* buffer_ptr = reinterpret_cast<char*>(*dirp);
     unsigned int pos = 0;
-    unsigned int written = 0;
-    struct dirent_extended* current_dirp = nullptr;
-    while(pos < open_dir.size()) {
-        auto de = open_dir[pos];
-        /*
-         * Calculate the total dentry size within the 'dirent_extended`
-         * depending on the file name size. The size is then aligned to the
-         * size of `long` boundary.
-         */
+    for(const auto& de : open_dir) {
         auto total_size = ALIGN(offsetof(struct dirent_extended, d_name) +
                                         (get<0>(de)).size() + 1,
                                 sizeof(uint64_t));
-        if(total_size > (count - written)) {
-            // no enough space left on user buffer to insert next dirent
-            break;
-        }
-        current_dirp = reinterpret_cast<struct dirent_extended*>(
-                reinterpret_cast<char*>(dirp) + written);
+
+        struct dirent_extended* current_dirp =
+                reinterpret_cast<struct dirent_extended*>(buffer_ptr);
         current_dirp->d_reclen = total_size;
         current_dirp->d_type = get<1>(de);
@@ -2041,13 +2084,12 @@ gkfs_getsingleserverdir(const char* path, struct dirent_extended* dirp,
         LOG(DEBUG, "name {}: {} {} {} {} / size {}", pos, get<0>(de),
             get<1>(de), get<2>(de), get<3>(de), total_size);
         std::strcpy(&(current_dirp->d_name[0]), (get<0>(de)).c_str());
+
+        // Advance the buffer pointer for the next entry
+        buffer_ptr += total_size;
         ++pos;
-        written += total_size;
     }
-    if(written == 0) {
-        errno = EINVAL;
-        return -1;
-    }
-    return written;
-}
+    // --- 5. Return the total size of the allocated buffer ---
+    return total_required_size;
+}
\ No newline at end of file
diff --git a/src/client/rpc/forward_metadata.cpp b/src/client/rpc/forward_metadata.cpp
index ad0882cd2b2842ccc496ba1b47ae1add0871e87d..6bd9203ea82d7ec4b06110ff424c3441ab828446 100644
--- a/src/client/rpc/forward_metadata.cpp
+++ b/src/client/rpc/forward_metadata.cpp
@@ -47,6 +47,7 @@
 #include
 #include
 #include
+#include <zstd.h>
 
 using namespace std;
 
@@ -663,8 +664,8 @@ forward_get_metadentry_size(const std::string& path, const int copy) {
 
 /**
  * Send an RPC request to receive all entries of a directory.
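+ * Depending on gkfs::config::rpc::use_dirents_compression, each daemon
+ * returns its entries either zstd-compressed (AOS records) or raw (SOA
+ * layout); the deserialization helper below handles both cases.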
- * @param open_dir
- * @return error code
+ * @param path
+ * @return pair<int, shared_ptr<gkfs::filemap::OpenDir>>
  */
 pair<int, shared_ptr<gkfs::filemap::OpenDir>>
 forward_get_dirents(const string& path) {
@@ -674,14 +675,14 @@ forward_get_dirents(const string& path) {
         __func__);
     }
 
-    LOG(DEBUG, "{}() enter for path '{}'", __func__, path)
+    LOG(DEBUG, "{}() enter for path '{}'", __func__, path);
 
     auto const targets = CTX->distributor()->locate_directory_metadata();
 
     /* preallocate receiving buffer. The actual size is not known yet.
      *
      * On C++14 make_unique function also zeroes the newly allocated buffer.
-     * It turns out that this operation is increadibly slow for such a big
+     * It turns out that this operation is incredibly slow for such a big
      * buffer. Moreover we don't need a zeroed buffer here. */
     auto large_buffer = std::unique_ptr<char[]>(
@@ -739,27 +740,158 @@ forward_get_dirents(const string& path) {
     auto send_error = err != 0;
     auto open_dir = make_shared<gkfs::filemap::OpenDir>(path);
+
+    // Add special files
+    open_dir->add(".", gkfs::filemap::FileType::directory);
+    open_dir->add("..", gkfs::filemap::FileType::directory);
+
+    /**
+     * Helper lambda to deserialize the buffer received from the daemon.
+     *
+     * @param buffer_ptr Pointer to the buffer containing the data.
+     * @param num_entries_or_size If compression is on: Byte size of compressed
+     * data. If compression is off: Number of entries (count).
+     */
+    auto deserialize_dirents = [&](void* buffer_ptr,
+                                   size_t num_entries_or_size) {
+        if(gkfs::config::rpc::use_dirents_compression) {
+            // --- Compressed path (AOS layout) ---
+            // In this mode, num_entries_or_size is the BYTE SIZE of the
+            // compressed data
+            size_t capacity = num_entries_or_size;
+            unsigned long long uncompressed_size =
+                    ZSTD_getFrameContentSize(buffer_ptr, capacity);
+            LOG(DEBUG,
+                "{}() Zstd compressed dirents size: {}, uncompressed size: {}",
+                __func__, capacity, uncompressed_size);
+
+            if(uncompressed_size == ZSTD_CONTENTSIZE_ERROR ||
+               uncompressed_size == ZSTD_CONTENTSIZE_UNKNOWN) {
+                LOG(ERROR, "{}() Zstd error getting content size", __func__);
+                return;
+            }
+
+            std::vector<char> decomp(uncompressed_size);
+            size_t ret = ZSTD_decompress(decomp.data(), uncompressed_size,
+                                         buffer_ptr, capacity);
+
+            if(ZSTD_isError(ret)) {
+                LOG(ERROR, "{}() Zstd decompression error: {}", __func__,
+                    ZSTD_getErrorName(ret));
+                return;
+            }
+
+            char* ptr = decomp.data();
+            char* end = ptr + uncompressed_size;
+
+            while(ptr < end) {
+                // Format: [bool is_dir][null-term string name]
+                bool is_dir = *reinterpret_cast<bool*>(ptr);
+                ptr += sizeof(bool);
+
+                std::string name(ptr);
+                ptr += name.size() + 1; // Advance past name + \0
+                open_dir->add(name, is_dir ? gkfs::filemap::FileType::directory
+                                           : gkfs::filemap::FileType::regular);
+            }
+        } else {
+            // --- Legacy uncompressed path (SOA layout) ---
+            // In this mode, num_entries_or_size is the COUNT of entries
+            size_t num_entries = num_entries_or_size;
+
+            bool* bool_ptr = reinterpret_cast<bool*>(buffer_ptr);
+            char* names_ptr = reinterpret_cast<char*>(buffer_ptr) +
+                              (num_entries * sizeof(bool));
+
+            for(size_t j = 0; j < num_entries; j++) {
+                gkfs::filemap::FileType ftype =
+                        (*bool_ptr) ? gkfs::filemap::FileType::directory
+                                    : gkfs::filemap::FileType::regular;
+                bool_ptr++;
+
+                std::string name(names_ptr);
+                names_ptr += name.size() + 1;
+
+                open_dir->add(name, ftype);
+            }
+        }
+    };
+
     // wait for RPC responses
     for(std::size_t i = 0; i < handles.size(); ++i) {
         gkfs::rpc::get_dirents::output out;
 
         try {
-            // XXX We might need a timeout here to not wait forever for an
-            // output that never comes?
             out = handles[i].get().at(0);
+
             // skip processing dirent data if there was an error during send
-            // In this case all responses are gathered but their contents
-            // skipped
             if(send_error)
                 continue;
 
+            // --- Retry Logic for ENOBUFS ---
+            if(out.err() == ENOBUFS) {
+                // The buffer was too small. The daemon returns the required
+                // size in dirents_size.
+                size_t required_size = out.dirents_size();
+                LOG(DEBUG,
+                    "{}() Buffer too small for host '{}'. Required: {}, Available: {}. Retrying...",
+                    __func__, targets[i], required_size, per_host_buff_size);
+
+                // Allocate exact size needed
+                auto retry_buf =
+                        std::unique_ptr<char[]>(new char[required_size]);
+
+                // Expose new buffer for RMA
+                hermes::exposed_memory retry_exp;
+                try {
+                    retry_exp = ld_network_service->expose(
+                            std::vector<hermes::mutable_buffer>{
+                                    hermes::mutable_buffer{retry_buf.get(),
+                                                           required_size}},
+                            hermes::access_mode::write_only);
+                } catch(const std::exception& ex) {
+                    LOG(ERROR, "{}() Failed to expose retry buffer. err '{}'",
+                        __func__, ex.what());
+                    err = EBUSY;
+                    continue;
+                }
+
+                // Resend RPC to the specific host
+                auto endp = CTX->hosts().at(targets[i]);
+                gkfs::rpc::get_dirents::input retry_in(path, retry_exp);
+
+                try {
+                    auto retry_out = ld_network_service
+                                             ->post<gkfs::rpc::get_dirents>(
+                                                     endp, retry_in)
+                                             .get()
+                                             .at(0);
+                    if(retry_out.err() != 0) {
+                        LOG(ERROR, "{}() Retry failed on host '{}'. Error '{}'",
+                            __func__, targets[i], strerror(retry_out.err()));
+                        err = retry_out.err();
+                        continue;
+                    }
+
+                    // Success on retry: deserialize data
+                    deserialize_dirents(retry_buf.get(),
+                                        retry_out.dirents_size());
+
+                } catch(const std::exception& ex) {
+                    LOG(ERROR, "{}() Retry RPC failed for host '{}'. err '{}'",
+                        __func__, targets[i], ex.what());
+                    err = EBUSY;
+                }
+                continue; // Done with this host
+            }
+
+            // Normal error check
             if(out.err() != 0) {
                 LOG(ERROR,
                     "{}() Failed to retrieve dir entries from host '{}'. Error '{}', path '{}'",
                     __func__, targets[i], strerror(out.err()), path);
                 err = out.err();
-                // We need to gather all responses before exiting
                 continue;
             }
         } catch(const std::exception& ex) {
@@ -767,46 +899,100 @@ forward_get_dirents(const string& path) {
                 "{}() Failed to get rpc output.. [path: {}, target host: {}] err '{}'",
                 __func__, path, targets[i], ex.what());
             err = EBUSY;
-            // We need to gather all responses before exiting
             continue;
         }
 
-        // each server wrote information to its pre-defined region in
-        // large_buffer, recover it by computing the base_address for each
-        // particular server and adding the appropriate offsets
+        // Standard success path (initial buffer was large enough)
         assert(exposed_buffers[i].count() == 1);
         void* base_ptr = exposed_buffers[i].begin()->data();
-        bool* bool_ptr = reinterpret_cast<bool*>(base_ptr);
-        char* names_ptr = reinterpret_cast<char*>(base_ptr) +
-                          (out.dirents_size() * sizeof(bool));
-        // Add special files like an standard fs.
-        open_dir->add(".", gkfs::filemap::FileType::directory);
-        open_dir->add("..", gkfs::filemap::FileType::directory);
-        for(std::size_t j = 0; j < out.dirents_size(); j++) {
-
-            gkfs::filemap::FileType ftype =
-                    (*bool_ptr) ? gkfs::filemap::FileType::directory
-                                : gkfs::filemap::FileType::regular;
-            bool_ptr++;
-
-            // Check that we are not outside the recv_buff for this specific
-            // host
-            assert((names_ptr - reinterpret_cast<char*>(base_ptr)) > 0);
-            assert(static_cast<size_t>(
-                           names_ptr - reinterpret_cast<char*>(base_ptr)) <
-                   per_host_buff_size);
-
-            auto name = std::string(names_ptr);
-            // number of characters in entry + \0 terminator
-            names_ptr += name.size() + 1;
-
-            open_dir->add(name, ftype);
-        }
+        deserialize_dirents(base_ptr, out.dirents_size());
     }
 
     return make_pair(err, open_dir);
 }
+
+// This function takes the RPC output and the received buffer, and returns the
+// parsed entries.
+std::vector<std::tuple<std::string, bool, size_t, time_t>>
+decompress_and_parse_entries(const gkfs::rpc::get_dirents_extended::output& out,
+                             const void* compressed_buffer) {
+    if(out.err() != 0) {
+        throw std::runtime_error("Server returned an error: " +
+                                 std::to_string(out.err()));
+    }
+    if(out.dirents_size() == 0) {
+        return {}; // No entries, return empty vector
+    }
+
+    const char* p = nullptr;
+    const char* end = nullptr;
+    std::vector<char> decompressed_data;
+
+    if(gkfs::config::rpc::use_dirents_compression) {
+        // === STEP 1: Discover the original size from the Zstd frame header ===
+        const unsigned long long uncompressed_size =
+                ZSTD_getFrameContentSize(compressed_buffer, out.dirents_size());
+
+        if(uncompressed_size == ZSTD_CONTENTSIZE_ERROR) {
+            throw std::runtime_error(
+                    "Received data is not a valid Zstd frame.");
+        }
+        if(uncompressed_size == ZSTD_CONTENTSIZE_UNKNOWN) {
+            throw std::runtime_error(
+                    "Zstd frame content size is unknown and was not written in the frame.");
+        }
+
+        // === STEP 2: Decompress the data into a new buffer ===
+        decompressed_data.resize(uncompressed_size);
+        const size_t result_size =
+                ZSTD_decompress(decompressed_data.data(), uncompressed_size,
+                                compressed_buffer, out.dirents_size());
+
+        if(ZSTD_isError(result_size)) {
+            throw std::runtime_error(
+                    "Zstd decompression failed: " +
+                    std::string(ZSTD_getErrorName(result_size)));
+        }
+        if(result_size != uncompressed_size) {
+            throw std::runtime_error("Decompression size mismatch.");
+        }
+
+        p = decompressed_data.data();
+        end = p + uncompressed_size;
+    } else {
+        // No compression: Data is raw in the input buffer
+        p = static_cast<const char*>(compressed_buffer);
+        end = p + out.dirents_size();
+    }
+
+    // === STEP 3: Parse the data stream ===
+    // AOS Layout: [bool is_dir][size_t size][time_t ctime][string name\0]
+    std::vector<std::tuple<std::string, bool, size_t, time_t>> entries;
+
+    while(p < end) {
+        // Read is_dir
+        bool is_dir = *reinterpret_cast<const bool*>(p);
+        p += sizeof(bool);
+
+        // Read file_size
+        size_t file_size = *reinterpret_cast<const size_t*>(p);
+        p += sizeof(size_t);
+
+        // Read ctime
+        time_t ctime = *reinterpret_cast<const time_t*>(p);
+        p += sizeof(time_t);
+
+        // Read name (which is null-terminated)
+        std::string name(p);
+        p += name.length() + 1;
+
+        entries.emplace_back(name, is_dir, file_size, ctime);
+    }
+
+    return entries;
+}
+
 /**
  * Send an RPC request to receive all entries of a directory in a server.
  * @param path
@@ -827,119 +1013,103 @@ forward_get_dirents_single(const string& path, int server) {
         __func__);
     }
 
-    LOG(DEBUG, "{}() enter for path '{}'", __func__, path)
+    LOG(DEBUG, "{}() enter for path '{}', server '{}'", __func__, path, server);
 
     auto const targets = CTX->distributor()->locate_directory_metadata();
+    if((unsigned) server >= targets.size()) {
+        LOG(ERROR, "{}() Invalid server index '{}' for targets size '{}'",
+            __func__, server, targets.size());
+        return make_pair(EINVAL, nullptr);
+    }
 
-    /* preallocate receiving buffer. The actual size is not known yet.
-     *
-     * On C++14 make_unique function also zeroes the newly allocated buffer.
-     * It turns out that this operation is increadibly slow for such a big
-     * buffer. Moreover we don't need a zeroed buffer here.
-     */
-    auto large_buffer = std::unique_ptr<char[]>(
-            new char[gkfs::config::rpc::dirents_buff_size]);
+    size_t buffer_size = gkfs::config::rpc::dirents_buff_size;
+    auto large_buffer = std::unique_ptr<char[]>(new char[buffer_size]);
 
-    // We use the full size per server...
-    const std::size_t per_host_buff_size = gkfs::config::rpc::dirents_buff_size;
     auto output_ptr = make_unique<
             vector<tuple<string, bool, size_t, time_t>>>();
+    int err = 0;
+    const int max_retries = 2; // Prevent infinite loops
 
-    // expose local buffers for RMA from servers
-    std::vector<hermes::exposed_memory> exposed_buffers;
-    exposed_buffers.reserve(1);
-    std::size_t i = server;
-    try {
-        exposed_buffers.emplace_back(ld_network_service->expose(
-                std::vector<hermes::mutable_buffer>{hermes::mutable_buffer{
-                        large_buffer.get(), per_host_buff_size}},
-                hermes::access_mode::write_only));
-    } catch(const std::exception& ex) {
-        LOG(ERROR, "{}() Failed to expose buffers for RMA. err '{}'", __func__,
-            ex.what());
-        return make_pair(EBUSY, std::move(output_ptr));
-    }
-
-    auto err = 0;
-    // send RPCs
-    std::vector<hermes::rpc_handle<gkfs::rpc::get_dirents_extended>> handles;
-
-    auto endp = CTX->hosts().at(targets[i]);
+    for(int attempt = 0; attempt < max_retries; ++attempt) {
 
-    gkfs::rpc::get_dirents_extended::input in(path, exposed_buffers[0]);
+        // Expose the current buffer for RMA.
+        // This needs to be done on each iteration because the buffer might be
+        // reallocated.
+        hermes::exposed_memory exposed_buffer;
+        try {
+            exposed_buffer = ld_network_service->expose(
+                    std::vector<hermes::mutable_buffer>{hermes::mutable_buffer{
+                            large_buffer.get(), buffer_size}},
+                    hermes::access_mode::write_only);
+        } catch(const std::exception& ex) {
+            LOG(ERROR,
                "{}() Failed to expose buffers for RMA on attempt {}. err '{}'",
+                __func__, attempt, ex.what());
+            return make_pair(EBUSY, nullptr);
+        }
 
-    try {
-        LOG(DEBUG, "{}() Sending RPC to host: '{}'", __func__, targets[i]);
-        handles.emplace_back(
-                ld_network_service->post<gkfs::rpc::get_dirents_extended>(endp,
-                                                                          in));
-    } catch(const std::exception& ex) {
-        LOG(ERROR,
-            "{}() Unable to send non-blocking get_dirents() on {} [peer: {}] err '{}'",
-            __func__, path, targets[i], ex.what());
-        err = EBUSY;
-    }
+        auto endp = CTX->hosts().at(targets[server]);
+        gkfs::rpc::get_dirents_extended::input in(path, exposed_buffer);
+        gkfs::rpc::get_dirents_extended::output out;
 
-    LOG(DEBUG,
-        "{}() path '{}' send rpc_srv_get_dirents() rpc to '{}' targets. per_host_buff_size '{}' Waiting on reply next and deserialize",
-        __func__, path, targets.size(), per_host_buff_size);
+        try {
+            LOG(DEBUG,
+                "{}() Sending RPC to host '{}' (attempt {}, buffer size {})",
+                __func__, targets[server], attempt + 1, buffer_size);
+
+            auto handle =
+                    ld_network_service->post<gkfs::rpc::get_dirents_extended>(
+                            endp, in);
+            out = handle.get().at(0);
+        } catch(const std::exception& ex) {
+            LOG(ERROR, "{}() RPC post/get failed on attempt {}: {}", __func__,
+                attempt, ex.what());
+            err = EBUSY;
+            break; // Fatal error, break the loop
+        }
 
-    // wait for RPC responses
+        // === RETRY LOGIC ===
+        if(out.err() == ENOBUFS) {
+            size_t required_size = out.dirents_size();
+            LOG(WARNING,
+                "{}() Buffer too small. Server requested {} bytes. Retrying.",
+                __func__, required_size);
 
-    gkfs::rpc::get_dirents_extended::output out;
+            // Re-allocate the buffer to the exact size the server needs.
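+            // Protocol note (as implemented here): on ENOBUFS the daemon
+            // reports the exact byte count it needs in dirents_size(), so a
+            // single retry with that size is expected to succeed.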
+            buffer_size = required_size;
+            large_buffer = std::unique_ptr<char[]>(new char[buffer_size]);
 
-    try {
-        // XXX We might need a timeout here to not wait forever for an
-        // output that never comes?
-        out = handles[0].get().at(0);
-        // skip processing dirent data if there was an error during send
-        // In this case all responses are gathered but their contents skipped
+            // The `exposed_buffer` from this iteration will be destructed.
+            // The loop will continue for the next attempt with the new buffer.
+            continue;
 
-        if(out.err() != 0) {
-            LOG(ERROR,
-                "{}() Failed to retrieve dir entries from host '{}'. Error '{}', path '{}'",
-                __func__, targets[0], strerror(out.err()), path);
+        } else if(out.err() != 0) {
+            // A different, fatal server-side error occurred.
+            LOG(ERROR, "{}() Server returned a fatal error: {}", __func__,
+                strerror(out.err()));
             err = out.err();
-            // We need to gather all responses before exiting
+            break; // Break the loop
         }
-    } catch(const std::exception& ex) {
-        LOG(ERROR,
-            "{}() Failed to get rpc output.. [path: {}, target host: {}] err '{}'",
-            __func__, path, targets[0], ex.what());
-        err = EBUSY;
-        // We need to gather all responses before exiting
-    }
-
-    // The parenthesis is extremely important if not the cast will add as a
-    // size_t or a time_t and not as a char
-    auto out_buff_ptr = static_cast<char*>(exposed_buffers[0].begin()->data());
-    auto bool_ptr = reinterpret_cast<bool*>(out_buff_ptr);
-    auto size_ptr = reinterpret_cast<size_t*>(
-            (out_buff_ptr) + (out.dirents_size() * sizeof(bool)));
-    auto ctime_ptr = reinterpret_cast<time_t*>(
-            (out_buff_ptr) +
-            (out.dirents_size() * (sizeof(bool) + sizeof(size_t))));
-    auto names_ptr =
-            out_buff_ptr + (out.dirents_size() *
-                            (sizeof(bool) + sizeof(size_t) + sizeof(time_t)));
-
-    for(std::size_t j = 0; j < out.dirents_size(); j++) {
-
-        bool ftype = (*bool_ptr);
-        bool_ptr++;
-
-        size_t size = *size_ptr;
-        size_ptr++;
-
-        time_t ctime = *ctime_ptr;
-        ctime_ptr++;
-
-        auto name = std::string(names_ptr);
-        // number of characters in entry + \0 terminator
-        names_ptr += name.size() + 1;
-        output_ptr->emplace_back(
-                std::forward_as_tuple(name, ftype, size, ctime));
+
+        // --- SUCCESS! ---
+        // If we reach here, out.err() was 0.
+        LOG(DEBUG, "{}() RPC successful. Decompressing data.", __func__);
+        try {
+            auto entries_vector =
+                    decompress_and_parse_entries(out, large_buffer.get());
+            output_ptr = make_unique<
+                    vector<tuple<string, bool, size_t, time_t>>>(
+                    std::move(entries_vector));
+            err = 0; // Explicitly set success
+        } catch(const std::exception& ex) {
+            LOG(ERROR, "{}() Failed to decompress/parse entries: {}", __func__,
+                ex.what());
+            err = EBADMSG;
+        }
+
+        break; // Success, so we must break the retry loop.
     }
+
     return make_pair(err, std::move(output_ptr));
 }
diff --git a/src/client/rpc/forward_metadata_proxy.cpp b/src/client/rpc/forward_metadata_proxy.cpp
index 422ed0f95b9a06cfa8209761e5f983db745e5c9d..a2ea2d508aa141fe221d2d33e26cecf59efbf61e 100644
--- a/src/client/rpc/forward_metadata_proxy.cpp
+++ b/src/client/rpc/forward_metadata_proxy.cpp
@@ -27,7 +27,7 @@
 #include
 #include
-
+#include <zstd.h>
 
 using namespace std;
 
 namespace gkfs {
@@ -201,121 +201,169 @@ forward_get_metadentry_size_proxy(const std::string& path) {
     }
 }
 
-pair<int, unique_ptr<vector<tuple<string, bool, size_t, time_t>>>>
-forward_get_dirents_single_proxy(const string& path, int server) {
-    LOG(DEBUG, "{}() enter for path '{}'", __func__, path)
-    auto endp = CTX->proxy_host();
+// This function takes the RPC output and the received buffer, and returns the
+// parsed entries.
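+// Note: this helper deliberately mirrors decompress_and_parse_entries() in
+// forward_metadata.cpp; both decode the same AOS record stream of
+// [bool is_dir][size_t size][time_t ctime][name\0].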
+std::vector<std::tuple<std::string, bool, size_t, time_t>>
+decompress_and_parse_entries(
+        const gkfs::rpc::get_dirents_extended_proxy::output& out,
+        const void* compressed_buffer) {
+    if(out.err() != 0) {
+        throw std::runtime_error("Server returned an error: " +
+                                 std::to_string(out.err()));
+    }
+    if(out.dirents_size() == 0) {
+        return {}; // No entries, return empty vector
+    }
 
-    /* preallocate receiving buffer. The actual size is not known yet.
-     *
-     * On C++14 make_unique function also zeroes the newly allocated buffer.
-     * It turns out that this operation is increadibly slow for such a big
-     * buffer. Moreover we don't need a zeroed buffer here.
-     */
-    auto large_buffer = std::unique_ptr<char[]>(
-            new char[gkfs::config::rpc::dirents_buff_size_proxy]);
-
-    // We use the full size per server...
-    const std::size_t per_host_buff_size =
-            gkfs::config::rpc::dirents_buff_size_proxy;
-    auto output_ptr = make_unique<
-            vector<tuple<string, bool, size_t, time_t>>>();
+    const char* p = nullptr;
+    const char* end = nullptr;
+    std::vector<char> decompressed_data;
 
-    // expose local buffers for RMA from servers
-    std::vector<hermes::exposed_memory> exposed_buffers;
-    exposed_buffers.reserve(1);
-    try {
-        exposed_buffers.emplace_back(ld_proxy_service->expose(
-                std::vector<hermes::mutable_buffer>{hermes::mutable_buffer{
-                        large_buffer.get(), per_host_buff_size}},
-                hermes::access_mode::write_only));
-    } catch(const std::exception& ex) {
-        LOG(ERROR, "{}() Failed to expose buffers for RMA. err '{}'", __func__,
-            ex.what());
-        return make_pair(EBUSY, std::move(output_ptr));
+    if(gkfs::config::rpc::use_dirents_compression) {
+        // === STEP 1: Discover the original size from the Zstd frame header ===
+        const unsigned long long uncompressed_size =
+                ZSTD_getFrameContentSize(compressed_buffer, out.dirents_size());
+
+        if(uncompressed_size == ZSTD_CONTENTSIZE_ERROR) {
+            throw std::runtime_error(
+                    "Received data is not a valid Zstd frame.");
+        }
+        if(uncompressed_size == ZSTD_CONTENTSIZE_UNKNOWN) {
+            throw std::runtime_error(
+                    "Zstd frame content size is unknown and was not written in the frame.");
+        }
+
+        // === STEP 2: Decompress the data into a new buffer ===
+        decompressed_data.resize(uncompressed_size);
+        const size_t result_size =
+                ZSTD_decompress(decompressed_data.data(), uncompressed_size,
+                                compressed_buffer, out.dirents_size());
+
+        if(ZSTD_isError(result_size)) {
+            throw std::runtime_error(
+                    "Zstd decompression failed: " +
+                    std::string(ZSTD_getErrorName(result_size)));
+        }
+        if(result_size != uncompressed_size) {
+            throw std::runtime_error("Decompression size mismatch.");
+        }
+
+        p = decompressed_data.data();
+        end = p + uncompressed_size;
+    } else {
+        // No compression: Data is raw in the input buffer
+        p = static_cast<const char*>(compressed_buffer);
+        end = p + out.dirents_size();
     }
 
-    auto err = 0;
-    // send RPCs
-    std::vector<hermes::rpc_handle<gkfs::rpc::get_dirents_extended_proxy>>
-            handles;
+    // === STEP 3: Parse the data stream ===
+    // AOS Layout: [bool is_dir][size_t size][time_t ctime][string name\0]
+    std::vector<std::tuple<std::string, bool, size_t, time_t>> entries;
 
-    gkfs::rpc::get_dirents_extended_proxy::input in(path, server,
-                                                    exposed_buffers[0]);
+    while(p < end) {
+        // Read is_dir
+        bool is_dir = *reinterpret_cast<const bool*>(p);
+        p += sizeof(bool);
 
-    try {
-        LOG(DEBUG, "{}() Sending IPC to proxy", __func__);
-        handles.emplace_back(
-                ld_proxy_service->post<gkfs::rpc::get_dirents_extended_proxy>(
-                        endp, in));
-    } catch(const std::exception& ex) {
-        LOG(ERROR,
-            "{}() Unable to send non-blocking proxy_get_dirents() on {} [peer: proxy] err '{}'",
-            __func__, path, ex.what());
-        err = EBUSY;
+        // Read file_size
+        size_t file_size = *reinterpret_cast<const size_t*>(p);
+        p += sizeof(size_t);
+
+        // Read ctime
+        time_t ctime = *reinterpret_cast<const time_t*>(p);
+        p += sizeof(time_t);
+
+        // Read name (which is null-terminated)
+        std::string name(p);
+        p += name.length() + 1;
+
+        entries.emplace_back(name, is_dir, file_size, ctime);
     }
 
-    LOG(DEBUG,
-        "{}() path '{}' sent rpc_srv_get_dirents() rpc to proxy. Waiting on reply next and deserialize",
-        __func__, path);
+    return entries;
+}
 
-    // wait for RPC responses
+pair<int, unique_ptr<vector<tuple<string, bool, size_t, time_t>>>>
+forward_get_dirents_single_proxy(const string& path, int server) {
+    LOG(DEBUG, "{}() enter for path '{}', server '{}'", __func__, path, server);
+    auto endp = CTX->proxy_host();
 
-    gkfs::rpc::get_dirents_extended_proxy::output out;
+    // Start with the default optimistic buffer size
+    size_t buffer_size = gkfs::config::rpc::dirents_buff_size_proxy;
+    auto large_buffer = std::unique_ptr<char[]>(new char[buffer_size]);
 
-    try {
-        // XXX We might need a timeout here to not wait forever for an
-        // output that never comes?
-        out = handles[0].get().at(0);
-        // skip processing dirent data if there was an error during send
-        // In this case all responses are gathered but their contents skipped
-
-        if(out.err() != 0) {
-            LOG(ERROR,
-                "{}() Failed to retrieve dir entries from proxy. Error '{}', path '{}'",
-                __func__, strerror(out.err()), path);
+    auto output_ptr = make_unique<
+            vector<tuple<string, bool, size_t, time_t>>>();
+    int err = 0;
+    const int max_retries = 2; // Prevent infinite loops
+
+    for(int attempt = 0; attempt < max_retries; ++attempt) {
+        hermes::exposed_memory exposed_buffer;
+        try {
+            exposed_buffer = ld_proxy_service->expose(
+                    std::vector<hermes::mutable_buffer>{hermes::mutable_buffer{
+                            large_buffer.get(), buffer_size}},
+                    hermes::access_mode::write_only);
+        } catch(const std::exception& ex) {
+            LOG(ERROR, "{}() Failed to expose buffer on attempt {}: '{}'",
+                __func__, attempt, ex.what());
+            return make_pair(EBUSY, nullptr);
+        }
+
+        gkfs::rpc::get_dirents_extended_proxy::input in(path, server,
+                                                        exposed_buffer);
+        gkfs::rpc::get_dirents_extended_proxy::output out;
+
+        try {
+            LOG(DEBUG, "{}() Sending IPC to proxy (attempt {}, buffer size {})",
+                __func__, attempt + 1, buffer_size);
+            auto handle = ld_proxy_service
+                                  ->post<gkfs::rpc::get_dirents_extended_proxy>(
+                                          endp, in);
+            out = handle.get().at(0);
+        } catch(const std::exception& ex) {
+            LOG(ERROR, "{}() RPC to proxy failed on attempt {}: {}", __func__,
+                attempt, ex.what());
+            err = EBUSY;
+            break; // Fatal error, break retry loop
+        }
+
+        // --- RETRY LOGIC ---
+        if(out.err() == ENOBUFS) {
+            size_t required_size = out.dirents_size();
+            LOG(WARNING,
+                "{}() Buffer too small. Proxy requested {} bytes. Retrying.",
+                __func__, required_size);
+
+            buffer_size = required_size;
+            large_buffer = std::unique_ptr<char[]>(new char[buffer_size]);
+            continue; // Continue to the next attempt with the new buffer
+        } else if(out.err() != 0) {
+            LOG(ERROR, "{}() Proxy returned a fatal error: {}", __func__,
+                strerror(out.err()));
             err = out.err();
-            // We need to gather all responses before exiting
+            break; // Break the loop
         }
-    } catch(const std::exception& ex) {
-        LOG(ERROR,
-            "{}() Failed to get rpc output.. [path: {}, target host: proxy] err '{}'",
-            __func__, path, ex.what());
-        err = EBUSY;
-        // We need to gather all responses before exiting
-    }
-
-    // The parenthesis is extremely important if not the cast will add as a
-    // size_t or a time_t and not as a char
-    auto out_buff_ptr = static_cast<char*>(exposed_buffers[0].begin()->data());
-    auto bool_ptr = reinterpret_cast<bool*>(out_buff_ptr);
-    auto size_ptr = reinterpret_cast<size_t*>(
-            (out_buff_ptr) + (out.dirents_size() * sizeof(bool)));
-    auto ctime_ptr = reinterpret_cast<time_t*>(
-            (out_buff_ptr) +
-            (out.dirents_size() * (sizeof(bool) + sizeof(size_t))));
-    auto names_ptr =
-            out_buff_ptr + (out.dirents_size() *
-                            (sizeof(bool) + sizeof(size_t) + sizeof(time_t)));
-
-    for(std::size_t j = 0; j < out.dirents_size(); j++) {
-
-        bool ftype = (*bool_ptr);
-        bool_ptr++;
-
-        size_t size = *size_ptr;
-        size_ptr++;
-
-        time_t ctime = *ctime_ptr;
-        ctime_ptr++;
-
-        auto name = std::string(names_ptr);
-        // number of characters in entry + \0 terminator
-        names_ptr += name.size() + 1;
-        output_ptr->emplace_back(
-                std::forward_as_tuple(name, ftype, size, ctime));
+
+        // --- SUCCESS ---
+        LOG(DEBUG, "{}() RPC to proxy successful. Processing data.", __func__);
+        try {
+            // decompress_and_parse_entries handles the config toggle internally
+            auto entries_vector =
+                    decompress_and_parse_entries(out, large_buffer.get());
+            output_ptr = make_unique<
+                    vector<tuple<string, bool, size_t, time_t>>>(
+                    std::move(entries_vector));
+            err = 0;
+        } catch(const std::exception& ex) {
+            LOG(ERROR, "{}() Failed to process entries from proxy: {}",
+                __func__, ex.what());
+            err = EBADMSG;
+        }
+        break; // Success, break the retry loop
     }
+
     return make_pair(err, std::move(output_ptr));
 }
diff --git a/src/daemon/CMakeLists.txt b/src/daemon/CMakeLists.txt
index 916435ab9157b985223364656ede4888fe2bc2f2..ee265e9ad898c2851fe60b788823049d5ad2f4ac 100644
--- a/src/daemon/CMakeLists.txt
+++ b/src/daemon/CMakeLists.txt
@@ -73,6 +73,7 @@ target_link_libraries(
     Margo::Margo
     # others
     Threads::Threads
+    ZStd::ZStd
 )
 
 if (GKFS_ENABLE_AGIOS)
diff --git a/src/daemon/backend/metadata/db.cpp b/src/daemon/backend/metadata/db.cpp
index 76e9368697f582895003009c48d82103740b4c18..6483f74a5942d1bb5ed47a71f49f824e0bc81807 100644
--- a/src/daemon/backend/metadata/db.cpp
+++ b/src/daemon/backend/metadata/db.cpp
@@ -183,6 +183,17 @@ MetadataDB::get_dirents_extended(const std::string& dir) const {
     return backend_->get_dirents_extended(root_path);
 }
 
+std::vector<std::tuple<std::string, bool, size_t, time_t>>
+MetadataDB::get_all_dirents_extended(const std::string& dir) const {
+    auto root_path = dir;
+    assert(gkfs::path::is_absolute(root_path));
+    if(!gkfs::path::has_trailing_slash(root_path) && root_path.size() != 1) {
+        // add trailing slash only if missing and is not the root_folder "/"
+        root_path.push_back('/');
+    }
+    return backend_->get_all_dirents_extended(root_path);
+}
+
 /**
  * @internal
 * Code example for iterating all entries in KV store. This is for debug only as
This is for debug only as
diff --git a/src/daemon/backend/metadata/parallax_backend.cpp b/src/daemon/backend/metadata/parallax_backend.cpp
index 880718781113089cfea5ab1a93d98c18722d28c2..a21c2ddabc2ca8f50f073cdf2964f8cc1da564ea 100644
--- a/src/daemon/backend/metadata/parallax_backend.cpp
+++ b/src/daemon/backend/metadata/parallax_backend.cpp
@@ -534,6 +534,64 @@ ParallaxBackend::get_dirents_extended_impl(const std::string& dir) const {
     return entries;
 }
 
+std::vector<std::tuple<std::string, bool, size_t, time_t>>
+ParallaxBackend::get_all_dirents_extended_impl(
+        const std::string& root_path) const {
+    struct par_key K;
+
+    str2par(root_path, K);
+    const char* error = NULL;
+    par_scanner S = par_init_scanner(par_db_, &K, PAR_GREATER_OR_EQUAL, &error);
+    if(error) {
+        throw_status_excpt(fmt::format(
+                "Failed to get_all_dirents_extended_impl: err {}", error));
+    }
+
+    std::vector<std::tuple<std::string, bool, size_t, time_t>> entries;
+
+    while(par_is_valid(S)) {
+        struct par_key K2 = par_get_key(S);
+        struct par_value value = par_get_value(S);
+
+        std::string k(K2.data, K2.size);
+        std::string v(value.val_buffer, value.val_size);
+
+        if(k.size() < root_path.size() ||
+           k.substr(0, root_path.size()) != root_path) {
+            break;
+        }
+
+        // Skip the root_path itself if it's an exact match
+        if(k.size() == root_path.size()) {
+            if(par_get_next(S) && !par_is_valid(S))
+                break;
+            continue;
+        }
+
+        Metadata md(v);
+#ifdef HAS_RENAME
+        // Remove entries with negative blocks (rename)
+        if(md.blocks() == -1) {
+            if(par_get_next(S) && !par_is_valid(S))
+                break;
+            else
+                continue;
+        }
+#endif // HAS_RENAME
+        auto is_dir = S_ISDIR(md.mode());
+
+        entries.emplace_back(std::forward_as_tuple(std::move(k), is_dir,
+                                                   md.size(), md.ctime()));
+
+        if(par_get_next(S) && !par_is_valid(S))
+            break;
+    }
+    // If we don't close the scanner we cannot delete keys
+    par_close_scanner(S);
+
+    return entries;
+}
+
 /**
  * Code example for iterating all entries in KV store.
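Both backend implementations (Parallax above, RocksDB below) follow the same ordered-scan algorithm: seek to the prefix, iterate while keys still share it, and skip the root entry itself as well as rename tombstones. As a reference, here is the same logic over a `std::map`, which, like the KV stores, iterates keys in sorted order; the `blocks == -1` tombstone convention is taken from the surrounding code, the rest is an illustrative sketch:

```cpp
#include <cstddef>
#include <ctime>
#include <map>
#include <string>
#include <tuple>
#include <vector>

struct Md {
    bool is_dir;
    long blocks; // -1 marks a rename tombstone (HAS_RENAME convention)
    size_t size;
    time_t ctime;
};

// Reference version of get_all_dirents_extended over an in-memory ordered
// map. `root` must already carry its trailing slash.
std::vector<std::tuple<std::string, bool, size_t, time_t>>
scan_subtree(const std::map<std::string, Md>& db, const std::string& root) {
    std::vector<std::tuple<std::string, bool, size_t, time_t>> out;
    for(auto it = db.lower_bound(root); it != db.end(); ++it) {
        const std::string& k = it->first;
        if(k.compare(0, root.size(), root) != 0)
            break; // keys are sorted: we have left the prefix range for good
        if(k.size() == root.size())
            continue; // the scanned directory itself
        if(it->second.blocks == -1)
            continue; // skip rename tombstones
        out.emplace_back(k, it->second.is_dir, it->second.size,
                         it->second.ctime);
    }
    return out;
}
```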
This is for debug only as
diff --git a/src/daemon/backend/metadata/rocksdb_backend.cpp b/src/daemon/backend/metadata/rocksdb_backend.cpp
index da0d28de417bd40de8cb4d863762d833c1b65f6a..3d84fefbeb08a032cfcc45b5367fcbaecce243e5 100644
--- a/src/daemon/backend/metadata/rocksdb_backend.cpp
+++ b/src/daemon/backend/metadata/rocksdb_backend.cpp
@@ -393,6 +393,49 @@ RocksDBBackend::get_dirents_extended_impl(const std::string& dir) const {
     return entries;
 }
 
+// Return all extended entries in the subtree rooted at the specified path
+std::vector<std::tuple<std::string, bool, size_t, time_t>>
+RocksDBBackend::get_all_dirents_extended_impl(const std::string& dir) const {
+    auto root_path = dir;
+    rocksdb::ReadOptions ropts;
+    auto it = db_->NewIterator(ropts);
+
+    std::vector<std::tuple<std::string, bool, size_t, time_t>> entries;
+
+    for(it->Seek(root_path); it->Valid() && it->key().starts_with(root_path);
+        it->Next()) {
+
+        if(it->key().size() == root_path.size()) {
+            // skip this path because it is exactly the root_path
+            continue;
+        }
+
+        /***** Get File name *****/
+        auto name = it->key().ToString();
+        // Note: entries deeper than one level are intentionally kept here,
+        // unlike in get_dirents_extended_impl(): the whole subtree is returned
+        // remove prefix
+        name = name.substr(root_path.size());
+
+        // relative path of directory entries must not be empty
+        assert(!name.empty());
+
+        Metadata md(it->value().ToString());
+#ifdef HAS_RENAME
+        // Remove entries with negative blocks (rename)
+        if(md.blocks() == -1) {
+            continue;
+        }
+#endif // HAS_RENAME
+        auto is_dir = S_ISDIR(md.mode());
+
+        entries.emplace_back(std::forward_as_tuple(std::move(name), is_dir,
+                                                   md.size(), md.ctime()));
+    }
+    assert(it->status().ok());
+    return entries;
+}
+
 /**
  * Code example for iterating all entries in KV store. This is for debug only as
diff --git a/src/daemon/handler/srv_metadata.cpp b/src/daemon/handler/srv_metadata.cpp
index 8ba4b30056e2583deeb15966151eb46598a07871..42b66114f69de073617f7931e377730e31730171 100644
--- a/src/daemon/handler/srv_metadata.cpp
+++ b/src/daemon/handler/srv_metadata.cpp
@@ -52,7 +52,7 @@
 #include
 #include
 
-
+#include <zstd.h>
 using namespace std;
 
 namespace {
@@ -594,82 +594,120 @@ rpc_srv_get_dirents(hg_handle_t handle) {
     // tot_names_size (# characters in entry) + # entries * (bool size + char
     // size for \0 character)
-    size_t out_size =
+    size_t uncompressed_size =
             tot_names_size + entries.size() * (sizeof(bool) + sizeof(char));
-    if(bulk_size < out_size) {
-        // Source buffer is smaller than total output size
-        GKFS_DATA->spdlogger()->error(
-                "{}() Entries do not fit source buffer. bulk_size '{}' < out_size '{}' must be satisfied!",
-                __func__, bulk_size, out_size);
-        out.err = ENOBUFS;
-        return gkfs::rpc::cleanup_respond(&handle, &in, &out);
-    }
-    void* bulk_buf; // buffer for bulk transfer
-    // create bulk handle and allocated memory for buffer with out_size
-    // information
-    ret = margo_bulk_create(mid, 1, nullptr, &out_size, HG_BULK_READ_ONLY,
-                            &bulk_handle);
-    if(ret != HG_SUCCESS) {
-        GKFS_DATA->spdlogger()->error("{}() Failed to create bulk handle",
-                                      __func__);
-        return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle);
-    }
-    // access the internally allocated memory buffer and put it into bulk_buf
-    uint32_t actual_count; // number of segments.
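The rewritten handler that follows replaces this pre-allocated bulk buffer with a serialize-then-optionally-compress scheme built on zstd's one-shot API: `ZSTD_compressBound()` to size the scratch buffer, `ZSTD_compress()` at level 1 for low latency, and a matching decompression on the receiving side. A self-contained round-trip of exactly those calls (plain zstd API; the input data here is made up):

```cpp
#include <cstring>
#include <iostream>
#include <string>
#include <vector>
#include <zstd.h>

int main() {
    // Stand-in for serialized dirent records; repetitive data compresses well.
    std::string input(1 << 16, 'a');

    // Worst-case compressed size for this input, per the zstd contract.
    const size_t bound = ZSTD_compressBound(input.size());
    std::vector<char> compressed(bound);

    // Level 1 is the fastest setting; the patch uses it to keep RPC latency low.
    const size_t csize = ZSTD_compress(compressed.data(), bound, input.data(),
                                       input.size(), 1);
    if(ZSTD_isError(csize)) {
        std::cerr << ZSTD_getErrorName(csize) << '\n';
        return 1;
    }

    // The receiver must know (or bound) the uncompressed size; zstd can also
    // recover it from the frame header via ZSTD_getFrameContentSize().
    std::vector<char> restored(input.size());
    const size_t dsize = ZSTD_decompress(restored.data(), restored.size(),
                                         compressed.data(), csize);
    if(ZSTD_isError(dsize) || dsize != input.size())
        return 1;

    std::cout << input.size() << " -> " << csize << " bytes\n";
    return std::memcmp(input.data(), restored.data(), input.size()) == 0 ? 0 : 1;
}
```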
we use one here because we - // push the whole buffer at once - ret = margo_bulk_access(bulk_handle, 0, out_size, HG_BULK_READ_ONLY, 1, - &bulk_buf, &out_size, &actual_count); - if(ret != HG_SUCCESS || actual_count != 1) { - GKFS_DATA->spdlogger()->error( - "{}() Failed to access allocated buffer from bulk handle", - __func__); - return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); - } + std::vector compressed_data; + void* segment_ptr = nullptr; + size_t transfer_size = 0; + std::vector uncompressed_data; + uncompressed_data.reserve(uncompressed_size); - GKFS_DATA->spdlogger()->trace( - "{}() path '{}' entries '{}' out_size '{}'. Set up local read only bulk handle and allocated buffer with size '{}'", - __func__, in.path, entries.size(), out_size, out_size); + if(gkfs::config::rpc::use_dirents_compression) { + // Calculate uncompressed size for AOS layout + // name + \0 + bool - // Serialize output data on local buffer - auto out_buff_ptr = static_cast(bulk_buf); - auto bool_ptr = reinterpret_cast(out_buff_ptr); - auto names_ptr = out_buff_ptr + entries.size(); + const size_t compressed_bound = ZSTD_compressBound(uncompressed_size); - for(auto const& e : entries) { - if(e.first.empty()) { - GKFS_DATA->spdlogger()->warn( - "{}() Entry in readdir() empty. If this shows up, something else is very wrong.", - __func__); + compressed_data.resize(compressed_bound); + + + for(const auto& e : entries) { + GKFS_DATA->spdlogger()->debug("{}() Processing dirent '{}'", + __func__, e.first); + bool is_dir = e.second; + const char* bool_p = reinterpret_cast(&is_dir); + uncompressed_data.insert(uncompressed_data.end(), bool_p, + bool_p + sizeof(bool)); + uncompressed_data.insert(uncompressed_data.end(), e.first.c_str(), + e.first.c_str() + e.first.length() + 1); } - *bool_ptr = e.second; - bool_ptr++; - const auto name = e.first.c_str(); - ::strcpy(names_ptr, name); - // number of characters + \0 terminator - names_ptr += e.first.size() + 1; - } + size_t compressed_size = + ZSTD_compress(compressed_data.data(), compressed_bound, + uncompressed_data.data(), uncompressed_size, 1); - GKFS_DATA->spdlogger()->trace( - "{}() path '{}' entries '{}' out_size '{}'. Copied data to bulk_buffer. 
NEXT bulk_transfer", - __func__, in.path, entries.size(), out_size); + if(ZSTD_isError(compressed_size)) { + GKFS_DATA->spdlogger()->error("{}() Zstd compression failed: {}", + __func__, + ZSTD_getErrorName(compressed_size)); + out.err = EIO; + return gkfs::rpc::cleanup_respond(&handle, &in, &out); + } - ret = margo_bulk_transfer(mid, HG_BULK_PUSH, hgi->addr, in.bulk_handle, 0, - bulk_handle, 0, out_size); + if(bulk_size < compressed_size) { + GKFS_DATA->spdlogger()->error( + "{}() Compressed data ('{}' bytes) does not fit client buffer ('{}' bytes)", + __func__, compressed_size, bulk_size); + out.err = ENOBUFS; + out.dirents_size = compressed_size; + return gkfs::rpc::cleanup_respond(&handle, &in, &out); + } + + segment_ptr = compressed_data.data(); + transfer_size = compressed_size; + + GKFS_DATA->spdlogger()->trace( + "{}() Serialized '{}' entries to '{}' bytes, compressed to '{}' bytes.", + __func__, entries.size(), uncompressed_size, compressed_size); + + } else { + // === Compression Disabled === + if(bulk_size < uncompressed_size) { + GKFS_DATA->spdlogger()->error( + "{}() Uncompressed data ('{}' bytes) does not fit client buffer ('{}' bytes)", + __func__, uncompressed_size, bulk_size); + out.err = ENOBUFS; + out.dirents_size = uncompressed_size; + return gkfs::rpc::cleanup_respond(&handle, &in, &out); + } + + // Serialize output data on local buffer (SOA) + auto out_buff_ptr = static_cast(uncompressed_data.data()); + auto bool_ptr = reinterpret_cast(out_buff_ptr); + auto names_ptr = out_buff_ptr + entries.size(); + + for(auto const& e : entries) { + *bool_ptr = e.second; + bool_ptr++; + ::strcpy(names_ptr, e.first.c_str()); + names_ptr += e.first.size() + 1; + } + + segment_ptr = uncompressed_data.data(); + transfer_size = uncompressed_size; + + GKFS_DATA->spdlogger()->trace( + "{}() Serialized '{}' entries to '{}' bytes (Compression disabled).", + __func__, entries.size(), uncompressed_size); + } + // Create a zero-copy bulk handle that wraps our data vector for the push + // operation. + ret = margo_bulk_create(mid, 1, &segment_ptr, &transfer_size, + HG_BULK_READ_ONLY, &bulk_handle); if(ret != HG_SUCCESS) { GKFS_DATA->spdlogger()->error( - "{}() Failed to push '{}' dirents on path '{}' to client with bulk size '{}' and out_size '{}'", - __func__, entries.size(), in.path, bulk_size, out_size); + "{}() Failed to create zero-copy bulk handle", __func__); + return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); + } + + // Push data to client + ret = margo_bulk_transfer(mid, HG_BULK_PUSH, hgi->addr, in.bulk_handle, 0, + bulk_handle, 0, transfer_size); + if(ret != HG_SUCCESS) { + GKFS_DATA->spdlogger()->error("{}() Failed to push data to client", + __func__); out.err = EBUSY; return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); } - out.dirents_size = entries.size(); + // Respond + out.dirents_size = transfer_size; out.err = 0; + GKFS_DATA->spdlogger()->debug( - "{}() Sending output response err '{}' dirents_size '{}'. DONE", - __func__, out.err, out.dirents_size); + "{}() Sending output response: err='{}', size='{}'. 
DONE", __func__, + out.err, out.dirents_size); if(GKFS_DATA->enable_stats()) { GKFS_DATA->stats()->add_value_iops( gkfs::utils::Stats::IopsOp::iops_dirent); @@ -677,6 +715,7 @@ rpc_srv_get_dirents(hg_handle_t handle) { return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); } + /* Sends the name-size-ctime of a specific directory * Used to accelerate find * It mimics get_dirents, but uses a tuple @@ -696,6 +735,9 @@ rpc_srv_get_dirents(hg_handle_t handle) { * * All exceptions must be caught here and dealt with accordingly. Any errors are * placed in the response. + * + * We return all the dirents to avoid recursing directories (it is faster, and + * for io500 is better) * @endinteral * @param handle Mercury RPC handle * @return Mercury error code to Mercury @@ -708,7 +750,7 @@ rpc_srv_get_dirents_extended(hg_handle_t handle) { out.dirents_size = 0; hg_bulk_t bulk_handle = nullptr; - // Get input parmeters + // Get input parameters auto ret = margo_get_input(handle, &in); if(ret != HG_SUCCESS) { GKFS_DATA->spdlogger()->error( @@ -718,132 +760,150 @@ rpc_srv_get_dirents_extended(hg_handle_t handle) { return gkfs::rpc::cleanup_respond(&handle, &in, &out); } - // Retrieve size of source buffer + // Retrieve size of client's destination buffer auto hgi = margo_get_info(handle); auto mid = margo_hg_handle_get_instance(handle); - auto bulk_size = margo_bulk_get_size(in.bulk_handle); - GKFS_DATA->spdlogger()->debug("{}() Got RPC: path '{}' bulk_size '{}' ", - __func__, in.path, bulk_size); + auto client_bulk_size = margo_bulk_get_size(in.bulk_handle); + GKFS_DATA->spdlogger()->debug( + "{}() Got RPC: path '{}' client_bulk_size '{}' ", __func__, in.path, + client_bulk_size); // Get directory entries from local DB vector> entries{}; try { - entries = gkfs::metadata::get_dirents_extended(in.path); + entries = gkfs::metadata::get_all_dirents_extended(in.path); } catch(const ::exception& e) { GKFS_DATA->spdlogger()->error("{}() Error during get_dirents(): '{}'", __func__, e.what()); return gkfs::rpc::cleanup_respond(&handle, &in, &out); } - GKFS_DATA->spdlogger()->trace( - "{}() path '{}' Read database with '{}' entries", __func__, in.path, - entries.size()); - + // Handle empty directory case if(entries.empty()) { out.err = 0; return gkfs::rpc::cleanup_respond(&handle, &in, &out); } - // Calculate total output size - // TODO OPTIMIZATION: this can be calculated inside db_get_dirents - size_t tot_names_size = 0; - for(auto const& e : entries) { - tot_names_size += (get<0>(e)).size(); + // Serialize data into a vector + std::vector uncompressed_data; + // Optimization: Reserve a reasonable starting size to avoid reallocations + // Assuming avg filename length of 32 + metadata sizes + uncompressed_data.reserve(entries.size() * 48); + + for(const auto& e : entries) { + const auto& name = get<0>(e); + bool is_dir = get<1>(e); + size_t file_size = get<2>(e); + time_t ctime = get<3>(e); + + // Append data fields sequentially into the vector. The client will + // parse in this exact order. 
+ const char* bool_p = reinterpret_cast(&is_dir); + uncompressed_data.insert(uncompressed_data.end(), bool_p, + bool_p + sizeof(bool)); + + const char* size_p = reinterpret_cast(&file_size); + uncompressed_data.insert(uncompressed_data.end(), size_p, + size_p + sizeof(size_t)); + + const char* time_p = reinterpret_cast(&ctime); + uncompressed_data.insert(uncompressed_data.end(), time_p, + time_p + sizeof(time_t)); + + // Append string and null terminator + uncompressed_data.insert(uncompressed_data.end(), name.c_str(), + name.c_str() + name.length() + 1); } - // tot_names_size (# characters in entry) + # entries * (bool size + char - // size for \0 character) - size_t out_size = - tot_names_size + entries.size() * (sizeof(bool) + sizeof(char) + - sizeof(size_t) + sizeof(time_t)); - if(bulk_size < out_size) { - // Source buffer is smaller than total output size - GKFS_DATA->spdlogger()->error( - "{}() Entries do not fit source buffer. bulk_size '{}' < out_size '{}' must be satisfied!", - __func__, bulk_size, out_size); - out.err = ENOBUFS; - return gkfs::rpc::cleanup_respond(&handle, &in, &out); - } - - void* bulk_buf; // buffer for bulk transfer - // create bulk handle and allocated memory for buffer with out_size - // information - ret = margo_bulk_create(mid, 1, nullptr, &out_size, HG_BULK_READ_ONLY, - &bulk_handle); - if(ret != HG_SUCCESS) { - GKFS_DATA->spdlogger()->error("{}() Failed to create bulk handle", - __func__); - return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); - } - // access the internally allocated memory buffer and put it into bulk_buf - uint32_t actual_count; // number of segments. we use one here because we - // push the whole buffer at once - ret = margo_bulk_access(bulk_handle, 0, out_size, HG_BULK_READ_ONLY, 1, - &bulk_buf, &out_size, &actual_count); - if(ret != HG_SUCCESS || actual_count != 1) { - GKFS_DATA->spdlogger()->error( - "{}() Failed to access allocated buffer from bulk handle", - __func__); - return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); - } + const size_t uncompressed_size = uncompressed_data.size(); + void* segment_ptr = nullptr; + size_t transfer_size = 0; + + // Variable to hold compressed data if compression is enabled + std::vector compressed_data; + + if(gkfs::config::rpc::use_dirents_compression) { + // === Compression Enabled === + const size_t compressed_bound = ZSTD_compressBound(uncompressed_size); + compressed_data.resize(compressed_bound); + + // Level 1 is fastest, 3 is default. using 1 for low latency. + const size_t compressed_size = + ZSTD_compress(compressed_data.data(), compressed_bound, + uncompressed_data.data(), uncompressed_size, 1); + + if(ZSTD_isError(compressed_size)) { + GKFS_DATA->spdlogger()->error("{}() Zstd compression failed: {}", + __func__, + ZSTD_getErrorName(compressed_size)); + out.err = EIO; + return gkfs::rpc::cleanup_respond(&handle, &in, &out); + } - GKFS_DATA->spdlogger()->trace( - "{}() path '{}' entries '{}' out_size '{}'. 
Set up local read only bulk handle and allocated buffer with size '{}'", - __func__, in.path, entries.size(), out_size, out_size); - - // Serialize output data on local buffer - // The parenthesis are extremely important, if not the + will be size_t or - // time_t size and not char - auto out_buff_ptr = static_cast(bulk_buf); - auto bool_ptr = reinterpret_cast(out_buff_ptr); - auto size_ptr = reinterpret_cast((out_buff_ptr) + - (entries.size() * sizeof(bool))); - auto ctime_ptr = reinterpret_cast( - (out_buff_ptr) + - (entries.size() * (sizeof(bool) + sizeof(size_t)))); - auto names_ptr = - out_buff_ptr + - (entries.size() * (sizeof(bool) + sizeof(size_t) + sizeof(time_t))); + // Check fits in client buffer + if(client_bulk_size < compressed_size) { + GKFS_DATA->spdlogger()->error( + "{}() Compressed data ('{}' bytes) does not fit client buffer ('{}' bytes)", + __func__, compressed_size, client_bulk_size); + out.err = ENOBUFS; + out.dirents_size = compressed_size; + return gkfs::rpc::cleanup_respond(&handle, &in, &out); + } - for(auto const& e : entries) { - if((get<0>(e)).empty()) { - GKFS_DATA->spdlogger()->warn( - "{}() Entry in readdir() empty. If this shows up, something else is very wrong.", - __func__); + segment_ptr = compressed_data.data(); + transfer_size = compressed_size; + + GKFS_DATA->spdlogger()->trace( + "{}() Serialized '{}' entries to '{}' bytes, compressed to '{}' bytes.", + __func__, entries.size(), uncompressed_size, compressed_size); + + } else { + // === Compression Disabled === + if(client_bulk_size < uncompressed_size) { + GKFS_DATA->spdlogger()->error( + "{}() Uncompressed data ('{}' bytes) does not fit client buffer ('{}' bytes)", + __func__, uncompressed_size, client_bulk_size); + out.err = ENOBUFS; + out.dirents_size = uncompressed_size; + return gkfs::rpc::cleanup_respond(&handle, &in, &out); } - *bool_ptr = (get<1>(e)); - bool_ptr++; - *size_ptr = (get<2>(e)); - size_ptr++; + segment_ptr = uncompressed_data.data(); + transfer_size = uncompressed_size; - *ctime_ptr = (get<3>(e)); - ctime_ptr++; + GKFS_DATA->spdlogger()->trace( + "{}() Serialized '{}' entries to '{}' bytes (Compression disabled).", + __func__, entries.size(), uncompressed_size); + } - const auto name = (get<0>(e)).c_str(); - ::strcpy(names_ptr, name); - // number of characters + \0 terminator - names_ptr += ((get<0>(e)).size() + 1); + // Create a zero-copy bulk handle that wraps our data vector for the push + // operation. + ret = margo_bulk_create(mid, 1, &segment_ptr, &transfer_size, + HG_BULK_READ_ONLY, &bulk_handle); + if(ret != HG_SUCCESS) { + GKFS_DATA->spdlogger()->error( + "{}() Failed to create zero-copy bulk handle", __func__); + return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); } - GKFS_DATA->spdlogger()->trace( - "{}() path '{}' entries '{}' out_size '{}'. Copied data to bulk_buffer. 
NEXT bulk_transfer", - __func__, in.path, entries.size(), out_size); + // Push data to client ret = margo_bulk_transfer(mid, HG_BULK_PUSH, hgi->addr, in.bulk_handle, 0, - bulk_handle, 0, out_size); + bulk_handle, 0, transfer_size); if(ret != HG_SUCCESS) { - GKFS_DATA->spdlogger()->error( - "{}() Failed to push '{}' dirents on path '{}' to client with bulk size '{}' and out_size '{}'", - __func__, entries.size(), in.path, bulk_size, out_size); + GKFS_DATA->spdlogger()->error("{}() Failed to push data to client", + __func__); out.err = EBUSY; return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); } - out.dirents_size = entries.size(); + // Respond + out.dirents_size = transfer_size; out.err = 0; + GKFS_DATA->spdlogger()->debug( - "{}() Sending output response err '{}' dirents_size '{}'. DONE", - __func__, out.err, out.dirents_size); + "{}() Sending output response: err='{}', size='{}'. DONE", __func__, + out.err, out.dirents_size); + return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); } diff --git a/src/daemon/ops/metadentry.cpp b/src/daemon/ops/metadentry.cpp index 08cdffcbbf3a03b15d6764d4535f4b25a9af9428..1c53a6da3f3e11e822afff62e4d12ef191aefa88 100644 --- a/src/daemon/ops/metadentry.cpp +++ b/src/daemon/ops/metadentry.cpp @@ -70,6 +70,11 @@ get_dirents_extended(const std::string& dir) { return GKFS_DATA->mdb()->get_dirents_extended(dir); } +std::vector> +get_all_dirents_extended(const std::string& dir) { + return GKFS_DATA->mdb()->get_all_dirents_extended(dir); +} + void create(const std::string& path, Metadata& md) { diff --git a/src/proxy/rpc/forward_metadata.cpp b/src/proxy/rpc/forward_metadata.cpp index 097f3e4ee340aa08e08c018ac45f8aaad9390fb8..8095630459fd92a84a32ad1820f58d97e67b96c2 100644 --- a/src/proxy/rpc/forward_metadata.cpp +++ b/src/proxy/rpc/forward_metadata.cpp @@ -407,75 +407,146 @@ forward_update_metadentry_size(const string& path, const size_t size, return make_pair(err, ret_offset); } -pair -forward_get_dirents_single(const std::string& path, int server, void* buf, - size_t bulk_size) { - hg_bulk_t bulk_handle = nullptr; - hg_handle_t rpc_handle = nullptr; - rpc_get_dirents_in_t daemon_in{}; - // register local target buffer for bulk access - auto* bulk_buf = buf; - auto size = make_shared(bulk_size); // XXX Why shared ptr? - auto ret = margo_bulk_create(PROXY_DATA->client_rpc_mid(), 1, &bulk_buf, - size.get(), HG_BULK_WRITE_ONLY, &bulk_handle); - if(ret != HG_SUCCESS) { - PROXY_DATA->log()->error("{}() Failed to create rpc bulk handle", - __func__); - return ::make_pair(EBUSY, 0); - } - daemon_in.path = path.c_str(); - daemon_in.bulk_handle = bulk_handle; - auto* endp = PROXY_DATA->rpc_endpoints().at(server); - ret = margo_create(PROXY_DATA->client_rpc_mid(), endp, - PROXY_DATA->rpc_client_ids().rpc_get_dirents_extended_id, - &rpc_handle); - if(ret != HG_SUCCESS) { - margo_destroy(rpc_handle); - margo_bulk_free(bulk_handle); - return ::make_pair(EBUSY, 0); - } - // Send RPC - margo_request rpc_waiter{}; - ret = margo_iforward(rpc_handle, &daemon_in, &rpc_waiter); - if(ret != HG_SUCCESS) { - PROXY_DATA->log()->error( - "{}() Unable to send non-blocking rpc for path {} and recipient {}", - __func__, path, server); +/** + * Forwards a get_dirents request from the proxy to a single daemon. + * This function handles the retry mechanism if the proxy's initial buffer + * is too small for the daemon's compressed response. + * + * @param path The directory path. + * @param server The target daemon server ID. 
+ * @return A pair containing: + * - int: The final error code (0 on success). + * - std::vector: A vector containing the compressed data on + * success. + */ +std::pair> +forward_get_dirents_single(const std::string& path, int server) { + + // Start with an optimistic buffer for the daemon's compressed response. + size_t daemon_buffer_size = + gkfs::config::rpc::dirents_buff_size; // Use daemon-facing buffer + // config + auto daemon_buffer = std::make_unique(daemon_buffer_size); + + int err = 0; + const int max_retries = 2; // Prevent infinite loops + + for(int attempt = 0; attempt < max_retries; ++attempt) { + hg_bulk_t bulk_handle = nullptr; + hg_handle_t rpc_handle = nullptr; + + // Use the current daemon_buffer for this attempt + void* bulk_buf_ptr = daemon_buffer.get(); + auto ret = margo_bulk_create(PROXY_DATA->client_rpc_mid(), 1, + &bulk_buf_ptr, &daemon_buffer_size, + HG_BULK_WRITE_ONLY, &bulk_handle); + if(ret != HG_SUCCESS) { + PROXY_DATA->log()->error( + "{}() Failed to create bulk handle for daemon RPC", + __func__); + return {EBUSY, {}}; + } + + rpc_get_dirents_in_t daemon_in{}; + daemon_in.path = path.c_str(); + daemon_in.bulk_handle = bulk_handle; + + auto* endp = PROXY_DATA->rpc_endpoints().at(server); + ret = margo_create( + PROXY_DATA->client_rpc_mid(), endp, + PROXY_DATA->rpc_client_ids().rpc_get_dirents_extended_id, + &rpc_handle); + if(ret != HG_SUCCESS) { + PROXY_DATA->log()->error( + "{}() Failed to create margo handle for daemon", __func__); + margo_bulk_free(bulk_handle); + return {EBUSY, {}}; + } + + margo_request rpc_waiter{}; + ret = margo_iforward(rpc_handle, &daemon_in, &rpc_waiter); + if(ret != HG_SUCCESS) { + PROXY_DATA->log()->error( + "{}() Failed to forward RPC to daemon for path {}", + __func__, path); + margo_destroy(rpc_handle); + margo_bulk_free(bulk_handle); + return {EBUSY, {}}; + } + + PROXY_DATA->log()->debug( + "{}() RPC sent to daemon, waiting for reply...", __func__); + ret = margo_wait(rpc_waiter); + if(ret != HG_SUCCESS) { + PROXY_DATA->log()->error( + "{}() Failed to wait for margo_request handle for path {}", + __func__, path); + err = EBUSY; + // Fall through to cleanup + } + + + rpc_get_dirents_out_t daemon_out{}; + // Only get output if the wait succeeded + if(err == 0) { + ret = margo_get_output(rpc_handle, &daemon_out); + if(ret != HG_SUCCESS) { + PROXY_DATA->log()->error( + "{}() Failed to get rpc output from daemon", __func__); + err = EBUSY; + } + } + + // If any RPC step failed, clean up and return the error. + if(err != 0) { + margo_free_output(rpc_handle, &daemon_out); + margo_destroy(rpc_handle); + margo_bulk_free(bulk_handle); + return {err, {}}; + } + + // --- DAEMON RETRY LOGIC --- + if(daemon_out.err == ENOBUFS) { + size_t required_size = daemon_out.dirents_size; + PROXY_DATA->log()->warn( + "{}() Daemon buffer too small. Daemon requested {}. 
Retrying.", + __func__, required_size); + + daemon_buffer_size = required_size; + daemon_buffer = std::make_unique(daemon_buffer_size); + + margo_free_output(rpc_handle, &daemon_out); + margo_destroy(rpc_handle); + margo_bulk_free(bulk_handle); + continue; // Continue to the next attempt with the new buffer + } else if(daemon_out.err != 0) { + err = daemon_out.err; + PROXY_DATA->log()->error("{}() Daemon returned a fatal error: {}", + __func__, strerror(err)); + margo_free_output(rpc_handle, &daemon_out); + margo_destroy(rpc_handle); + margo_bulk_free(bulk_handle); + return {err, {}}; + } + + // --- SUCCESS FROM DAEMON --- + size_t final_compressed_size = daemon_out.dirents_size; + PROXY_DATA->log()->debug( + "{}() Successfully received {} bytes from daemon.", __func__, + final_compressed_size); + std::vector result_data(final_compressed_size); + memcpy(result_data.data(), daemon_buffer.get(), final_compressed_size); + + margo_free_output(rpc_handle, &daemon_out); margo_destroy(rpc_handle); margo_bulk_free(bulk_handle); - return ::make_pair(EBUSY, 0); - } - PROXY_DATA->log()->debug("{}() 1 RPC sent, waiting for reply ...", - __func__); - int err = 0; - size_t dirents_size = 0; - ret = margo_wait(rpc_waiter); - if(ret != HG_SUCCESS) { - PROXY_DATA->log()->error( - "{}() Unable to wait for margo_request handle for path {} recipient {}", - __func__, path, server); - err = EBUSY; - } - // decode response - rpc_get_dirents_out_t daemon_out{}; - ret = margo_get_output(rpc_handle, &daemon_out); - if(ret != HG_SUCCESS) { - PROXY_DATA->log()->error( - "{}() Failed to get rpc output for path {} recipient {}", - __func__, path, server); - err = EBUSY; + return {0, std::move(result_data)}; } - PROXY_DATA->log()->debug( - "{}() Got response from target '{}': err '{}' with dirent_size '{}'", - __func__, server, daemon_out.err, daemon_out.dirents_size); - if(daemon_out.err != 0) - err = daemon_out.err; - else - dirents_size = daemon_out.dirents_size; - margo_free_output(rpc_handle, &daemon_out); - margo_destroy(rpc_handle); - margo_bulk_free(bulk_handle); - return ::make_pair(err, dirents_size); -} + // If we exit the loop, it means we exceeded max retries + PROXY_DATA->log()->error( + "{}() Exceeded max retries communicating with daemon for path {}", + __func__, path); + return {EBUSY, {}}; +} } // namespace gkfs::rpc diff --git a/src/proxy/rpc/srv_metadata.cpp b/src/proxy/rpc/srv_metadata.cpp index c321380edb80c8d89eba6978bd8589f18e97f05f..eafe4795eef544597ada3b6e9ed2706abdef73ec 100644 --- a/src/proxy/rpc/srv_metadata.cpp +++ b/src/proxy/rpc/srv_metadata.cpp @@ -197,7 +197,6 @@ proxy_rpc_srv_get_dirents_extended(hg_handle_t handle) { rpc_proxy_get_dirents_in_t client_in{}; rpc_get_dirents_out_t client_out{}; - hg_bulk_t bulk_handle = nullptr; auto ret = margo_get_input(handle, &client_in); if(ret != HG_SUCCESS) { @@ -205,69 +204,89 @@ proxy_rpc_srv_get_dirents_extended(hg_handle_t handle) { __func__); return gkfs::rpc::cleanup_respond(&handle, &client_in, &client_out); } - PROXY_DATA->log()->debug("{}() path: '{}', server: '{}'", __func__, - client_in.path, client_in.server); auto hgi = margo_get_info(handle); auto mid = margo_hg_handle_get_instance(handle); - auto bulk_size = margo_bulk_get_size(client_in.bulk_handle); - PROXY_DATA->log()->debug("{}() Got RPC with path '{}' bulk_size '{}'", - __func__, client_in.path, bulk_size); - /* - * Set up buffer for push from daemon - */ - void* bulk_buf; // buffer for bulk transfer - // create bulk handle and allocated memory for buffer with buf_sizes - // 
information - ret = margo_bulk_create(mid, 1, nullptr, &bulk_size, HG_BULK_READWRITE, - &bulk_handle); - if(ret != HG_SUCCESS) { - PROXY_DATA->log()->error("{}() Failed to create bulk handle", __func__); + auto client_bulk_size = margo_bulk_get_size(client_in.bulk_handle); + PROXY_DATA->log()->debug( + "{}() Got RPC: path '{}', server '{}', client_bulk_size '{}'", + __func__, client_in.path, client_in.server, client_bulk_size); + + // --- 1. Forward the request to the daemon layer --- + // This call now encapsulates the entire retry loop with the daemon. + auto daemon_response = gkfs::rpc::forward_get_dirents_single( + client_in.path, client_in.server); + + int daemon_err = daemon_response.first; + // Renamed from compressed_data to payload to support both modes + const auto& payload = daemon_response.second; + + // --- 2. Handle errors from the daemon --- + if(daemon_err != 0) { + PROXY_DATA->log()->error( + "{}() Failure when forwarding to daemon with err '{}'", + __func__, strerror(daemon_err)); + client_out.err = daemon_err; + client_out.dirents_size = 0; return gkfs::rpc::cleanup_respond(&handle, &client_in, &client_out); } - // access the internally allocated memory buffer - uint32_t actual_count; // number of segments. we use one here because we - // pull the whole buffer at once - ret = margo_bulk_access(bulk_handle, 0, bulk_size, HG_BULK_READWRITE, 1, - &bulk_buf, &bulk_size, &actual_count); - if(ret != HG_SUCCESS || actual_count != 1) { - PROXY_DATA->log()->error( - "{}() Failed to access allocated buffer from bulk handle", - __func__); - return gkfs::rpc::cleanup_respond(&handle, &client_in, &client_out, - &bulk_handle); + + // --- 3. Check if the successfully received data fits in the client's + // buffer --- + size_t payload_size = payload.size(); + + if(client_bulk_size < payload_size) { + PROXY_DATA->log()->warn( + "{}() Client buffer is too small ({} < {}). Data is {}. Informing client to retry.", + __func__, client_bulk_size, payload_size, + gkfs::config::rpc::use_dirents_compression ? "compressed" + : "uncompressed"); + + client_out.err = ENOBUFS; + client_out.dirents_size = payload_size; // Tell client the size it needs + + // Respond WITHOUT transferring data + return gkfs::rpc::cleanup_respond(&handle, &client_in, &client_out); } - // Forward request to daemon, using bulk_buf, containing the allocated - // buffer (which is pushed the data by the daemon) - auto daemon_out = gkfs::rpc::forward_get_dirents_single( - client_in.path, client_in.server, bulk_buf, bulk_size); - if(daemon_out.first != 0) { + + // --- 4. Success Path: Data fits, push it to the client --- + hg_bulk_t push_handle = nullptr; + + void* push_buf = const_cast(payload.data()); + size_t push_size = payload_size; + + // Create a zero-copy bulk handle that wraps our data vector for the push + // operation. 
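Stepping back from the transfer details: the ENOBUFS handshake above is the same at every hop (client to proxy, proxy to daemon). The responder never truncates; it replies ENOBUFS and reuses `dirents_size` to report the exact byte count it needed, and the caller reallocates to that size and retries once. A transport-free toy version of the handshake (everything here is a simplified stand-in, not the Margo/Hermes API):

```cpp
#include <cerrno>
#include <cstddef>
#include <cstring>
#include <iostream>
#include <memory>
#include <string>

struct Reply {
    int err;
    size_t dirents_size; // on ENOBUFS: the exact size the responder needed
};

// Toy responder standing in for the daemon/proxy side: it has `payload` bytes
// to hand over and refuses (ENOBUFS + exact size) if the buffer is too small.
Reply respond(const std::string& payload, char* buf, size_t cap) {
    if(cap < payload.size())
        return {ENOBUFS, payload.size()};
    std::memcpy(buf, payload.data(), payload.size());
    return {0, payload.size()};
}

int main() {
    const std::string payload(4096, 'x'); // pretend serialized dirents
    size_t cap = 1024;                    // optimistic initial buffer
    auto buf = std::make_unique<char[]>(cap);

    const int max_retries = 2; // same bound the patch uses
    for(int attempt = 0; attempt < max_retries; ++attempt) {
        Reply r = respond(payload, buf.get(), cap);
        if(r.err == ENOBUFS) {
            cap = r.dirents_size; // exact size, not a doubling heuristic
            buf = std::make_unique<char[]>(cap);
            continue; // a second attempt with the exact size must succeed
        }
        std::cout << "received " << r.dirents_size << " bytes on attempt "
                  << attempt + 1 << '\n';
        break;
    }
}
```

Because the reported size is exact rather than a guess, two attempts always suffice, which is why `max_retries = 2` is a hard bound and not a tuning knob.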
+ ret = margo_bulk_create(mid, 1, &push_buf, &push_size, HG_BULK_READ_ONLY, + &push_handle); + if(ret != HG_SUCCESS) { PROXY_DATA->log()->error( - "{}() Failure when forwarding to daemon with err '{}'", - __func__, daemon_out.first); - client_out.err = daemon_out.first; - return gkfs::rpc::cleanup_respond(&handle, &client_in, &client_out, - &bulk_handle); + "{}() Failed to create bulk handle for client push", __func__); + client_out.err = EBUSY; + return gkfs::rpc::cleanup_respond(&handle, &client_in, &client_out); } - // Push data to client here if no error was reported by the daemon + ret = margo_bulk_transfer(mid, HG_BULK_PUSH, hgi->addr, - client_in.bulk_handle, 0, bulk_handle, 0, - bulk_size); + client_in.bulk_handle, 0, push_handle, 0, + push_size); + + // We MUST free the temporary handle after the transfer. + margo_bulk_free(push_handle); + if(ret != HG_SUCCESS) { PROXY_DATA->log()->error( - "{}() Failed to push data from client for path '{}' with size '{}'", - __func__, client_in.path, bulk_size); + "{}() Failed to push data to client for path '{}' with size '{}'", + __func__, client_in.path, push_size); client_out.err = EBUSY; - return gkfs::rpc::cleanup_respond(&handle, &client_in, &client_out, - &bulk_handle); + } else { + client_out.err = 0; + client_out.dirents_size = payload_size; } - client_out.err = daemon_out.first; - client_out.dirents_size = daemon_out.second; PROXY_DATA->log()->debug("{}() Sending output err '{}' dirents_size '{}'", __func__, client_out.err, client_out.dirents_size); - return gkfs::rpc::cleanup_respond(&handle, &client_in, &client_out, - &bulk_handle); + + return gkfs::rpc::cleanup_respond(&handle, &client_in, &client_out); } DEFINE_MARGO_RPC_HANDLER(proxy_rpc_srv_get_dirents_extended) diff --git a/tests/integration/directories/test_directories.py b/tests/integration/directories/test_directories.py index 7e0fae21002ae99770c8896ca22a3da780f291ea..725ec2b1db24e996e77ae6d9796242302fbc592f 100644 --- a/tests/integration/directories/test_directories.py +++ b/tests/integration/directories/test_directories.py @@ -295,7 +295,12 @@ def test_extended(gkfs_daemon, gkfs_shell, gkfs_client): ) assert cmd.exit_code == 0 - assert cmd.stdout.decode() == "MATCHED 0/4\n" + output = cmd.stdout.decode() + expected_line = "MATCHED 0/4\n" + + assert expected_line in output, \ + f"Expected to find '{expected_line.strip()}' in the output, but got:\n---\n{output}\n---" + cmd = gkfs_shell.sfind( topdir, @@ -407,6 +412,10 @@ def test_extended_proxy(gkfs_daemon_proxy, gkfs_proxy, gkfs_shell_proxy, gkfs_cl ) assert cmd.exit_code == 0 - assert cmd.stdout.decode() == "MATCHED 0/4\n" + output = cmd.stdout.decode() + expected_line = "MATCHED 0/4\n" + + assert expected_line in output, \ + f"Expected to find '{expected_line.strip()}' in the output, but got:\n---\n{output}\n---" diff --git a/tests/scripts/compile_dep.sh/0.9.6-dev/agios.out b/tests/scripts/compile_dep.sh/0.9.6-dev/agios.out new file mode 100644 index 0000000000000000000000000000000000000000..ecc1b703fcc503fb48879e30a2e0c372f9fd2de5 --- /dev/null +++ b/tests/scripts/compile_dep.sh/0.9.6-dev/agios.out @@ -0,0 +1,12 @@ +######## Installing: lz4 ############################### +######## Installing: zstd ############################### +######## Installing: capstone ############################### +######## Installing: json-c ############################### +######## Installing: libfabric ############################### +######## Installing: mercury ############################### +######## Installing: argobots 
############################### +######## Installing: margo ############################### +######## Installing: rocksdb ############################### +######## Installing: syscall_intercept ############################### +######## Installing: agios ############################### + diff --git a/tests/scripts/compile_dep.sh/0.9.6-dev/arm.out b/tests/scripts/compile_dep.sh/0.9.6-dev/arm.out new file mode 100644 index 0000000000000000000000000000000000000000..df3b379f830c9047d3f4eebd438e3ceb0cc6dec9 --- /dev/null +++ b/tests/scripts/compile_dep.sh/0.9.6-dev/arm.out @@ -0,0 +1,12 @@ +######## Installing: lz4 ############################### +######## Installing: zstd ############################### +######## Installing: capstone ############################### +######## Installing: json-c ############################### +######## Installing: psm2 ############################### +WARNING: Install script for 'psm2' not found. Skipping. +######## Installing: libfabric ############################### +######## Installing: mercury ############################### +######## Installing: argobots ############################### +######## Installing: margo ############################### +######## Installing: rocksdb ############################### +######## Installing: syscall_intercept ############################### diff --git a/tests/scripts/compile_dep.sh/0.9.6-dev/ci.out b/tests/scripts/compile_dep.sh/0.9.6-dev/ci.out new file mode 100644 index 0000000000000000000000000000000000000000..9ec3c24bb12c16078d2b3869831331de15f5e305 --- /dev/null +++ b/tests/scripts/compile_dep.sh/0.9.6-dev/ci.out @@ -0,0 +1,8 @@ +######## Installing: libfabric ############################### +######## Installing: mercury ############################### +######## Installing: argobots ############################### +######## Installing: margo ############################### +######## Installing: rocksdb ############################### +######## Installing: syscall_intercept ############################### +######## Installing: agios ############################### +######## Installing: parallax ############################### diff --git a/tests/scripts/compile_dep.sh/0.9.6-dev/default.out b/tests/scripts/compile_dep.sh/0.9.6-dev/default.out new file mode 100644 index 0000000000000000000000000000000000000000..b42ca56ca0733d359d74ca416160e7d8fb781248 --- /dev/null +++ b/tests/scripts/compile_dep.sh/0.9.6-dev/default.out @@ -0,0 +1,11 @@ +######## Installing: lz4 ############################### +######## Installing: zstd ############################### +######## Installing: capstone ############################### +######## Installing: json-c ############################### +######## Installing: libfabric ############################### +######## Installing: mercury ############################### +######## Installing: argobots ############################### +######## Installing: margo ############################### +######## Installing: rocksdb ############################### +######## Installing: syscall_intercept ############################### + diff --git a/tests/scripts/compile_dep.sh/0.9.6-dev/default_zmq.out b/tests/scripts/compile_dep.sh/0.9.6-dev/default_zmq.out new file mode 100644 index 0000000000000000000000000000000000000000..28affca2552c854558b2d8f5e52c55f373ec95d1 --- /dev/null +++ b/tests/scripts/compile_dep.sh/0.9.6-dev/default_zmq.out @@ -0,0 +1,12 @@ +######## Installing: lz4 ############################### +######## Installing: zstd ############################### +######## Installing: capstone 
############################### +######## Installing: json-c ############################### +######## Installing: libfabric ############################### +######## Installing: mercury ############################### +######## Installing: argobots ############################### +######## Installing: margo ############################### +######## Installing: rocksdb ############################### +######## Installing: syscall_intercept ############################### +######## Installing: libzmq ############################### +######## Installing: cppzmq ############################### diff --git a/tests/scripts/compile_dep.sh/0.9.6-dev/infiniband_verbs.out b/tests/scripts/compile_dep.sh/0.9.6-dev/infiniband_verbs.out new file mode 100644 index 0000000000000000000000000000000000000000..ee185cf1cd1a2adfa353d151ae90860c065e2e16 --- /dev/null +++ b/tests/scripts/compile_dep.sh/0.9.6-dev/infiniband_verbs.out @@ -0,0 +1,10 @@ +######## Installing: lz4 ############################### +######## Installing: zstd ############################### +######## Installing: capstone ############################### +######## Installing: json-c ############################### +######## Installing: libfabric%verbs ############################### +######## Installing: mercury ############################### +######## Installing: argobots ############################### +######## Installing: margo ############################### +######## Installing: rocksdb ############################### +######## Installing: syscall_intercept ############################### diff --git a/tests/scripts/compile_dep.sh/0.9.6-dev/marenostrum4.out b/tests/scripts/compile_dep.sh/0.9.6-dev/marenostrum4.out new file mode 100644 index 0000000000000000000000000000000000000000..684ec20be2047d76869c4a639eced133fd58dfa2 --- /dev/null +++ b/tests/scripts/compile_dep.sh/0.9.6-dev/marenostrum4.out @@ -0,0 +1,13 @@ +######## Installing: lz4 ############################### +######## Installing: zstd ############################### +######## Installing: capstone ############################### +######## Installing: json-c ############################### +######## Installing: psm2 ############################### +WARNING: Install script for 'psm2' not found. Skipping. +######## Installing: libfabric ############################### +######## Installing: mercury ############################### +######## Installing: argobots ############################### +######## Installing: margo ############################### +######## Installing: rocksdb ############################### +######## Installing: syscall_intercept ############################### +######## Installing: parallax ############################### diff --git a/tests/scripts/compile_dep.sh/0.9.6-dev/mogon2.out b/tests/scripts/compile_dep.sh/0.9.6-dev/mogon2.out new file mode 100644 index 0000000000000000000000000000000000000000..df3b379f830c9047d3f4eebd438e3ceb0cc6dec9 --- /dev/null +++ b/tests/scripts/compile_dep.sh/0.9.6-dev/mogon2.out @@ -0,0 +1,12 @@ +######## Installing: lz4 ############################### +######## Installing: zstd ############################### +######## Installing: capstone ############################### +######## Installing: json-c ############################### +######## Installing: psm2 ############################### +WARNING: Install script for 'psm2' not found. Skipping. 
+######## Installing: libfabric ############################### +######## Installing: mercury ############################### +######## Installing: argobots ############################### +######## Installing: margo ############################### +######## Installing: rocksdb ############################### +######## Installing: syscall_intercept ############################### diff --git a/tests/scripts/compile_dep.sh/0.9.6-dev/mogon3.out b/tests/scripts/compile_dep.sh/0.9.6-dev/mogon3.out new file mode 100644 index 0000000000000000000000000000000000000000..476a8649a889d7afae9271bba5c98fc689045578 --- /dev/null +++ b/tests/scripts/compile_dep.sh/0.9.6-dev/mogon3.out @@ -0,0 +1,11 @@ +######## Installing: lz4 ############################### +######## Installing: zstd ############################### +######## Installing: capstone ############################### +######## Installing: json-c ############################### +######## Installing: mercury ############################### +######## Installing: argobots ############################### +######## Installing: margo ############################### +######## Installing: rocksdb ############################### +######## Installing: syscall_intercept ############################### +######## Installing: libzmq ############################### +######## Installing: cppzmq ############################### diff --git a/tests/scripts/compile_dep.sh/0.9.6-dev/ngio.out b/tests/scripts/compile_dep.sh/0.9.6-dev/ngio.out new file mode 100644 index 0000000000000000000000000000000000000000..684ec20be2047d76869c4a639eced133fd58dfa2 --- /dev/null +++ b/tests/scripts/compile_dep.sh/0.9.6-dev/ngio.out @@ -0,0 +1,13 @@ +######## Installing: lz4 ############################### +######## Installing: zstd ############################### +######## Installing: capstone ############################### +######## Installing: json-c ############################### +######## Installing: psm2 ############################### +WARNING: Install script for 'psm2' not found. Skipping. +######## Installing: libfabric ############################### +######## Installing: mercury ############################### +######## Installing: argobots ############################### +######## Installing: margo ############################### +######## Installing: rocksdb ############################### +######## Installing: syscall_intercept ############################### +######## Installing: parallax ############################### diff --git a/tests/scripts/compile_dep.sh/0.9.6-dev/omnipath_psm2.out b/tests/scripts/compile_dep.sh/0.9.6-dev/omnipath_psm2.out new file mode 100644 index 0000000000000000000000000000000000000000..df3b379f830c9047d3f4eebd438e3ceb0cc6dec9 --- /dev/null +++ b/tests/scripts/compile_dep.sh/0.9.6-dev/omnipath_psm2.out @@ -0,0 +1,12 @@ +######## Installing: lz4 ############################### +######## Installing: zstd ############################### +######## Installing: capstone ############################### +######## Installing: json-c ############################### +######## Installing: psm2 ############################### +WARNING: Install script for 'psm2' not found. Skipping. 
+######## Installing: libfabric ############################### +######## Installing: mercury ############################### +######## Installing: argobots ############################### +######## Installing: margo ############################### +######## Installing: rocksdb ############################### +######## Installing: syscall_intercept ############################### diff --git a/tests/scripts/compile_dep.sh/0.9.6-dev/p9.out b/tests/scripts/compile_dep.sh/0.9.6-dev/p9.out new file mode 100644 index 0000000000000000000000000000000000000000..df3b379f830c9047d3f4eebd438e3ceb0cc6dec9 --- /dev/null +++ b/tests/scripts/compile_dep.sh/0.9.6-dev/p9.out @@ -0,0 +1,12 @@ +######## Installing: lz4 ############################### +######## Installing: zstd ############################### +######## Installing: capstone ############################### +######## Installing: json-c ############################### +######## Installing: psm2 ############################### +WARNING: Install script for 'psm2' not found. Skipping. +######## Installing: libfabric ############################### +######## Installing: mercury ############################### +######## Installing: argobots ############################### +######## Installing: margo ############################### +######## Installing: rocksdb ############################### +######## Installing: syscall_intercept ############################### diff --git a/tests/scripts/compile_dep.sh/latest b/tests/scripts/compile_dep.sh/latest index 03834411d1529ea7337b4a2c2a4a34b689d2a54e..7ed1cca5bae0f0e116f49fcfbe0a93a06bb95ca5 120000 --- a/tests/scripts/compile_dep.sh/latest +++ b/tests/scripts/compile_dep.sh/latest @@ -1 +1 @@ -0.9.5 \ No newline at end of file +0.9.6-dev \ No newline at end of file diff --git a/tests/scripts/dl_dep.sh/0.9.6-dev/agios.out b/tests/scripts/dl_dep.sh/0.9.6-dev/agios.out new file mode 100644 index 0000000000000000000000000000000000000000..82b6bbc55265d7fd05e214e19aad3557cadf5a4c --- /dev/null +++ b/tests/scripts/dl_dep.sh/0.9.6-dev/agios.out @@ -0,0 +1,12 @@ +Downloaded 'https://github.com/lz4/lz4/archive/v1.9.4.tar.gz' to 'lz4' +Downloaded 'https://github.com/facebook/zstd/archive/v1.5.7.tar.gz' to 'zstd' +Downloaded 'https://github.com/aquynh/capstone/archive/6.0.0-Alpha1.tar.gz' to 'capstone' +Downloaded 'https://github.com/json-c/json-c/archive/json-c-0.17-20230812.tar.gz' to 'json-c' +Cloned 'https://github.com/ofiwg/libfabric.git' to 'libfabric' with commit '[HEAD]' and flags '--branch=v2.2.0' +Cloned 'https://github.com/mercury-hpc/mercury' to 'mercury' with commit '[v2.4.1rc1]' and flags '--recurse-submodules' +Downloaded 'https://github.com/pmodels/argobots/archive/v1.2.tar.gz' to 'argobots' +Cloned 'https://github.com/mochi-hpc/mochi-margo' to 'margo' with commit '[v0.18.3]' and flags '' +Downloaded 'https://github.com/facebook/rocksdb/archive/v10.4.2.tar.gz' to 'rocksdb' +Cloned 'https://github.com/GekkoFS/syscall_intercept.git' to 'syscall_intercept' with commit '[d8b2a69961921ed123625c79a609331fc56a8931]' and flags '' +Cloned 'https://github.com/francielizanon/agios.git' to 'agios' with commit '[c26a6544200f823ebb8f890dd94e653d148bf226]' and flags '--branch=development' +Done diff --git a/tests/scripts/dl_dep.sh/0.9.6-dev/arm.out b/tests/scripts/dl_dep.sh/0.9.6-dev/arm.out new file mode 100644 index 0000000000000000000000000000000000000000..c62a85e98779bf02c631b1ce224eac908e1273f8 --- /dev/null +++ b/tests/scripts/dl_dep.sh/0.9.6-dev/arm.out @@ -0,0 +1,12 @@ +Downloaded 
'https://github.com/lz4/lz4/archive/v1.9.4.tar.gz' to 'lz4' +Downloaded 'https://github.com/facebook/zstd/archive/v1.5.7.tar.gz' to 'zstd' +Downloaded 'https://github.com/aquynh/capstone/archive/6.0.0-Alpha1.tar.gz' to 'capstone' +Downloaded 'https://github.com/json-c/json-c/archive/json-c-0.17-20230812.tar.gz' to 'json-c' +Downloaded 'https://github.com/cornelisnetworks/opa-psm2/archive/PSM2_11.2.185.tar.gz' to 'psm2' +Cloned 'https://github.com/ofiwg/libfabric.git' to 'libfabric' with commit '[HEAD]' and flags '--branch=v2.2.0' +Cloned 'https://github.com/mercury-hpc/mercury' to 'mercury' with commit '[v2.4.1rc1]' and flags '--recurse-submodules' +Downloaded 'https://github.com/pmodels/argobots/archive/v1.2.tar.gz' to 'argobots' +Cloned 'https://github.com/mochi-hpc/mochi-margo' to 'margo' with commit '[v0.18.3]' and flags '' +Downloaded 'https://github.com/facebook/rocksdb/archive/v10.4.2.tar.gz' to 'rocksdb' +Cloned 'https://github.com/GekkoFS/syscall_intercept.git' to 'syscall_intercept' with commit '[34a41033fce94195700c5ab1e097f40741d7f016]' and flags '' +Done diff --git a/tests/scripts/dl_dep.sh/0.9.6-dev/ci.out b/tests/scripts/dl_dep.sh/0.9.6-dev/ci.out new file mode 100644 index 0000000000000000000000000000000000000000..c934934214b3e618ac3a563839b449c30c6dcd2c --- /dev/null +++ b/tests/scripts/dl_dep.sh/0.9.6-dev/ci.out @@ -0,0 +1,11 @@ +Cloned 'https://github.com/ofiwg/libfabric.git' to 'libfabric' with commit '[HEAD]' and flags '--branch=v2.2.0' +Cloned 'https://github.com/mercury-hpc/mercury' to 'mercury' with commit '[v2.4.1rc1]' and flags '--recurse-submodules' +Downloaded 'https://github.com/pmodels/argobots/archive/v1.2.tar.gz' to 'argobots' +Cloned 'https://github.com/mochi-hpc/mochi-margo' to 'margo' with commit '[v0.18.3]' and flags '' +Downloaded 'https://github.com/facebook/rocksdb/archive/v10.4.2.tar.gz' to 'rocksdb' +Cloned 'https://github.com/GekkoFS/syscall_intercept.git' to 'syscall_intercept' with commit '[2c8765fa292bc9c28a22624c528580d54658813d]' and flags '' +Applying patch '/builds/gitlab/hpc/gekkofs/scripts/patches/syscall_intercept.patch'... 
+Cloned 'https://github.com/francielizanon/agios.git' to 'agios' with commit '[c26a6544200f823ebb8f890dd94e653d148bf226]' and flags '--branch=development'
+Cloned 'https://github.com/CARV-ICS-FORTH/parallax.git' to 'parallax' with commit '[ffdea6e820f5c4c2d33e60d9a4b15ef9e6bbcfdd]' and flags ''
+Downloaded 'https://github.com/jupp0r/prometheus-cpp/releases/download/v1.0.0/prometheus-cpp-with-submodules.tar.gz' to 'prometheus-cpp'
+Done
diff --git a/tests/scripts/dl_dep.sh/0.9.6-dev/default.out b/tests/scripts/dl_dep.sh/0.9.6-dev/default.out
new file mode 100644
index 0000000000000000000000000000000000000000..a5224522a51c33607ceb650c3b4f412f7c4b243e
--- /dev/null
+++ b/tests/scripts/dl_dep.sh/0.9.6-dev/default.out
@@ -0,0 +1,11 @@
+Downloaded 'https://github.com/lz4/lz4/archive/v1.9.4.tar.gz' to 'lz4'
+Downloaded 'https://github.com/facebook/zstd/archive/v1.5.7.tar.gz' to 'zstd'
+Downloaded 'https://github.com/aquynh/capstone/archive/6.0.0-Alpha1.tar.gz' to 'capstone'
+Downloaded 'https://github.com/json-c/json-c/archive/json-c-0.17-20230812.tar.gz' to 'json-c'
+Cloned 'https://github.com/ofiwg/libfabric.git' to 'libfabric' with commit '[HEAD]' and flags '--branch=v2.2.0'
+Cloned 'https://github.com/mercury-hpc/mercury' to 'mercury' with commit '[v2.4.1rc1]' and flags '--recurse-submodules'
+Downloaded 'https://github.com/pmodels/argobots/archive/v1.2.tar.gz' to 'argobots'
+Cloned 'https://github.com/mochi-hpc/mochi-margo' to 'margo' with commit '[v0.18.3]' and flags ''
+Downloaded 'https://github.com/facebook/rocksdb/archive/v10.4.2.tar.gz' to 'rocksdb'
+Cloned 'https://github.com/GekkoFS/syscall_intercept.git' to 'syscall_intercept' with commit '[d8b2a69961921ed123625c79a609331fc56a8931]' and flags ''
+Done
diff --git a/tests/scripts/dl_dep.sh/0.9.6-dev/default_zmq.out b/tests/scripts/dl_dep.sh/0.9.6-dev/default_zmq.out
new file mode 100644
index 0000000000000000000000000000000000000000..ae2eb57175b5e5ffded6b1c660531e87f2c9db23
--- /dev/null
+++ b/tests/scripts/dl_dep.sh/0.9.6-dev/default_zmq.out
@@ -0,0 +1,13 @@
+Downloaded 'https://github.com/lz4/lz4/archive/v1.9.4.tar.gz' to 'lz4'
+Downloaded 'https://github.com/facebook/zstd/archive/v1.5.7.tar.gz' to 'zstd'
+Downloaded 'https://github.com/aquynh/capstone/archive/6.0.0-Alpha1.tar.gz' to 'capstone'
+Downloaded 'https://github.com/json-c/json-c/archive/json-c-0.17-20230812.tar.gz' to 'json-c'
+Cloned 'https://github.com/ofiwg/libfabric.git' to 'libfabric' with commit '[HEAD]' and flags '--branch=v2.2.0'
+Cloned 'https://github.com/mercury-hpc/mercury' to 'mercury' with commit '[v2.4.1rc1]' and flags '--recurse-submodules'
+Downloaded 'https://github.com/pmodels/argobots/archive/v1.2.tar.gz' to 'argobots'
+Cloned 'https://github.com/mochi-hpc/mochi-margo' to 'margo' with commit '[v0.18.3]' and flags ''
+Downloaded 'https://github.com/facebook/rocksdb/archive/v10.4.2.tar.gz' to 'rocksdb'
+Cloned 'https://github.com/GekkoFS/syscall_intercept.git' to 'syscall_intercept' with commit '[d8b2a69961921ed123625c79a609331fc56a8931]' and flags ''
+Downloaded 'https://github.com/zeromq/libzmq/archive/v4.3.5.tar.gz' to 'libzmq'
+Downloaded 'https://github.com/zeromq/cppzmq/archive/v4.10.0.tar.gz' to 'cppzmq'
+Done
diff --git a/tests/scripts/dl_dep.sh/0.9.6-dev/infiniband_verbs.out b/tests/scripts/dl_dep.sh/0.9.6-dev/infiniband_verbs.out
new file mode 100644
index 0000000000000000000000000000000000000000..a5224522a51c33607ceb650c3b4f412f7c4b243e
--- /dev/null
+++ b/tests/scripts/dl_dep.sh/0.9.6-dev/infiniband_verbs.out
@@ -0,0 +1,11 @@
+Downloaded 'https://github.com/lz4/lz4/archive/v1.9.4.tar.gz' to 'lz4'
+Downloaded 'https://github.com/facebook/zstd/archive/v1.5.7.tar.gz' to 'zstd'
+Downloaded 'https://github.com/aquynh/capstone/archive/6.0.0-Alpha1.tar.gz' to 'capstone'
+Downloaded 'https://github.com/json-c/json-c/archive/json-c-0.17-20230812.tar.gz' to 'json-c'
+Cloned 'https://github.com/ofiwg/libfabric.git' to 'libfabric' with commit '[HEAD]' and flags '--branch=v2.2.0'
+Cloned 'https://github.com/mercury-hpc/mercury' to 'mercury' with commit '[v2.4.1rc1]' and flags '--recurse-submodules'
+Downloaded 'https://github.com/pmodels/argobots/archive/v1.2.tar.gz' to 'argobots'
+Cloned 'https://github.com/mochi-hpc/mochi-margo' to 'margo' with commit '[v0.18.3]' and flags ''
+Downloaded 'https://github.com/facebook/rocksdb/archive/v10.4.2.tar.gz' to 'rocksdb'
+Cloned 'https://github.com/GekkoFS/syscall_intercept.git' to 'syscall_intercept' with commit '[d8b2a69961921ed123625c79a609331fc56a8931]' and flags ''
+Done
diff --git a/tests/scripts/dl_dep.sh/0.9.6-dev/marenostrum4.out b/tests/scripts/dl_dep.sh/0.9.6-dev/marenostrum4.out
new file mode 100644
index 0000000000000000000000000000000000000000..2e0403e4531a7fa7d10b646283830ef9ad408ae8
--- /dev/null
+++ b/tests/scripts/dl_dep.sh/0.9.6-dev/marenostrum4.out
@@ -0,0 +1,14 @@
+Downloaded 'https://github.com/lz4/lz4/archive/v1.9.4.tar.gz' to 'lz4'
+Downloaded 'https://github.com/facebook/zstd/archive/v1.5.7.tar.gz' to 'zstd'
+Downloaded 'https://github.com/aquynh/capstone/archive/6.0.0-Alpha1.tar.gz' to 'capstone'
+Downloaded 'https://github.com/json-c/json-c/archive/json-c-0.17-20230812.tar.gz' to 'json-c'
+Downloaded 'https://github.com/cornelisnetworks/opa-psm2/archive/PSM2_11.2.185.tar.gz' to 'psm2'
+Cloned 'https://github.com/ofiwg/libfabric.git' to 'libfabric' with commit '[HEAD]' and flags '--branch=v2.2.0'
+Cloned 'https://github.com/mercury-hpc/mercury' to 'mercury' with commit '[v2.4.1rc1]' and flags '--recurse-submodules'
+Downloaded 'https://github.com/pmodels/argobots/archive/v1.2.tar.gz' to 'argobots'
+Cloned 'https://github.com/mochi-hpc/mochi-margo' to 'margo' with commit '[v0.18.3]' and flags ''
+Downloaded 'https://github.com/facebook/rocksdb/archive/v10.4.2.tar.gz' to 'rocksdb'
+Cloned 'https://github.com/GekkoFS/syscall_intercept.git' to 'syscall_intercept' with commit '[d8b2a69961921ed123625c79a609331fc56a8931]' and flags ''
+Cloned 'https://github.com/HowardHinnant/date.git' to 'date' with commit '[e7e1482087f58913b80a20b04d5c58d9d6d90155]' and flags ''
+Cloned 'https://github.com/CARV-ICS-FORTH/parallax.git' to 'parallax' with commit '[c130decd7a71c60c20b98d6a23924f05f754c3cd]' and flags ''
+Done
diff --git a/tests/scripts/dl_dep.sh/0.9.6-dev/mogon2.out b/tests/scripts/dl_dep.sh/0.9.6-dev/mogon2.out
new file mode 100644
index 0000000000000000000000000000000000000000..c29f60c4616949a5718df8ea91c9b2840064ca26
--- /dev/null
+++ b/tests/scripts/dl_dep.sh/0.9.6-dev/mogon2.out
@@ -0,0 +1,13 @@
+Downloaded 'https://github.com/lz4/lz4/archive/v1.9.4.tar.gz' to 'lz4'
+Downloaded 'https://github.com/facebook/zstd/archive/v1.5.7.tar.gz' to 'zstd'
+Downloaded 'https://github.com/aquynh/capstone/archive/6.0.0-Alpha1.tar.gz' to 'capstone'
+Downloaded 'https://github.com/json-c/json-c/archive/json-c-0.17-20230812.tar.gz' to 'json-c'
+Cloned 'https://github.com/ofiwg/libfabric.git' to 'libfabric' with commit '[HEAD]' and flags '--branch=v2.2.0'
+Cloned 'https://github.com/mercury-hpc/mercury' to 'mercury' with commit '[v2.4.1rc1]' and flags '--recurse-submodules'
+Downloaded 'https://github.com/pmodels/argobots/archive/v1.1.tar.gz' to 'argobots'
+Cloned 'https://github.com/mochi-hpc/mochi-margo' to 'margo' with commit '[v0.15.0]' and flags ''
+Downloaded 'https://github.com/facebook/rocksdb/archive/v10.4.2.tar.gz' to 'rocksdb'
+Cloned 'https://github.com/GekkoFS/syscall_intercept.git' to 'syscall_intercept' with commit '[d8b2a69961921ed123625c79a609331fc56a8931]' and flags ''
+Cloned 'https://github.com/HowardHinnant/date.git' to 'date' with commit '[e7e1482087f58913b80a20b04d5c58d9d6d90155]' and flags ''
+Downloaded 'https://github.com/cornelisnetworks/opa-psm2/archive/PSM2_11.2.185.tar.gz' to 'psm2'
+Done
diff --git a/tests/scripts/dl_dep.sh/0.9.6-dev/mogon3.out b/tests/scripts/dl_dep.sh/0.9.6-dev/mogon3.out
new file mode 100644
index 0000000000000000000000000000000000000000..65cd164e655ff3739244d2246ea8f5df83e1c278
--- /dev/null
+++ b/tests/scripts/dl_dep.sh/0.9.6-dev/mogon3.out
@@ -0,0 +1,12 @@
+Downloaded 'https://github.com/lz4/lz4/archive/v1.9.4.tar.gz' to 'lz4'
+Downloaded 'https://github.com/facebook/zstd/archive/v1.5.7.tar.gz' to 'zstd'
+Downloaded 'https://github.com/aquynh/capstone/archive/6.0.0-Alpha1.tar.gz' to 'capstone'
+Downloaded 'https://github.com/json-c/json-c/archive/json-c-0.17-20230812.tar.gz' to 'json-c'
+Cloned 'https://github.com/mercury-hpc/mercury' to 'mercury' with commit '[v2.4.1rc1]' and flags '--recurse-submodules'
+Downloaded 'https://github.com/pmodels/argobots/archive/v1.2.tar.gz' to 'argobots'
+Cloned 'https://github.com/mochi-hpc/mochi-margo' to 'margo' with commit '[v0.18.3]' and flags ''
+Downloaded 'https://github.com/facebook/rocksdb/archive/v10.4.2.tar.gz' to 'rocksdb'
+Cloned 'https://github.com/GekkoFS/syscall_intercept.git' to 'syscall_intercept' with commit '[d8b2a69961921ed123625c79a609331fc56a8931]' and flags ''
+Downloaded 'https://github.com/zeromq/libzmq/archive/v4.3.5.tar.gz' to 'libzmq'
+Downloaded 'https://github.com/zeromq/cppzmq/archive/v4.10.0.tar.gz' to 'cppzmq'
+Done
diff --git a/tests/scripts/dl_dep.sh/0.9.6-dev/ngio.out b/tests/scripts/dl_dep.sh/0.9.6-dev/ngio.out
new file mode 100644
index 0000000000000000000000000000000000000000..2e0403e4531a7fa7d10b646283830ef9ad408ae8
--- /dev/null
+++ b/tests/scripts/dl_dep.sh/0.9.6-dev/ngio.out
@@ -0,0 +1,14 @@
+Downloaded 'https://github.com/lz4/lz4/archive/v1.9.4.tar.gz' to 'lz4'
+Downloaded 'https://github.com/facebook/zstd/archive/v1.5.7.tar.gz' to 'zstd'
+Downloaded 'https://github.com/aquynh/capstone/archive/6.0.0-Alpha1.tar.gz' to 'capstone'
+Downloaded 'https://github.com/json-c/json-c/archive/json-c-0.17-20230812.tar.gz' to 'json-c'
+Downloaded 'https://github.com/cornelisnetworks/opa-psm2/archive/PSM2_11.2.185.tar.gz' to 'psm2'
+Cloned 'https://github.com/ofiwg/libfabric.git' to 'libfabric' with commit '[HEAD]' and flags '--branch=v2.2.0'
+Cloned 'https://github.com/mercury-hpc/mercury' to 'mercury' with commit '[v2.4.1rc1]' and flags '--recurse-submodules'
+Downloaded 'https://github.com/pmodels/argobots/archive/v1.2.tar.gz' to 'argobots'
+Cloned 'https://github.com/mochi-hpc/mochi-margo' to 'margo' with commit '[v0.18.3]' and flags ''
+Downloaded 'https://github.com/facebook/rocksdb/archive/v10.4.2.tar.gz' to 'rocksdb'
+Cloned 'https://github.com/GekkoFS/syscall_intercept.git' to 'syscall_intercept' with commit '[d8b2a69961921ed123625c79a609331fc56a8931]' and flags ''
+Cloned 'https://github.com/HowardHinnant/date.git' to 'date' with commit '[e7e1482087f58913b80a20b04d5c58d9d6d90155]' and flags ''
+Cloned 'https://github.com/CARV-ICS-FORTH/parallax.git' to 'parallax' with commit '[c130decd7a71c60c20b98d6a23924f05f754c3cd]' and flags ''
+Done
diff --git a/tests/scripts/dl_dep.sh/0.9.6-dev/omnipath_psm2.out b/tests/scripts/dl_dep.sh/0.9.6-dev/omnipath_psm2.out
new file mode 100644
index 0000000000000000000000000000000000000000..1cce65f8ae324f0ccada055008eb6b736fec1d82
--- /dev/null
+++ b/tests/scripts/dl_dep.sh/0.9.6-dev/omnipath_psm2.out
@@ -0,0 +1,13 @@
+Downloaded 'https://github.com/lz4/lz4/archive/v1.9.4.tar.gz' to 'lz4'
+Downloaded 'https://github.com/facebook/zstd/archive/v1.5.7.tar.gz' to 'zstd'
+Downloaded 'https://github.com/aquynh/capstone/archive/6.0.0-Alpha1.tar.gz' to 'capstone'
+Downloaded 'https://github.com/json-c/json-c/archive/json-c-0.17-20230812.tar.gz' to 'json-c'
+Downloaded 'https://github.com/cornelisnetworks/opa-psm2/archive/PSM2_11.2.185.tar.gz' to 'psm2'
+Cloned 'https://github.com/ofiwg/libfabric.git' to 'libfabric' with commit '[HEAD]' and flags '--branch=v2.2.0'
+Cloned 'https://github.com/mercury-hpc/mercury' to 'mercury' with commit '[v2.4.1rc1]' and flags '--recurse-submodules'
+Downloaded 'https://github.com/pmodels/argobots/archive/v1.2.tar.gz' to 'argobots'
+Cloned 'https://github.com/mochi-hpc/mochi-margo' to 'margo' with commit '[v0.15.0]' and flags ''
+Downloaded 'https://github.com/facebook/rocksdb/archive/v10.4.2.tar.gz' to 'rocksdb'
+Cloned 'https://github.com/GekkoFS/syscall_intercept.git' to 'syscall_intercept' with commit '[d8b2a69961921ed123625c79a609331fc56a8931]' and flags ''
+Cloned 'https://github.com/HowardHinnant/date.git' to 'date' with commit '[e7e1482087f58913b80a20b04d5c58d9d6d90155]' and flags ''
+Done
diff --git a/tests/scripts/dl_dep.sh/0.9.6-dev/p9.out b/tests/scripts/dl_dep.sh/0.9.6-dev/p9.out
new file mode 100644
index 0000000000000000000000000000000000000000..7a14a688aab95b3073da5ef942dd2f17d89d8ad5
--- /dev/null
+++ b/tests/scripts/dl_dep.sh/0.9.6-dev/p9.out
@@ -0,0 +1,14 @@
+Downloaded 'https://github.com/lz4/lz4/archive/v1.9.4.tar.gz' to 'lz4'
+Downloaded 'https://github.com/facebook/zstd/archive/v1.5.7.tar.gz' to 'zstd'
+Downloaded 'https://github.com/aquynh/capstone/archive/6.0.0-Alpha1.tar.gz' to 'capstone'
+Downloaded 'https://github.com/json-c/json-c/archive/json-c-0.17-20230812.tar.gz' to 'json-c'
+Downloaded 'https://github.com/cornelisnetworks/opa-psm2/archive/PSM2_11.2.185.tar.gz' to 'psm2'
+Cloned 'https://github.com/ofiwg/libfabric.git' to 'libfabric' with commit '[HEAD]' and flags '--branch=v1.20.1'
+Cloned 'https://github.com/mercury-hpc/mercury' to 'mercury' with commit '[v2.4.0]' and flags '--recurse-submodules'
+Downloaded 'https://github.com/pmodels/argobots/archive/v1.2.tar.gz' to 'argobots'
+Cloned 'https://github.com/mochi-hpc/mochi-margo' to 'margo' with commit '[v0.18.3]' and flags ''
+Downloaded 'https://github.com/facebook/rocksdb/archive/v8.10.0.tar.gz' to 'rocksdb'
+Cloned 'https://github.com/GekkoFS/syscall_intercept.git' to 'syscall_intercept' with commit '[6eb27a9d2053bb2ac3bb9ce30e13b64ce055c19f]' and flags ''
+Applying patch '/builds/gitlab/hpc/gekkofs/scripts/patches/syscall_intercept.patch'...
+Cloned 'https://github.com/HowardHinnant/date.git' to 'date' with commit '[e7e1482087f58913b80a20b04d5c58d9d6d90155]' and flags ''
+Done
diff --git a/tests/scripts/dl_dep.sh/0.9.6-dev/riscv.out b/tests/scripts/dl_dep.sh/0.9.6-dev/riscv.out
new file mode 100644
index 0000000000000000000000000000000000000000..d2d1052e961cff6121d29c0289ecc8f9738e8db3
--- /dev/null
+++ b/tests/scripts/dl_dep.sh/0.9.6-dev/riscv.out
@@ -0,0 +1,11 @@
+Downloaded 'https://github.com/lz4/lz4/archive/v1.9.4.tar.gz' to 'lz4'
+Downloaded 'https://github.com/facebook/zstd/archive/v1.5.7.tar.gz' to 'zstd'
+Downloaded 'https://github.com/aquynh/capstone/archive/6.0.0-Alpha2.tar.gz' to 'capstone'
+Downloaded 'https://github.com/json-c/json-c/archive/json-c-0.17-20230812.tar.gz' to 'json-c'
+Cloned 'https://github.com/ofiwg/libfabric.git' to 'libfabric' with commit '[HEAD]' and flags '--branch=v1.20.1'
+Cloned 'https://github.com/mercury-hpc/mercury' to 'mercury' with commit '[v2.4.0]' and flags '--recurse-submodules'
+Downloaded 'https://github.com/pmodels/argobots/archive/v1.2.tar.gz' to 'argobots'
+Cloned 'https://github.com/mochi-hpc/mochi-margo' to 'margo' with commit '[v0.18.3]' and flags ''
+Downloaded 'https://github.com/facebook/rocksdb/archive/v8.10.0.tar.gz' to 'rocksdb'
+Cloned 'https://github.com/GekkoFS/syscall_intercept.git' to 'syscall_intercept' with commit '[2a25b21926ab115d667e135389458a7a159e8bb1]' and flags ''
+Done
diff --git a/tests/scripts/dl_dep.sh/latest b/tests/scripts/dl_dep.sh/latest
index 03834411d1529ea7337b4a2c2a4a34b689d2a54e..7ed1cca5bae0f0e116f49fcfbe0a93a06bb95ca5 120000
--- a/tests/scripts/dl_dep.sh/latest
+++ b/tests/scripts/dl_dep.sh/latest
@@ -1 +1 @@
-0.9.5
\ No newline at end of file
+0.9.6-dev
\ No newline at end of file