.. _program_listing_file_include_config.hpp: Program Listing for File config.hpp =================================== |exhale_lsh| :ref:`Return to documentation for file <file_include_config.hpp>` (``include/config.hpp``) .. |exhale_lsh| unicode:: U+021B0 .. UPWARDS ARROW WITH TIP LEFTWARDS .. code-block:: cpp /* Copyright 2018-2025, Barcelona Supercomputing Center (BSC), Spain Copyright 2015-2025, Johannes Gutenberg Universitaet Mainz, Germany This software was partially supported by the EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). This software was partially supported by the ADA-FS project under the SPPEXA project funded by the DFG. This software was partially supported by the European Union’s Horizon 2020 JTI-EuroHPC research and innovation programme, by the project ADMIRE (Project ID: 956748, admire-eurohpc.eu) This project was partially promoted by the Ministry for Digital Transformation and the Civil Service, within the framework of the Recovery, Transformation and Resilience Plan - Funded by the European Union -NextGenerationEU. This file is part of GekkoFS. GekkoFS is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. GekkoFS is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GekkoFS. If not, see <https://www.gnu.org/licenses/>. 
SPDX-License-Identifier: GPL-3.0-or-later */ #ifndef GEKKOFS_CONFIG_HPP #define GEKKOFS_CONFIG_HPP #include // environment prefixes (are concatenated in env module at compile time) #define CLIENT_ENV_PREFIX "LIBGKFS_" #define DAEMON_ENV_PREFIX "GKFS_DAEMON_" #define COMMON_ENV_PREFIX "GKFS_" #define PROXY_ENV_PREFIX "GKFS_PROXY_" namespace gkfs::config { // writes to dev null instead of chunk space, read is reading /dev/zero constexpr bool limbo_mode = false; constexpr auto hostfile_path = "./gkfs_hosts.txt"; // We do not default this, ENV variable always required. constexpr auto forwarding_file_path = ""; namespace cache { // Optimization for readdir which avoids consecutive stat calls constexpr bool use_dentry_cache = false; // When enabled, the dentry cache is cleared when a directory is closed. // Disabling this may cause semantic issues. constexpr bool clear_dentry_cache_on_close = true; // When enabled, write operations no longer update the file size on each write. // Instead, the size is updated every `write_size_flush_threshold` writes per // file. fsync/close flushes the size to the server immediately. constexpr bool use_write_size_cache = false; constexpr auto write_size_flush_threshold = 1000; } // namespace cache namespace client_metrics { // Default directory where client metrics are stored. Can be set via // LIBGKFS_METRICS_PATH. Filename consists of starting time, pid, and hostname // Note: when LIBGKFS_METRICS_IP is given, ZeroMQ is used instead constexpr auto flush_path = "/tmp/gkfs_client_metrics"; constexpr auto flush_interval = 5; // in seconds } // namespace client_metrics namespace io { /* * Zero buffer before read. This is relevant if sparse files are used. * If buffer is not zeroed, sparse regions contain invalid data. */ constexpr auto zero_buffer_before_read = false; /* * When the daemon handler serves a read request, it starts tasklets (for each * chunk) from the io pool to read all chunks of that read request in parallel. 
* Then another thread is waiting for the first tasklet to finish before * initiating the bulk transfer back to the client for this chunk. * This will continue in sequence, allowing gaps between bulk transfers while * waiting. Although this is CPU efficient, it does not provide the highest I/O. * If spin_lock_read is enabled, it will check all tasklets for completion * regardless of their order, minimizing the gap between bulk transfers. * Due to spinning in a loop this increases CPU utilization. */ constexpr auto spin_lock_read = false; } // namespace io namespace log { constexpr auto client_log_path = "/tmp/gkfs_client.log"; constexpr auto daemon_log_path = "/tmp/gkfs_daemon.log"; constexpr auto proxy_log_path = "/tmp/gkfs_proxy.log"; constexpr auto client_log_level = "info,errors,critical,hermes"; constexpr auto daemon_log_level = 4; // info constexpr auto proxy_log_level = 4; // info } // namespace log namespace metadata { // directory name where the metadata db instance is placed constexpr auto dir = "metadata"; // which metadata should be considered apart from size and mode // Blocks are used to store the rename status (-1 is a renamed file) constexpr auto use_atime = false; constexpr auto use_ctime = false; constexpr auto use_mtime = false; constexpr auto use_link_cnt = false; #ifdef HAS_RENAME constexpr auto use_blocks = true; #else constexpr auto use_blocks = false; #endif // HAS_RENAME /* * If true, all chunks on the same host are removed during a metadata remove * rpc. This is a technical optimization that reduces the number of RPCs for * remove operations. This setting could be useful for future asynchronous * remove implementations where the data should not be removed immediately. */ constexpr auto implicit_data_removal = true; // metadata logic // Check for existence of file metadata before create. 
This is done on RocksDB // level constexpr auto create_exist_check = true; } // namespace metadata namespace data { // directory name below rootdir where chunks are placed constexpr auto chunk_dir = "chunks"; } // namespace data namespace proxy { constexpr auto pid_path = "/tmp/gkfs_proxy.pid"; constexpr auto fwd_create = true; constexpr auto fwd_stat = true; constexpr auto fwd_remove = true; constexpr auto fwd_get_size = true; constexpr auto fwd_update_size = true; constexpr auto fwd_io = true; constexpr auto fwd_truncate = true; constexpr auto fwd_chunk_stat = true; constexpr auto fwd_get_dirents_single = true; // Only use proxy for io if write/read size is higher than set value constexpr auto fwd_io_count_threshold = 0; } // namespace proxy namespace rpc { constexpr auto chunksize = 524288; // in bytes (e.g., 524288 == 512KB) // size of preallocated buffer to hold directory entries in rpc call constexpr auto dirents_buff_size = (8 * 1024 * 1024); // 8 mega constexpr auto dirents_buff_size_proxy = (128 * 1024 * 1024); // 128 mega /* * Indicates the number of concurrent progress to drive I/O operations of chunk * files to and from local file systems. The value is directly mapped to created * Argobots xstreams, controlled in a single pool with ABT_snoozer scheduler */ constexpr auto daemon_io_xstreams = 8; // Number of threads used for RPC handlers at the daemon constexpr auto daemon_handler_xstreams = 4; // Number of threads used for RPC handlers at the proxy constexpr auto proxy_handler_xstreams = 3; } // namespace rpc namespace rocksdb { // Write-ahead logging of rocksdb constexpr auto use_write_ahead_log = false; } // namespace rocksdb namespace stats { constexpr auto max_stats = 1000000; constexpr auto prometheus_gateway = "127.0.0.1:9091"; } // namespace stats } // namespace gkfs::config #endif // GEKKOFS_CONFIG_HPP