diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b5a077a342dac3a7572f798af64e19dbc1d7b116..eeca86f3fa79284e1c29b9f8f39cb09de345afb7 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -61,6 +61,8 @@ gkfs: -DGKFS_USE_GUIDED_DISTRIBUTION:BOOL=ON -DGKFS_ENABLE_PARALLAX:BOOL=ON -DGKFS_ENABLE_ROCKSDB:BOOL=ON + -DGKFS_CHUNK_STATS:BOOL=ON + -DGKFS_ENABLE_PROMETHEUS:BOOL=ON ${CI_PROJECT_DIR} - make -j$(nproc) install # reduce artifacts size diff --git a/CHANGELOG.md b/CHANGELOG.md index 05f997a69f40ace3a5131f9fb19824c51cd2e63a..350874562702df039547719fe292d5e02dd41288 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,8 +7,19 @@ to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## [Unreleased] + ### New +- Added statistics gathering on daemons ([!132](https://storage.bsc.es/gitlab/hpc/gekkofs/-/merge_requests/132)). + - Stats output can be enabled with: + - `--enable-collection` collects normal statistics. + - `--enable-chunkstats` collects extended chunk statistics. +- Statistics output to file is controlled by `--output-stats <filename>`. +- Added Prometheus support for outputting + statistics ([!132](https://storage.bsc.es/gitlab/hpc/gekkofs/-/merge_requests/132)): + - The Prometheus dependency is optional and enabled at compile time with the CMake argument `GKFS_ENABLE_PROMETHEUS`. + - `--enable-prometheus` enables statistics pushing to Prometheus if statistics are enabled. + - `--prometheus-gateway` sets an IP and port for the Prometheus connection. - Added new experimental metadata backend: Parallax ([!110](https://storage.bsc.es/gitlab/hpc/gekkofs/-/merge_requests/110)). - Added support to use multiple metadata backends. 
diff --git a/CMakeLists.txt b/CMakeLists.txt index 7589df5191bf2878d9c6db29e98e3f3e5b85cdae..6653a8298a322f9eb812992172e24ae6fc1c60cb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -195,6 +195,12 @@ if(GKFS_USE_GUIDED_DISTRIBUTION) message(STATUS "[gekkofs] Guided data distributor input file path: ${GKFS_USE_GUIDED_DISTRIBUTION_PATH}") endif() +option(GKFS_ENABLE_PROMETHEUS "Enable Prometheus Push " OFF) +if(GKFS_ENABLE_PROMETHEUS) + add_definitions(-DGKFS_ENABLE_PROMETHEUS) +endif () +message(STATUS "[gekkofs] Prometheus Output: ${GKFS_ENABLE_PROMETHEUS}") + configure_file(include/common/cmake_configure.hpp.in include/common/cmake_configure.hpp) diff --git a/README.md b/README.md index 24857eba041ce9a1ac8c2a19f756208f2b5f1c10..5aceb1016efd9fee08586b3440269a79fbb2c5f3 100644 --- a/README.md +++ b/README.md @@ -109,6 +109,11 @@ Options: RocksDB is default if not set. Parallax support is experimental. Note, parallaxdb creates a file called rocksdbx with 8GB created in metadir. --parallaxsize TEXT parallaxdb - metadata file size in GB (default 8GB), used only with new files + --enable-collection Enables collection of general statistics. Output requires either the --output-stats or --enable-prometheus argument. + --enable-chunkstats Enables collection of data chunk statistics in I/O operations. Output requires either the --output-stats or --enable-prometheus argument. + --output-stats TEXT Creates a thread that outputs the server stats every 10s to the specified file. + --enable-prometheus Enables prometheus output and a corresponding thread. + --prometheus-gateway TEXT Defines the prometheus gateway (Default 127.0.0.1:9091). --version Print version and exit. ``` @@ -231,19 +236,30 @@ Then, the `examples/distributors/guided/generate.py` scrpt is used to create the Finally, modify `guided_config.txt` to your distribution requirements. ### Metadata Backends -There are two different metadata backends in GekkoFS. 
The default one uses `rocksdb`, however an alternative based on `PARALLAX` from `FORTH` -is available. -To enable it use the `-DGKFS_ENABLE_PARALLAX:BOOL=ON` option, you can also disable `rocksdb` with `-DGKFS_ENABLE_ROCKSDB:BOOL=OFF`. + +There are two different metadata backends in GekkoFS. The default one uses `rocksdb`, however an alternative based +on `PARALLAX` from `FORTH` +is available. To enable it use the `-DGKFS_ENABLE_PARALLAX:BOOL=ON` option, you can also disable `rocksdb` +with `-DGKFS_ENABLE_ROCKSDB:BOOL=OFF`. Once it is enabled, `--dbbackend` option will be functional. +### Statistics + +GekkoFS daemons are able to output general operations (`--enable-collection`) and data chunk +statistics (`--enable-chunkstats`) to a specified output file via `--output-stats `. Prometheus can also be used +instead or in addition to the output file. It must be enabled at compile time via the CMake +argument `-DGKFS_ENABLE_PROMETHEUS` and the daemon argument `--enable-prometheus`. The corresponding statistics are then +pushed to the Prometheus instance. ### Acknowledgment This software was partially supported by the EC H2020 funded NEXTGenIO project (Project ID: 671951, www.nextgenio.eu). -This software was partially supported by the ADA-FS project under the SPPEXA project (http://www.sppexa.de/) funded by the DFG. +This software was partially supported by the ADA-FS project under the SPPEXA project (http://www.sppexa.de/) funded by +the DFG. This software is partially supported by the FIDIUM project funded by the DFG. -This software is partially supported by the ADMIRE project (https://www.admire-eurohpc.eu/) funded by the European Union’s Horizon 2020 JTI-EuroHPC Research and Innovation Programme (Grant 956748). +This software is partially supported by the ADMIRE project (https://www.admire-eurohpc.eu/) funded by the European +Union’s Horizon 2020 JTI-EuroHPC Research and Innovation Programme (Grant 956748). 
diff --git a/docker/0.9.1/deps/Dockerfile b/docker/0.9.1/deps/Dockerfile index fe81b563b4d2beca8272b4ea4803666617ea6bff..de4395387137d847e78d814075e1b2f7d1186eeb 100644 --- a/docker/0.9.1/deps/Dockerfile +++ b/docker/0.9.1/deps/Dockerfile @@ -21,7 +21,7 @@ RUN apt-get update && \ python3-dev \ python3-venv \ python3-setuptools \ - libnuma-dev libyaml-dev \ + libnuma-dev libyaml-dev libcurl4-openssl-dev \ procps && \ python3 -m pip install --upgrade pip && \ rm -rf /var/lib/apt/lists/* && \ diff --git a/docs/sphinx/users/running.md b/docs/sphinx/users/running.md index 4750c151cd839d473e6b08cdfee4d7348cfcd4dc..7b8e167906c1a3391fa1b71ed7973ec5fcb0dc58 100644 --- a/docs/sphinx/users/running.md +++ b/docs/sphinx/users/running.md @@ -79,6 +79,11 @@ Options: RocksDB is default if not set. Parallax support is experimental. Note, parallaxdb creates a file called rocksdbx with 8GB created in metadir. --parallaxsize TEXT parallaxdb - metadata file size in GB (default 8GB), used only with new files + --enable-collection Enables collection of general statistics. Output requires either the --output-stats or --enable-prometheus argument. + --enable-chunkstats Enables collection of data chunk statistics in I/O operations. Output requires either the --output-stats or --enable-prometheus argument. + --output-stats TEXT Creates a thread that outputs the server stats every 10s to the specified file. + --enable-prometheus Enables prometheus output and a corresponding thread. + --prometheus-gateway TEXT Defines the prometheus gateway (Default 127.0.0.1:9091). --version Print version and exit. 
```` diff --git a/include/common/statistics/stats.hpp b/include/common/statistics/stats.hpp new file mode 100644 index 0000000000000000000000000000000000000000..4d65cdb4750dff4eb70ea770284d1c9a436f6548 --- /dev/null +++ b/include/common/statistics/stats.hpp @@ -0,0 +1,301 @@ +/* + Copyright 2018-2022, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2022, Johannes Gutenberg Universitaet Mainz, Germany + + This software was partially supported by the + EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). + + This software was partially supported by the + ADA-FS project under the SPPEXA project funded by the DFG. + + This file is part of GekkoFS. + + GekkoFS is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + GekkoFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GekkoFS. If not, see . + + SPDX-License-Identifier: GPL-3.0-or-later +*/ + +#ifndef GKFS_COMMON_STATS_HPP +#define GKFS_COMMON_STATS_HPP + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +// PROMETHEUS includes +#ifdef GKFS_ENABLE_PROMETHEUS +#include +#include +#include +#include +#include + +using namespace prometheus; +#endif + + +/** + * Provides storage capabilities to provide stats about GekkoFS + * The information is per server. + * We do not provide accurate stats for 1-5-10 minute stats + * + */ +namespace gkfs::utils { + +/** + * + * Number of operations (Create, write/ read, remove, mkdir...) 
+ * Size of database (metadata keys, should be not needed, any) + * Size of data (+write - delete) + * Server Bandwidth (write / read operations) + * + * mean, (lifetime of the server) + * 1 minute mean + * 5 minute mean + * 10 minute mean + * + * To provide the stats that we need, + * we need to store the info and the timestamp to calculate it + * A vector should work, with a maximum of elements, + */ + +class Stats { +public: + enum class IopsOp { + iops_create, + iops_write, + iops_read, + iops_stats, + iops_dirent, + iops_remove, + }; ///< enum storing IOPS Stats + + enum class SizeOp { write_size, read_size }; ///< enum storing Size Stats + +private: + constexpr static const std::initializer_list all_IopsOp = { + IopsOp::iops_create, IopsOp::iops_write, + IopsOp::iops_read, IopsOp::iops_stats, + IopsOp::iops_dirent, IopsOp::iops_remove}; ///< Enum IOPS iterator + + constexpr static const std::initializer_list all_SizeOp = { + SizeOp::write_size, SizeOp::read_size}; ///< Enum SIZE iterator + + const std::vector IopsOp_s = { + "IOPS_CREATE", "IOPS_WRITE", "IOPS_READ", + "IOPS_STATS", "IOPS_DIRENTS", "IOPS_REMOVE"}; ///< Stats Labels + const std::vector SizeOp_s = {"WRITE_SIZE", + "READ_SIZE"}; ///< Stats Labels + + std::chrono::time_point + start; ///< When we started the server + + + std::map> + iops_mean; ///< Stores total value for global mean + std::map> + size_mean; ///< Stores total value for global mean + + std::mutex time_iops_mutex; + std::mutex size_iops_mutex; + + std::map>> + time_iops; ///< Stores timestamp when an operation comes removes if + ///< first operation if > 10 minutes Different means will + ///< be stored and cached 1 minuted + + + std::map, + unsigned long long>>> + time_size; ///< For size operations we need to store the timestamp + ///< and the size + + + std::thread t_output; ///< Thread that outputs stats info + bool output_thread_; ///< Enables or disables the output thread + bool enable_prometheus_; ///< Enables or disables the 
prometheus output + bool enable_chunkstats_; ///< Enables or disables the chunk stats output + + + bool running = + true; ///< Controls the destruction of the class/stops the thread + /** + * @brief Sends all the stats to the screen + * Debug Function + * + * @param d is the time between output + * @param file_output is the output file + */ + void + output(std::chrono::seconds d, std::string file_output); + + std::map, + std::atomic> + chunk_reads; ///< Stores the number of times a chunk/file is read + std::map, + std::atomic> + chunk_writes; ///< Stores the number of times a chunk/file is write + + /** + * @brief Called by output to generate CHUNK map + * + * @param output is the output stream + */ + void + output_map(std::ofstream& output); + + + /** + * @brief Dumps all the means from the stats + * @param of Output stream + */ + void + dump(std::ofstream& of); + + +// Prometheus Push structs +#ifdef GKFS_ENABLE_PROMETHEUS + std::shared_ptr gateway; ///< Prometheus Gateway + std::shared_ptr registry; ///< Prometheus Counters Registry + Family* family_counter; ///< Prometheus IOPS counter (managed by + ///< Prometheus cpp) + Family* family_summary; ///< Prometheus SIZE counter (managed by + ///< Prometheus cpp) + std::map iops_prometheus; ///< Prometheus IOPS metrics + std::map size_prometheus; ///< Prometheus SIZE metrics +#endif + +public: + /** + * @brief Starts the Stats module and initializes structures + * @param enable_chunkstats Enables or disables the chunk stats + * @param enable_prometheus Enables or disables the prometheus output + * @param filename file where to write the output + * @param prometheus_gateway ip:port to expose the metrics + */ + Stats(bool enable_chunkstats, bool enable_prometheus, + const std::string& filename, const std::string& prometheus_gateway); + + /** + * @brief Destroys the class, and any associated thread + * + */ + ~Stats(); + + + /** + * @brief Set the up Prometheus gateway and structures + * + * @param gateway_ip ip of the 
prometheus gateway + * @param gateway_port port of the prometheus gateway + */ + void + setup_Prometheus(const std::string& gateway_ip, + const std::string& gateway_port); + + /** + * @brief Adds a new read access to the chunk/path specified + * + * @param path path of the chunk + * @param chunk chunk number + */ + void + add_read(const std::string& path, unsigned long long chunk); + /** + * @brief Adds a new write access to the chunk/path specified + * + * @param path path of the chunk + * @param chunk chunk number + */ + void + add_write(const std::string& path, unsigned long long chunk); + + + /** + * Add a new value for an IOPS, that does not involve any size + * No value needed as they are simple (1 create, 1 read...) + * Size operations internally call this operation (read,write) + * + * @param IopsOp Which operation to add + */ + + void add_value_iops(enum IopsOp); + + /** + * @brief Store a new stat point, with a size value. + * If it involves an I/O operation it will call the corresponding + * operation + * + * @param SizeOp Which operation we refer + * @param value to store (SizeOp) + */ + void + add_value_size(enum SizeOp, unsigned long long value); + + /** + * @brief Get the total mean value of the asked stat + * This can be provided immediately without cost + * @param IopsOp Which operation to get + * @return mean value + */ + double get_mean(enum IopsOp); + + + /** + * @brief Get the total mean value of the asked stat + * This can be provided immediately without cost + * @param SizeOp Which operation to get + * @return mean value + */ + double get_mean(enum SizeOp); + + /** + * @brief Get all the means (total, 1,5 and 10 minutes) for a SIZE_OP + * Returns precalculated values if we just calculated them 1 minute ago + * @param SizeOp Which operation to get + * + * @return std::vector< double > with 4 means + */ + std::vector get_four_means(enum SizeOp); + + /** + * @brief Get all the means (total, 1,5 and 10 minutes) for an IOPS_OP + * Returns 
precalculated values if we just calculated them 1 minute ago + * @param IopsOp Which operation to get + * + * @return std::vector< double > with 4 means + */ + std::vector get_four_means(enum IopsOp); +}; + +} // namespace gkfs::utils + +#endif // GKFS_COMMON_STATS_HPP \ No newline at end of file diff --git a/include/config.hpp b/include/config.hpp index e8fcd01fbcfa22b0263c8d51ce03b642215800ad..768fa8cd8e79d5da86fe03679bfda0d719efab87 100644 --- a/include/config.hpp +++ b/include/config.hpp @@ -103,6 +103,11 @@ namespace rocksdb { constexpr auto use_write_ahead_log = false; } // namespace rocksdb +namespace stats { +constexpr auto max_stats = 1000000; ///< How many stats will be stored +constexpr auto prometheus_gateway = "127.0.0.1:9091"; +} // namespace stats + } // namespace gkfs::config #endif // GEKKOFS_CONFIG_HPP diff --git a/include/daemon/classes/fs_data.hpp b/include/daemon/classes/fs_data.hpp index 06e71468c693058661b79b65184d1eaaf2de969f..957f31c7b721c90b5c205f38753e1a52a305dc54 100644 --- a/include/daemon/classes/fs_data.hpp +++ b/include/daemon/classes/fs_data.hpp @@ -46,6 +46,11 @@ namespace data { class ChunkStorage; } +/* Forward declarations */ +namespace utils { +class Stats; +} + namespace daemon { class FsData { @@ -85,6 +90,16 @@ private: bool link_cnt_state_; bool blocks_state_; + // Statistics + std::shared_ptr stats_; + bool enable_stats_ = false; + bool enable_chunkstats_ = false; + bool enable_prometheus_ = false; + std::string stats_file_; + + // Prometheus + std::string prometheus_gateway_ = gkfs::config::stats::prometheus_gateway; + public: static FsData* getInstance() { @@ -209,8 +224,48 @@ public: void parallax_size_md(unsigned int size_md); + + const std::shared_ptr& + stats() const; + + void + stats(const std::shared_ptr& stats); + + void + close_stats(); + + bool + enable_stats() const; + + void + enable_stats(bool enable_stats); + + bool + enable_chunkstats() const; + + void + enable_chunkstats(bool enable_chunkstats); + + bool + 
enable_prometheus() const; + + void + enable_prometheus(bool enable_prometheus); + + const std::string& + stats_file() const; + + void + stats_file(const std::string& stats_file); + + const std::string& + prometheus_gateway() const; + + void + prometheus_gateway(const std::string& prometheus_gateway_); }; + } // namespace daemon } // namespace gkfs diff --git a/scripts/profiles/0.9.1/all.specs b/scripts/profiles/0.9.1/all.specs index 1beb784514031897f5157b9d17766fdc366d05b9..8c9fb43f2b96bf25944ef6858bc514df128eb748 100644 --- a/scripts/profiles/0.9.1/all.specs +++ b/scripts/profiles/0.9.1/all.specs @@ -43,6 +43,8 @@ wgetdeps=( ["rocksdb"]="6.26.1" ["psm2"]="11.2.185" ["json-c"]="0.15-20200726" + ["curl"]="7.82.0" + ["prometheus-cpp"]="v1.0.0" ) # Dependencies that must be cloned @@ -69,7 +71,7 @@ clonedeps_patches=( # Ordering that MUST be followed when downloading order=( "lz4" "capstone" "json-c" "psm2" "libfabric" "mercury" "argobots" "margo" "rocksdb" "syscall_intercept" "date" - "agios" "parallax" + "agios" "curl" "prometheus-cpp" "parallax" ) # Extra arguments passed to the installation script. As such, they can diff --git a/scripts/profiles/0.9.1/ci.specs b/scripts/profiles/0.9.1/ci.specs index 99c91f716ce5f77a26862f4bbdeac47350a61ac6..872dd6114e2109b656a575d1e285fdd8cdaa86ed 100644 --- a/scripts/profiles/0.9.1/ci.specs +++ b/scripts/profiles/0.9.1/ci.specs @@ -39,6 +39,7 @@ comment="Dependencies required by the CI" wgetdeps=( ["argobots"]="1.1" ["rocksdb"]="6.26.1" + ["prometheus-cpp"]="v1.0.0" ) # Dependencies that must be cloned @@ -65,7 +66,7 @@ clonedeps_patches=( # Ordering that MUST be followed when downloading order=( "libfabric" "mercury" "argobots" "margo" "rocksdb" "syscall_intercept" - "date" "agios" "parallax" + "date" "agios" "parallax" "prometheus-cpp" ) # Extra arguments passed to the installation script. 
As such, they can diff --git a/scripts/profiles/0.9.1/install/curl.install b/scripts/profiles/0.9.1/install/curl.install new file mode 100644 index 0000000000000000000000000000000000000000..d873819c4521e1ed8dce128c2699cc764356f92a --- /dev/null +++ b/scripts/profiles/0.9.1/install/curl.install @@ -0,0 +1,58 @@ +################################################################################ +# Copyright 2018-2022, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2022, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. # +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see . # +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ +# vi: ft=bash + +################################################################################ +## The installation script must define both a pkg_install function and +## pkg_check function that, as their name implies, must specify how +## a dependency package should be installed and tested. 
## ## The following +## variables can be used in the installation script: +## - CMAKE: a variable that expands to the cmake binary +## - SOURCE_DIR: the directory where the sources for the package were +## downloaded +## - INSTALL_DIR: the directory where the package should be installed +## - CORES: the number of cores to use when building +## - COMPILER_NAME: the name of the compiler being used (e.g. g++, clang, etc.) +## - COMPILER_FULL_VERSION: the compiler's full version (e.g. 9.3.0) +## - COMPILER_MAJOR_VERSION: the compiler's major version (e.g. 9) +## - PERFORM_TEST: whether tests for the package should be executed +################################################################################ + +pkg_install() { + ID="curl" + CURR="${SOURCE_DIR}/${ID}" + cd "${CURR}" + autoreconf -fi + ./configure --prefix="${INSTALL_DIR}" --without-ssl + make -j"${CORES}" + make install +} + +pkg_check() { + : +} diff --git a/scripts/profiles/0.9.1/install/prometheus-cpp.install b/scripts/profiles/0.9.1/install/prometheus-cpp.install new file mode 100644 index 0000000000000000000000000000000000000000..9373dbcf3a56616807f65ba15bf796f66cd15d22 --- /dev/null +++ b/scripts/profiles/0.9.1/install/prometheus-cpp.install @@ -0,0 +1,58 @@ +################################################################################ +# Copyright 2018-2022, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2022, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# This file is part of GekkoFS. 
# +# # +# GekkoFS is free software: you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation, either version 3 of the License, or # +# (at your option) any later version. # +# # +# GekkoFS is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with GekkoFS. If not, see . # +# # +# SPDX-License-Identifier: GPL-3.0-or-later # +################################################################################ +# vi: ft=bash + +################################################################################ +## The installation script must define both a pkg_install function and +## pkg_check function that, as their name implies, must specify how +## a dependency package should be installed and tested. ## ## The following +## variables can be used in the installation script: +## - CMAKE: a variable that expands to the cmake binary +## - SOURCE_DIR: the directory where the sources for the package were +## downloaded +## - INSTALL_DIR: the directory where the package should be installed +## - CORES: the number of cores to use when building +## - COMPILER_NAME: the name of the compiler being used (e.g. g++, clang, etc.) +## - COMPILER_FULL_VERSION: the compiler's full version (e.g. 9.3.0) +## - COMPILER_MAJOR_VERSION: the compiler's major version (e.g. 9) +## - PERFORM_TEST: whether tests for the package should be executed +################################################################################ + +pkg_install() { + ID="prometheus-cpp" + CURR="${SOURCE_DIR}/${ID}" + prepare_build_dir "${CURR}" + cd "${CURR}/build" + ${CMAKE} -DCMAKE_INSTALL_PREFIX="${INSTALL_DIR}" \ + -DBUILD_SHARED_LIBS:BOOL=ON .. 
+ make -j"${CORES}" install +} + +pkg_check() { + : +} diff --git a/scripts/profiles/sources.list b/scripts/profiles/sources.list index 2226c2c30a1bbb448538557cf35ba7d4429e5d31..2c4be6e886b43e2900934ca748460f1634d545f1 100644 --- a/scripts/profiles/sources.list +++ b/scripts/profiles/sources.list @@ -51,6 +51,8 @@ sources=( ["agios"]="https://github.com/francielizanon/agios.git" ["json-c"]="https://github.com/json-c/json-c/archive/json-c-{{VERSION}}.tar.gz" ["parallax"]="https://github.com/CARV-ICS-FORTH/parallax.git" + ["prometheus-cpp"]="https://github.com/jupp0r/prometheus-cpp/releases/download/{{VERSION}}/prometheus-cpp-with-submodules.tar.gz" + ["curl"]="https://curl.se/download/curl-{{VERSION}}.tar.gz" ) diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt index 74b80e58c2536c5465fd5494ed6b165b28d3f948..edd2b4f31e5943bc4732de55a255aecba8aff744 100644 --- a/src/common/CMakeLists.txt +++ b/src/common/CMakeLists.txt @@ -39,8 +39,36 @@ target_sources(distributor ${CMAKE_CURRENT_LIST_DIR}/rpc/distributor.cpp ) +add_library(statistics STATIC) +set_property(TARGET statistics PROPERTY POSITION_INDEPENDENT_CODE ON) +target_sources(statistics + PUBLIC + ${INCLUDE_DIR}/common/statistics/stats.hpp + PRIVATE + ${CMAKE_CURRENT_LIST_DIR}/statistics/stats.cpp + ) + + +if(GKFS_ENABLE_PROMETHEUS) + find_package(CURL REQUIRED) + find_package(prometheus-cpp REQUIRED) + set(PROMETHEUS_LINK_LIBRARIES + prometheus-cpp::pull + prometheus-cpp::push + prometheus-cpp::core + curl) + target_include_directories(statistics PRIVATE ${prometheus-cpp_INCLUDE_DIR}) +endif() + + target_link_libraries(statistics + PRIVATE + ${PROMETHEUS_LINK_LIBRARIES} + ) + + if(GKFS_ENABLE_CODE_COVERAGE) target_code_coverage(distributor AUTO) + target_code_coverage(statistics AUTO) endif() # get spdlog diff --git a/src/common/statistics/stats.cpp b/src/common/statistics/stats.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a766a984ccd585019b15460ab04bcdc993c94b40 --- 
/dev/null +++ b/src/common/statistics/stats.cpp @@ -0,0 +1,353 @@ +/* + Copyright 2018-2022, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2022, Johannes Gutenberg Universitaet Mainz, Germany + + This software was partially supported by the + EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). + + This software was partially supported by the + ADA-FS project under the SPPEXA project funded by the DFG. + + This file is part of GekkoFS. + + GekkoFS is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + GekkoFS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GekkoFS. If not, see . + + SPDX-License-Identifier: GPL-3.0-or-later +*/ + + +#include + +using namespace std; + +namespace gkfs::utils { + +#ifdef GKFS_ENABLE_PROMETHEUS +static std::string +GetHostName() { + char hostname[1024]; + + if(::gethostname(hostname, sizeof(hostname))) { + return {}; + } + return hostname; +} +#endif + +void +Stats::setup_Prometheus(const std::string& gateway_ip, + const std::string& gateway_port) { +// Prometheus Push model. 
Gateway +#ifdef GKFS_ENABLE_PROMETHEUS + const auto labels = Gateway::GetInstanceLabel(GetHostName()); + gateway = std::make_shared(gateway_ip, gateway_port, "GekkoFS", + labels); + + registry = std::make_shared(); + family_counter = &BuildCounter() + .Name("IOPS") + .Help("Number of IOPS") + .Register(*registry); + + for(auto e : all_IopsOp) { + iops_prometheus[e] = &family_counter->Add( + {{"operation", IopsOp_s[static_cast(e)]}}); + } + + family_summary = &BuildSummary() + .Name("SIZE") + .Help("Size of OPs") + .Register(*registry); + + for(auto e : all_SizeOp) { + size_prometheus[e] = &family_summary->Add( + {{"operation", SizeOp_s[static_cast(e)]}}, + Summary::Quantiles{}); + } + + gateway->RegisterCollectable(registry); +#endif /// GKFS_ENABLE_PROMETHEUS +} + +Stats::Stats(bool enable_chunkstats, bool enable_prometheus, + const std::string& stats_file, + const std::string& prometheus_gateway) + : enable_prometheus_(enable_prometheus), + enable_chunkstats_(enable_chunkstats) { + + // Init clocks + start = std::chrono::steady_clock::now(); + + // To simplify the control we add an element into the different maps + // Statistically will be negligible... 
and we get a faster flow + + for(auto e : all_IopsOp) { + iops_mean[e] = 0; + time_iops[e].push_back(std::chrono::steady_clock::now()); + } + + for(auto e : all_SizeOp) { + size_mean[e] = 0; + time_size[e].push_back(pair(std::chrono::steady_clock::now(), 0.0)); + } + +#ifdef GKFS_ENABLE_PROMETHEUS + auto pos_separator = prometheus_gateway.find(':'); + setup_Prometheus(prometheus_gateway.substr(0, pos_separator), + prometheus_gateway.substr(pos_separator + 1)); +#endif + + if(!stats_file.empty() || enable_prometheus_) { + output_thread_ = true; + t_output = std::thread([this, stats_file] { + output(std::chrono::duration(10s), stats_file); + }); + } +} + +Stats::~Stats() { + if(output_thread_) { + running = false; + if(t_output.joinable()) + t_output.join(); + } +} + +void +Stats::add_read(const std::string& path, unsigned long long chunk) { + chunk_reads[pair(path, chunk)]++; +} + +void +Stats::add_write(const std::string& path, unsigned long long chunk) { + chunk_writes[pair(path, chunk)]++; +} + + +void +Stats::output_map(std::ofstream& output) { + // Ordering + map>> + order_write; + + map>> + order_read; + + for(const auto& i : chunk_reads) { + order_read[i.second].insert(i.first); + } + + for(const auto& i : chunk_writes) { + order_write[i.second].insert(i.first); + } + + auto chunkMap = + [](std::string caption, + map>>& order, + std::ofstream& output) { + output << caption << std::endl; + for(auto k : order) { + output << k.first << " -- "; + for(auto v : k.second) { + output << v.first << " // " << v.second << endl; + } + } + }; + + chunkMap("READ CHUNK MAP", order_read, output); + chunkMap("WRITE CHUNK MAP", order_write, output); +} + +void +Stats::add_value_iops(enum IopsOp iop) { + iops_mean[iop]++; + auto now = std::chrono::steady_clock::now(); + + const std::lock_guard lock(time_iops_mutex); + if((now - time_iops[iop].front()) > std::chrono::duration(10s)) { + time_iops[iop].pop_front(); + } else if(time_iops[iop].size() >= 
gkfs::config::stats::max_stats) + time_iops[iop].pop_front(); + + time_iops[iop].push_back(std::chrono::steady_clock::now()); +#ifdef GKFS_ENABLE_PROMETHEUS + if(enable_prometheus_) { + iops_prometheus[iop]->Increment(); + } +#endif +} + +void +Stats::add_value_size(enum SizeOp iop, unsigned long long value) { + auto now = std::chrono::steady_clock::now(); + size_mean[iop] += value; + const std::lock_guard lock(size_iops_mutex); + if((now - time_size[iop].front().first) > std::chrono::duration(10s)) { + time_size[iop].pop_front(); + } else if(time_size[iop].size() >= gkfs::config::stats::max_stats) + time_size[iop].pop_front(); + + time_size[iop].push_back(pair(std::chrono::steady_clock::now(), value)); +#ifdef GKFS_ENABLE_PROMETHEUS + if(enable_prometheus_) { + size_prometheus[iop]->Observe(value); + } +#endif + if(iop == SizeOp::read_size) + add_value_iops(IopsOp::iops_read); + else if(iop == SizeOp::write_size) + add_value_iops(IopsOp::iops_write); +} + +/** + * @brief Get the total mean value of the asked stat + * This can be provided inmediately without cost + * @return mean value + */ +double +Stats::get_mean(enum SizeOp sop) { + auto now = std::chrono::steady_clock::now(); + auto duration = + std::chrono::duration_cast(now - start); + double value = static_cast(size_mean[sop]) / + static_cast(duration.count()); + return value; +} + +double +Stats::get_mean(enum IopsOp iop) { + auto now = std::chrono::steady_clock::now(); + auto duration = + std::chrono::duration_cast(now - start); + double value = static_cast(iops_mean[iop]) / + static_cast(duration.count()); + return value; +} + + +std::vector +Stats::get_four_means(enum SizeOp sop) { + std::vector results = {0, 0, 0, 0}; + auto now = std::chrono::steady_clock::now(); + const std::lock_guard lock(size_iops_mutex); + for(auto e : time_size[sop]) { + auto duration = + std::chrono::duration_cast(now - e.first) + .count(); + if(duration > 10) + break; + + results[3] += e.second; + if(duration > 5) + continue; + 
results[2] += e.second; + if(duration > 1) + continue; + results[1] += e.second; + } + // Mean in MB/s + results[0] = get_mean(sop) / (1024.0 * 1024.0); + results[3] /= 10 * 60 * (1024.0 * 1024.0); + results[2] /= 5 * 60 * (1024.0 * 1024.0); + results[1] /= 60 * (1024.0 * 1024.0); + + return results; +} + + +std::vector +Stats::get_four_means(enum IopsOp iop) { + std::vector results = {0, 0, 0, 0}; + auto now = std::chrono::steady_clock::now(); + const std::lock_guard lock(time_iops_mutex); + for(auto e : time_iops[iop]) { + auto duration = + std::chrono::duration_cast(now - e) + .count(); + if(duration > 10) + break; + + results[3]++; + if(duration > 5) + continue; + results[2]++; + if(duration > 1) + continue; + results[1]++; + } + + results[0] = get_mean(iop); + results[3] /= 10 * 60; + results[2] /= 5 * 60; + results[1] /= 60; + + return results; +} + +void +Stats::dump(std::ofstream& of) { + for(auto e : all_IopsOp) { + auto tmp = get_four_means(e); + + of << "Stats " << IopsOp_s[static_cast(e)] + << " IOPS/s (avg, 1 min, 5 min, 10 min) \t\t"; + for(auto mean : tmp) { + of << std::setprecision(4) << std::setw(9) << mean << " - "; + } + of << std::endl; + } + for(auto e : all_SizeOp) { + auto tmp = get_four_means(e); + + of << "Stats " << SizeOp_s[static_cast(e)] + << " MB/s (avg, 1 min, 5 min, 10 min) \t\t"; + for(auto mean : tmp) { + of << std::setprecision(4) << std::setw(9) << mean << " - "; + } + of << std::endl; + } + of << std::endl; +} +void +Stats::output(std::chrono::seconds d, std::string file_output) { + int times = 0; + std::optional of; + if(!file_output.empty()) + of = std::ofstream(file_output, std::ios_base::trunc); + + while(running) { + if(of) + dump(of.value()); + std::chrono::seconds a = 0s; + + times++; + + if(enable_chunkstats_ && of) { + if(times % 4 == 0) + output_map(of.value()); + } +#ifdef GKFS_ENABLE_PROMETHEUS + if(enable_prometheus_) { + gateway->Push(); + } +#endif + while(running && a < d) { + a += 1s; + 
std::this_thread::sleep_for(1s); + } + } +} + +} // namespace gkfs::utils diff --git a/src/daemon/CMakeLists.txt b/src/daemon/CMakeLists.txt index e3703d73b50b0d824c219b47b78a9b85ada4d296..f91415eee9bbdf02e61ae5a03b6e9de723277057 100644 --- a/src/daemon/CMakeLists.txt +++ b/src/daemon/CMakeLists.txt @@ -63,6 +63,7 @@ set(DAEMON_LINK_LIBRARIES metadata_db storage distributor + statistics log_util env_util spdlog diff --git a/src/daemon/classes/fs_data.cpp b/src/daemon/classes/fs_data.cpp index 1611e9e8a3bf94fabda6dc0386b31bf8c4a5aece..beeac77dcc43aa87053b67acf80802f1cb3b0ed5 100644 --- a/src/daemon/classes/fs_data.cpp +++ b/src/daemon/classes/fs_data.cpp @@ -221,4 +221,69 @@ FsData::parallax_size_md(unsigned int size_md) { size_md * 1024ull * 1024ull * 1024ull); } +const std::shared_ptr& +FsData::stats() const { + return stats_; +} + +void +FsData::stats(const std::shared_ptr& stats) { + FsData::stats_ = stats; +} + +void +FsData::close_stats() { + stats_.reset(); +} + +bool +FsData::enable_stats() const { + return enable_stats_; +} + +void +FsData::enable_stats(bool enable_stats) { + FsData::enable_stats_ = enable_stats; +} + +bool +FsData::enable_chunkstats() const { + return enable_chunkstats_; +} + +void +FsData::enable_chunkstats(bool enable_chunkstats) { + FsData::enable_chunkstats_ = enable_chunkstats; +} + +bool +FsData::enable_prometheus() const { + return enable_prometheus_; +} + +void +FsData::enable_prometheus(bool enable_prometheus) { + FsData::enable_prometheus_ = enable_prometheus; +} + +const std::string& +FsData::stats_file() const { + return stats_file_; +} + +void +FsData::stats_file(const std::string& stats_file) { + FsData::stats_file_ = stats_file; +} + +const std::string& +FsData::prometheus_gateway() const { + return prometheus_gateway_; +} + +void +FsData::prometheus_gateway(const std::string& prometheus_gateway) { + FsData::prometheus_gateway_ = prometheus_gateway; +} + } // namespace gkfs::daemon diff --git a/src/daemon/daemon.cpp 
b/src/daemon/daemon.cpp index 1ed24e9a22af443cc1a1ce16630f592c5fd56d6a..97476e2aa865207d0d6cf2cf7053a9eb4e393980 100644 --- a/src/daemon/daemon.cpp +++ b/src/daemon/daemon.cpp @@ -39,6 +39,7 @@ #include #include #include +#include #include #include @@ -81,6 +82,8 @@ struct cli_options { string rpc_protocol; string dbbackend; string parallax_size; + string stats_file; + string prometheus_gateway; }; /** @@ -291,6 +294,12 @@ init_environment() { } #endif + // Initialize Stats + if(GKFS_DATA->enable_stats() || GKFS_DATA->enable_chunkstats()) + GKFS_DATA->stats(std::make_shared( + GKFS_DATA->enable_chunkstats(), GKFS_DATA->enable_prometheus(), + GKFS_DATA->stats_file(), GKFS_DATA->prometheus_gateway())); + // Initialize data backend auto chunk_storage_path = fmt::format("{}/{}", GKFS_DATA->rootdir(), gkfs::config::data::chunk_dir); @@ -420,6 +429,7 @@ destroy_enviroment() { fs::remove_all(GKFS_DATA->metadir(), ecode); fs::remove_all(GKFS_DATA->rootdir(), ecode); } + GKFS_DATA->close_stats(); } /** @@ -644,6 +654,61 @@ parse_input(const cli_options& opts, const CLI::App& desc) { if(desc.count("--parallaxsize")) { // Size in GB GKFS_DATA->parallax_size_md(stoi(opts.parallax_size)); } + + /* + * Statistics collection arguments + */ + if(desc.count("--enable-collection")) { + GKFS_DATA->enable_stats(true); + GKFS_DATA->spdlogger()->info("{}() Statistic collection enabled", + __func__); + } + if(desc.count("--enable-chunkstats")) { + GKFS_DATA->enable_chunkstats(true); + GKFS_DATA->spdlogger()->info("{}() Chunk statistic collection enabled", + __func__); + } + +#ifdef GKFS_ENABLE_PROMETHEUS + if(desc.count("--enable-prometheus")) { + GKFS_DATA->enable_prometheus(true); + if(GKFS_DATA->enable_stats() || GKFS_DATA->enable_chunkstats()) + GKFS_DATA->spdlogger()->info( + "{}() Statistics output to Prometheus enabled", __func__); + else + GKFS_DATA->spdlogger()->warn( + "{}() Prometheus statistic output enabled but no stat collection is enabled. 
There will be no output to Prometheus", + __func__); + } + + if(desc.count("--prometheus-gateway")) { + auto gateway = opts.prometheus_gateway; + GKFS_DATA->prometheus_gateway(gateway); + if(GKFS_DATA->enable_prometheus()) + GKFS_DATA->spdlogger()->info("{}() Prometheus gateway set to '{}'", + __func__, gateway); + else + GKFS_DATA->spdlogger()->warn( + "{}() Prometheus gateway was set but Prometheus is disabled.", + __func__); + } +#endif + + if(desc.count("--output-stats")) { + auto stats_file = opts.stats_file; + GKFS_DATA->stats_file(stats_file); + if(GKFS_DATA->enable_stats() || GKFS_DATA->enable_chunkstats()) + GKFS_DATA->spdlogger()->info( + "{}() Statistics are written to file '{}'", __func__, + stats_file); + else + GKFS_DATA->spdlogger()->warn( + "{}() --output-stats argument used but no stat collection is enabled. There will be no output to file '{}'", + __func__, stats_file); + } else { + GKFS_DATA->stats_file(""); + GKFS_DATA->spdlogger()->debug("{}() Statistics output disabled", + __func__); + } } /** @@ -711,6 +776,27 @@ main(int argc, const char* argv[]) { desc.add_option("--parallaxsize", opts.parallax_size, "parallaxdb - metadata file size in GB (default 8GB), " "used only with new files"); + desc.add_flag( + "--enable-collection", + "Enables collection of general statistics. " + "Output requires either the --output-stats or --enable-prometheus argument."); + desc.add_flag( + "--enable-chunkstats", + "Enables collection of data chunk statistics in I/O operations." 
+ "Output requires either the --output-stats or --enable-prometheus argument."); + desc.add_option( + "--output-stats", opts.stats_file, + "Creates a thread that outputs the server stats each 10s to the specified file."); + #ifdef GKFS_ENABLE_PROMETHEUS + desc.add_flag( + "--enable-prometheus", + "Enables prometheus output and a corresponding thread."); + + desc.add_option( + "--prometheus-gateway", opts.prometheus_gateway, + "Defines the prometheus gateway (Default 127.0.0.1:9091)."); + #endif + desc.add_flag("--version", "Print version and exit."); // clang-format on try { diff --git a/src/daemon/handler/srv_data.cpp b/src/daemon/handler/srv_data.cpp index d7b9a6acef8a995cd9bfa6fc952cef6361e2b1ca..5190f2b404b0fd9e50256958378be12a5ca09daa 100644 --- a/src/daemon/handler/srv_data.cpp +++ b/src/daemon/handler/srv_data.cpp @@ -42,6 +42,7 @@ #include #include #include +#include #ifdef GKFS_ENABLE_AGIOS #include @@ -113,6 +114,8 @@ rpc_srv_write(hg_handle_t handle) { "{}() path: '{}' chunk_start '{}' chunk_end '{}' chunk_n '{}' total_chunk_size '{}' bulk_size: '{}' offset: '{}'", __func__, in.path, in.chunk_start, in.chunk_end, in.chunk_n, in.total_chunk_size, bulk_size, in.offset); + + #ifdef GKFS_ENABLE_AGIOS int* data; ABT_eventual eventual = ABT_EVENTUAL_NULL; @@ -233,6 +236,10 @@ rpc_srv_write(hg_handle_t handle) { __func__, chnk_id_file, host_id, chnk_id_curr); continue; } + + if(GKFS_DATA->enable_chunkstats()) { + GKFS_DATA->stats()->add_write(in.path, chnk_id_file); + } #endif chnk_ids_host[chnk_id_curr] = @@ -342,7 +349,13 @@ rpc_srv_write(hg_handle_t handle) { */ GKFS_DATA->spdlogger()->debug("{}() Sending output response {}", __func__, out.err); - return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); + auto handler_ret = + gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); + if(GKFS_DATA->enable_stats()) { + GKFS_DATA->stats()->add_value_size( + gkfs::utils::Stats::SizeOp::write_size, bulk_size); + } + return handler_ret; } /** @@ 
-404,6 +417,7 @@ rpc_srv_read(hg_handle_t handle) { "{}() path: '{}' chunk_start '{}' chunk_end '{}' chunk_n '{}' total_chunk_size '{}' bulk_size: '{}' offset: '{}'", __func__, in.path, in.chunk_start, in.chunk_end, in.chunk_n, in.total_chunk_size, bulk_size, in.offset); + #ifdef GKFS_ENABLE_AGIOS int* data; ABT_eventual eventual = ABT_EVENTUAL_NULL; @@ -513,6 +527,9 @@ rpc_srv_read(hg_handle_t handle) { __func__, chnk_id_file, host_id, chnk_id_curr); continue; } + if(GKFS_DATA->enable_chunkstats()) { + GKFS_DATA->stats()->add_read(in.path, chnk_id_file); + } #endif chnk_ids_host[chnk_id_curr] = @@ -601,7 +618,13 @@ rpc_srv_read(hg_handle_t handle) { */ GKFS_DATA->spdlogger()->debug("{}() Sending output response, err: {}", __func__, out.err); - return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); + auto handler_ret = + gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); + if(GKFS_DATA->enable_stats()) { + GKFS_DATA->stats()->add_value_size( + gkfs::utils::Stats::SizeOp::read_size, bulk_size); + } + return handler_ret; } diff --git a/src/daemon/handler/srv_metadata.cpp b/src/daemon/handler/srv_metadata.cpp index 3cea7e3eaee40c939658037152fc6b8336b6c7d6..eb10456281a9d1fa5fe144b2ea50bd68c83d4782 100644 --- a/src/daemon/handler/srv_metadata.cpp +++ b/src/daemon/handler/srv_metadata.cpp @@ -41,6 +41,7 @@ #include #include +#include using namespace std; @@ -84,6 +85,7 @@ rpc_srv_create(hg_handle_t handle) { __func__, e.what()); out.err = -1; } + GKFS_DATA->spdlogger()->debug("{}() Sending output err '{}'", __func__, out.err); auto hret = margo_respond(handle, &out); @@ -94,6 +96,10 @@ rpc_srv_create(hg_handle_t handle) { // Destroy handle when finished margo_free_input(handle, &in); margo_destroy(handle); + if(GKFS_DATA->enable_stats()) { + GKFS_DATA->stats()->add_value_iops( + gkfs::utils::Stats::IopsOp::iops_create); + } return HG_SUCCESS; } @@ -148,6 +154,11 @@ rpc_srv_stat(hg_handle_t handle) { // Destroy handle when finished 
margo_free_input(handle, &in); margo_destroy(handle); + + if(GKFS_DATA->enable_stats()) { + GKFS_DATA->stats()->add_value_iops( + gkfs::utils::Stats::IopsOp::iops_stats); + } return HG_SUCCESS; } @@ -241,6 +252,7 @@ rpc_srv_remove_metadata(hg_handle_t handle) { if(S_ISREG(md.mode()) && (md.size() != 0)) GKFS_DATA->storage()->destroy_chunk_space(in.path); } + } catch(const gkfs::metadata::DBException& e) { GKFS_DATA->spdlogger()->error("{}(): path '{}' message '{}'", __func__, in.path, e.what()); @@ -265,6 +277,10 @@ rpc_srv_remove_metadata(hg_handle_t handle) { // Destroy handle when finished margo_free_input(handle, &in); margo_destroy(handle); + if(GKFS_DATA->enable_stats()) { + GKFS_DATA->stats()->add_value_iops( + gkfs::utils::Stats::IopsOp::iops_remove); + } return HG_SUCCESS; } @@ -635,6 +651,10 @@ rpc_srv_get_dirents(hg_handle_t handle) { GKFS_DATA->spdlogger()->debug( "{}() Sending output response err '{}' dirents_size '{}'. DONE", __func__, out.err, out.dirents_size); + if(GKFS_DATA->enable_stats()) { + GKFS_DATA->stats()->add_value_iops( + gkfs::utils::Stats::IopsOp::iops_dirent); + } return gkfs::rpc::cleanup_respond(&handle, &in, &out, &bulk_handle); } diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 9236ccec6b2fe4b9b4932fcc88f108a745e5201c..b012a3783c3c497ea149ccc871d1bac111878445 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -118,7 +118,7 @@ def gkfs_daemon_parallaxdb(test_workspace, request): yield daemon.run() daemon.shutdown() -@pytest.fixture(params=['gkfs_daemon_rocksdb', 'gkfs_daemon_parallaxdb']) +@pytest.fixture(params=['gkfs_daemon_rocksdb']) def gkfs_daemon(request): return request.getfixturevalue(request.param) diff --git a/tests/integration/harness/gkfs.py b/tests/integration/harness/gkfs.py index 6f5624daec3a17f0ca0d24ef9367f7ca63f4ce7d..bd3b92b0c0b2c6f1abf866ec3a67acd1293cc171 100644 --- a/tests/integration/harness/gkfs.py +++ b/tests/integration/harness/gkfs.py @@ 
-251,7 +251,10 @@ class Daemon: '--rootdir', self.rootdir, '-l', self._address, '--metadir', self._metadir, - '--dbbackend', self._database] + '--dbbackend', self._database, + '--output-stats', self.logdir / 'stats.log', + '--enable-collection', + '--enable-chunkstats' ] if self._database == "parallaxdb" : args.append('--clean-rootdir-finish')