diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 276a1841982bf1ef8d4393a6e8e780e7ccffe24f..1a2457ca37deaba250fc1567daf328e945055e2f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -5,22 +5,25 @@ stages: - test variables: - DEPS_SRC_PATH: "${CI_PROJECT_DIR}/deps/src" - DEPS_INSTALL_PATH: "${CI_PROJECT_DIR}/deps/install" - DEPS_COMMIT: "${CI_PROJECT_DIR}/deps/install/gkfs_deps_commit" - BUILD_PATH: "${CI_PROJECT_DIR}/gkfs/build" - INSTALL_PATH: "${CI_PROJECT_DIR}/gkfs/install" - TESTS_BUILD_PATH: "${CI_PROJECT_DIR}/test/build" - LOG_PATH: "${CI_PROJECT_DIR}/logs" - LD_LIBRARY_PATH: "${CI_PROJECT_DIR}/deps/install/lib;${CI_PROJECT_DIR}/deps/install/lib64" + DEPS_SRC_PATH: "${CI_PROJECT_DIR}/deps/src" + DEPS_INSTALL_PATH: "${CI_PROJECT_DIR}/deps/install" + DEPS_COMMIT: "${CI_PROJECT_DIR}/deps/install/gkfs_deps_commit" + BUILD_PATH: "${CI_PROJECT_DIR}/gkfs/build" + INSTALL_PATH: "${CI_PROJECT_DIR}/gkfs/install" + INTEGRATION_TESTS_BIN_PATH: "${CI_PROJECT_DIR}/gkfs/install/share/gkfs/tests/integration" + INTEGRATION_TESTS_RUN_PATH: "${CI_PROJECT_DIR}/gkfs/install/share/gkfs/tests/integration/run" + TESTS_BUILD_PATH: "${CI_PROJECT_DIR}/test/build" + PYTEST: "${CI_PROJECT_DIR}/gkfs/install/share/gkfs/tests/integration/pytest-venv/bin/py.test" + LOG_PATH: "${CI_PROJECT_DIR}/logs" + LD_LIBRARY_PATH: "${CI_PROJECT_DIR}/deps/install/lib;${CI_PROJECT_DIR}/deps/install/lib64" # Configuration variables - GKFS_LOG_LEVEL: "100" - GKFS_DAEMON_LOG_PATH: "${CI_PROJECT_DIR}/logs/daemon.log" - LIBGKFS_LOG: "all" - LIBGKFS_LOG_OUTPUT: "${CI_PROJECT_DIR}/logs/gkfs_client.log" - GIT_SUBMODULE_STRATEGY: recursive + GKFS_LOG_LEVEL: "100" + GKFS_DAEMON_LOG_PATH: "${CI_PROJECT_DIR}/logs/daemon.log" + LIBGKFS_LOG: "all" + LIBGKFS_LOG_OUTPUT: "${CI_PROJECT_DIR}/logs/gkfs_client.log" + GIT_SUBMODULE_STRATEGY: recursive -image: gekkofs/gekkofs:build_env +image: gekkofs/gekkofs:build_env-0.8.0 compile dependencies: stage: build deps @@ -51,6 +54,8 @@ compile GekkoFS: -Wdev -Wdeprecate -DCMAKE_BUILD_TYPE=Debug + -DGKFS_BUILD_TESTS:BOOL=ON + -DGKFS_INSTALL_TESTS:BOOL=ON -DRPC_PROTOCOL="ofi+sockets" -DCMAKE_PREFIX_PATH=${DEPS_INSTALL_PATH} -DCMAKE_INSTALL_PREFIX=${INSTALL_PATH} @@ -72,6 +77,17 @@ compile tests: paths: - ${TESTS_BUILD_PATH} +integration tests: + stage: test + script: + - mkdir -p ${INTEGRATION_TESTS_RUN_PATH} + - cd ${INTEGRATION_TESTS_BIN_PATH} + - TMPDIR=${INTEGRATION_TESTS_RUN_PATH} unbuffer ${PYTEST} -v | tee ${INTEGRATION_TESTS_RUN_PATH}/session.log + artifacts: + when: on_failure + paths: + - "${INTEGRATION_TESTS_RUN_PATH}" + test wr: stage: test script: diff --git a/CMake/GkfsPythonTesting.cmake b/CMake/GkfsPythonTesting.cmake new file mode 100644 index 0000000000000000000000000000000000000000..9017cde4c84dd0b2d2dd00f577217f76a0726ec8 --- /dev/null +++ b/CMake/GkfsPythonTesting.cmake @@ -0,0 +1,218 @@ +include(CMakeParseArguments) + +function(gkfs_enable_python_testing) + # Parse arguments + set(MULTI BINARY_DIRECTORIES LIBRARY_PREFIX_DIRECTORIES) + + cmake_parse_arguments(PYTEST "${OPTION}" "${SINGLE}" "${MULTI}" ${ARGN}) + + if(PYTEST_UNPARSED_ARGUMENTS) + message(WARNING "Unparsed arguments in gkfs_enable_python_testing: This often indicates typos!") + endif() + + if(PYTEST_BINARY_DIRECTORIES) + set(GKFS_PYTEST_BINARY_DIRECTORIES ${PYTEST_BINARY_DIRECTORIES} PARENT_SCOPE) + endif() + + if(PYTEST_LIBRARY_PREFIX_DIRECTORIES) + set(GKFS_PYTEST_LIBRARY_PREFIX_DIRECTORIES ${PYTEST_LIBRARY_PREFIX_DIRECTORIES} PARENT_SCOPE) + endif() + + set(PYTEST_BINDIR_ARGS, "") + if(PYTEST_BINARY_DIRECTORIES) + 
foreach(dir IN LISTS PYTEST_BINARY_DIRECTORIES)
+            list(APPEND PYTEST_BINDIR_ARGS "--bin-dir=${dir}")
+        endforeach()
+    endif()
+
+    set(PYTEST_LIBDIR_ARGS "")
+    if(PYTEST_LIBRARY_PREFIX_DIRECTORIES)
+        foreach(dir IN LISTS PYTEST_LIBRARY_PREFIX_DIRECTORIES)
+
+            if(NOT IS_ABSOLUTE ${dir})
+                set(dir ${CMAKE_BINARY_DIR}/${dir})
+            endif()
+
+            file(TO_CMAKE_PATH "${dir}/lib" libdir)
+            file(TO_CMAKE_PATH "${dir}/lib64" lib64dir)
+
+            if(EXISTS ${libdir})
+                list(APPEND PYTEST_LIBDIR_ARGS "--lib-dir=${libdir}")
+            endif()
+
+            if(EXISTS ${lib64dir})
+                list(APPEND PYTEST_LIBDIR_ARGS "--lib-dir=${lib64dir}")
+            endif()
+        endforeach()
+    endif()
+
+    # convert path lists to space separated arguments
+    string(REPLACE ";" " " PYTEST_BINDIR_ARGS "${PYTEST_BINDIR_ARGS}")
+    string(REPLACE ";" " " PYTEST_LIBDIR_ARGS "${PYTEST_LIBDIR_ARGS}")
+
+    configure_file(pytest.ini.in pytest.ini @ONLY)
+    configure_file(conftest.py.in conftest.py @ONLY)
+    configure_file(harness/cli.py harness/cli.py COPYONLY)
+
+    if(GKFS_INSTALL_TESTS)
+        configure_file(pytest.install.ini.in pytest.install.ini @ONLY)
+        install(FILES ${CMAKE_CURRENT_BINARY_DIR}/pytest.install.ini
+            DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/gkfs/tests/integration
+            RENAME pytest.ini
+        )
+
+        install(FILES conftest.py
+            DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/gkfs/tests/integration
+        )
+
+        if(NOT PYTEST_VIRTUALENV)
+            set(PYTEST_VIRTUALENV ${CMAKE_INSTALL_FULL_DATAROOTDIR}/gkfs/tests/integration/pytest-venv)
+        endif()
+
+        # Python's virtual environments are not relocatable, so we need to
+        # recreate the virtualenv at the appropriate install location.
+        # Find an appropriate python interpreter first.
+        find_package(Python3
+            3.6
+            REQUIRED
+            COMPONENTS Interpreter)
+
+        if(NOT Python3_FOUND)
+            message(FATAL_ERROR "Unable to find Python 3")
+        endif()
+
+        install(
+            CODE "message(\"Install pytest virtual environment...\")"
+            CODE "message(\"-- Create virtual environment: ${PYTEST_VIRTUALENV}\")"
+            CODE "execute_process(COMMAND ${Python3_EXECUTABLE} -m venv ${PYTEST_VIRTUALENV})"
+            CODE "message(\"-- Installing packages...\")"
+            CODE "execute_process(COMMAND ${PYTEST_VIRTUALENV}/bin/pip install --upgrade pip -v)"
+            CODE "execute_process(COMMAND ${PYTEST_VIRTUALENV}/bin/pip install -r ${CMAKE_CURRENT_BINARY_DIR}/requirements.txt --upgrade -v)"
+        )
+    endif()
+
+    # enable testing
+    set(GKFS_PYTHON_TESTING_ENABLED ON PARENT_SCOPE)
+
+endfunction()
+
+function(gkfs_add_python_test)
+    # ignore call if testing is not enabled
+    if(NOT CMAKE_TESTING_ENABLED OR NOT GKFS_PYTHON_TESTING_ENABLED)
+        return()
+    endif()
+
+    # Parse arguments
+    set(OPTION)
+    set(SINGLE NAME PYTHON_VERSION WORKING_DIRECTORY VIRTUALENV)
+    set(MULTI SOURCE BINARY_DIRECTORIES LIBRARY_PREFIX_DIRECTORIES)
+
+    cmake_parse_arguments(PYTEST "${OPTION}" "${SINGLE}" "${MULTI}" ${ARGN})
+
+    if(PYTEST_UNPARSED_ARGUMENTS)
+        message(WARNING "Unparsed arguments in gkfs_add_python_test: This often indicates typos!")
+    endif()
+
+    if(NOT PYTEST_NAME)
+        message(FATAL_ERROR "gkfs_add_python_test requires a NAME argument")
+    endif()
+
+    # set default values for arguments not provided
+    if(NOT PYTEST_PYTHON_VERSION)
+        set(PYTEST_PYTHON_VERSION 3.0)
+    endif()
+
+    if(NOT PYTEST_WORKING_DIRECTORY)
+        set(PYTEST_WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+    endif()
+
+    if(NOT PYTEST_VIRTUALENV)
+        set(PYTEST_VIRTUALENV ${CMAKE_CURRENT_BINARY_DIR}/pytest-venv)
+    endif()
+
+    # if the test doesn't provide a list of binary or library prefix
+    # directories, use the ones set by gkfs_enable_python_testing()
+    if(NOT PYTEST_BINARY_DIRECTORIES)
+        set(PYTEST_BINARY_DIRECTORIES ${GKFS_PYTEST_BINARY_DIRECTORIES})
+    endif()
+
+    if(NOT PYTEST_LIBRARY_PREFIX_DIRECTORIES)
+        set(PYTEST_LIBRARY_PREFIX_DIRECTORIES ${GKFS_PYTEST_LIBRARY_PREFIX_DIRECTORIES})
+    endif()
+
+    set(PYTEST_COMMAND_ARGS "")
+    if(PYTEST_BINARY_DIRECTORIES)
+        foreach(dir IN LISTS PYTEST_BINARY_DIRECTORIES)
+            list(APPEND PYTEST_COMMAND_ARGS "--bin-dir=${dir}")
+        endforeach()
+    endif()
+
+    if(PYTEST_LIBRARY_PREFIX_DIRECTORIES)
+        foreach(dir IN LISTS PYTEST_LIBRARY_PREFIX_DIRECTORIES)
+
+            if(NOT IS_ABSOLUTE ${dir})
+                set(dir ${CMAKE_BINARY_DIR}/${dir})
+            endif()
+
+            file(TO_CMAKE_PATH "${dir}/lib" libdir)
+            file(TO_CMAKE_PATH "${dir}/lib64" lib64dir)
+
+            if(EXISTS ${libdir})
+                list(APPEND PYTEST_COMMAND_ARGS "--lib-dir=${libdir}")
+            endif()
+
+            if(EXISTS ${lib64dir})
+                list(APPEND PYTEST_COMMAND_ARGS "--lib-dir=${lib64dir}")
+            endif()
+        endforeach()
+    endif()
+
+    # Extend the given virtualenv to be a full path.
+    if(NOT IS_ABSOLUTE ${PYTEST_VIRTUALENV})
+        set(PYTEST_VIRTUALENV ${CMAKE_BINARY_DIR}/${PYTEST_VIRTUALENV})
+    endif()
+
+    # find an appropriate python interpreter
+    find_package(Python3
+        ${PYTEST_PYTHON_VERSION}
+        REQUIRED
+        COMPONENTS Interpreter)
+
+    set(PYTEST_VIRTUALENV_PIP ${PYTEST_VIRTUALENV}/bin/pip)
+    set(PYTEST_VIRTUALENV_INTERPRETER ${PYTEST_VIRTUALENV}/bin/python)
+
+    # create a virtual environment to run the test
+    configure_file(requirements.txt.in requirements.txt @ONLY)
+
+    add_custom_command(
+        OUTPUT ${PYTEST_VIRTUALENV}
+        COMMENT "Creating virtual environment ${PYTEST_VIRTUALENV}"
+        COMMAND Python3::Interpreter -m venv "${PYTEST_VIRTUALENV}"
+        COMMAND ${PYTEST_VIRTUALENV_PIP} install --upgrade pip -q
+        COMMAND ${PYTEST_VIRTUALENV_PIP} install -r requirements.txt --upgrade -q
+    )
+
+    if(NOT TARGET venv)
+        # ensure that the virtual environment is created by the build process
+        # (this is required because we can't add dependencies between
+        # "test targets" and "normal targets")
+        add_custom_target(venv
+            ALL
+            DEPENDS ${PYTEST_VIRTUALENV}
+            DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/requirements.txt)
+    endif()
+
+    add_test(NAME ${PYTEST_NAME}
+        COMMAND ${PYTEST_VIRTUALENV_INTERPRETER}
+            -m pytest -v -s
+            ${PYTEST_COMMAND_ARGS}
+            ${PYTEST_SOURCE}
+        WORKING_DIRECTORY ${PYTEST_WORKING_DIRECTORY})
+
+    # instruct Python to not create __pycache__ directories,
+    # otherwise they will pollute ${PYTEST_WORKING_DIRECTORY} which
+    # is typically ${PROJECT_SOURCE_DIR}
+    set_tests_properties(${PYTEST_NAME} PROPERTIES
+        ENVIRONMENT PYTHONDONTWRITEBYTECODE=1)
+
+endfunction()
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 64f54933a20ffb72ba13620d55ac52dc2bcd3b06..0e6d61290c9cd164b509ad2abb17b0d1b793380b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -5,6 +5,7 @@ project(
     VERSION 0.7.0
 )
 
+enable_testing()
 
 if(NOT CMAKE_COMPILER_IS_GNUCC)
     message(FATAL_ERROR "The choosen C compiler is not gcc and is not supported")
@@ -209,3 +210,17 @@ add_subdirectory(src/global)
 # Daemon
 add_subdirectory(src/daemon)
 # Client library
 add_subdirectory(src/client)
+
+option(GKFS_BUILD_TESTS "Build GekkoFS self tests" OFF)
+
+include(CMakeDependentOption)
+cmake_dependent_option(GKFS_INSTALL_TESTS "Install GekkoFS self tests" OFF "GKFS_BUILD_TESTS" OFF)
+
+if(GKFS_BUILD_TESTS)
+    message(STATUS "[gekkofs] Preparing tests...")
+    set(GKFS_TESTS_INTERFACE "lo" CACHE STRING "Network interface to use when running tests (default: lo)")
+    message(STATUS "[gekkofs] Network interface for tests: ${GKFS_TESTS_INTERFACE}")
+    add_subdirectory(tests)
+else()
+    unset(GKFS_TESTS_INTERFACE CACHE)
+endif() diff --git a/README.md b/README.md index 6e55f4d9ddbc21e8327bee875d239e216dbf626f..7b36ace3a5a0d77c5c3d99a2f5deb1c8a37afdd2 100644 --- a/README.md +++ b/README.md @@ -115,6 +115,21 @@ make make install ``` +In order to build self-tests, the *optional* GKFS_BUILD_TESTS CMake option needs +to be enabled when building. Once that is done, tests can be run by running +`make test` in the `build` directory: + +```bash +mkdir build && cd build +cmake -DGKFS_BUILD_TESTS=ON -DCMAKE_BUILD_TYPE=Release -DRPC_PROTOCOL='ofi+sockets' .. +make +make test +make install +``` + +**IMPORTANT:** Please note that the testing framework requires Python 3.6 as an +additional dependency in order to run. + ## Run GekkoFS First on each node a daemon has to be started. This can be done in two ways using the `gkfs_daemon` binary directly or diff --git a/docker/debian_build_env.docker b/docker/debian_build_env.docker index 5bea1fcec22397e7a272331e2a9ec761fd180eb4..b697b49e999a176ef05e1abae885f5e37d5c614b 100644 --- a/docker/debian_build_env.docker +++ b/docker/debian_build_env.docker @@ -35,14 +35,19 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ libboost-program-options-dev \ valgrind \ uuid-dev \ + python3 \ + python3-dev \ + python3-venv \ + expect \ # Clean apt cache to reduce image layer size && rm -rf /var/lib/apt/lists/* -# Download dependencies source -COPY scripts/dl_dep.sh $SCRIPTS_PATH/ -RUN /bin/bash $SCRIPTS_PATH/dl_dep.sh $DEPS_SRC_PATH all - -# Compile dependencies -COPY scripts/compile_dep.sh $SCRIPTS_PATH/ -COPY scripts/patches $SCRIPTS_PATH/patches -RUN /bin/bash $SCRIPTS_PATH/compile_dep.sh $DEPS_SRC_PATH $INSTALL_PATH +## COPY scripts/dl_dep.sh $SCRIPTS_PATH/ +## COPY scripts/compile_dep.sh $SCRIPTS_PATH/ +## COPY scripts/patches $SCRIPTS_PATH/patches +## +## # Download dependencies source +## RUN /bin/bash $SCRIPTS_PATH/dl_dep.sh $DEPS_SRC_PATH all +## +## # Compile dependencies +## RUN /bin/bash $SCRIPTS_PATH/compile_dep.sh $DEPS_SRC_PATH $INSTALL_PATH diff --git a/test/README.md b/test/README.md new file mode 100644 index 0000000000000000000000000000000000000000..fa0ad2bec309d59a8e935539de2df4c00f9489d1 --- /dev/null +++ b/test/README.md @@ -0,0 +1,11 @@ +# README + +This directory contains old/deprecated GekkoFS tests. It is kept here until all +tests have been migrated to the new testing framework. + + +*** +**IMPORTANT:** + +Some of these tests are still active in the CI scripts. +*** diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..03d80a7f5ff8e51a92270ed00108617e920274e7 --- /dev/null +++ b/tests/CMakeLists.txt @@ -0,0 +1,14 @@ +include(GkfsPythonTesting) + +add_custom_target(check + COMMAND ${CMAKE_CTEST_COMMAND} + --force-new-ctest-process + --verbose + --output-on-failure +) + +# integration tests +add_subdirectory(integration) + +# unit tests +add_subdirectory(unit) diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6ad024d13d1c32bc07e4bae34394182e6f854e21 --- /dev/null +++ b/tests/README.md @@ -0,0 +1,5 @@ +# README + +This directory contains GekkoFS unit, functional, and integration tests. Please +refer to the wiki page about [testing GekkoFS](../-/wikis/Testing) for more +information. 
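For illustration, a minimal test module under the new framework only needs to request the harness fixtures defined in `tests/integration/conftest.py`. The sketch below mirrors the client calls and return fields (`retval`, `statbuf`) used in `tests/integration/directories/test_directories.py`; the test name and paths are made up:

```python
# Illustrative sketch only: the gkfs_daemon/gkfs_client fixtures come from
# tests/integration/conftest.py, and the client API mirrors its use in
# directories/test_directories.py.
import stat


def test_mkdir_and_stat(gkfs_daemon, gkfs_client):
    # create a directory in the daemon's mount directory...
    path = gkfs_daemon.mountdir / "example"
    ret = gkfs_client.mkdir(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
    assert ret.retval == 0

    # ...and verify that stat() reports it as a directory
    ret = gkfs_client.stat(path)
    assert ret.retval == 0
    assert stat.S_ISDIR(ret.statbuf.st_mode)
```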
diff --git a/tests/integration/CMakeLists.txt b/tests/integration/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..b5a50f5d60c362864efdb803ba0a48a04f4f7442 --- /dev/null +++ b/tests/integration/CMakeLists.txt @@ -0,0 +1,51 @@ +# ensure helper programs in the testing harness get built +add_subdirectory(harness) + +gkfs_enable_python_testing( + BINARY_DIRECTORIES ${CMAKE_BINARY_DIR}/src/daemon/ + ${CMAKE_BINARY_DIR}/src/client/ + ${CMAKE_BINARY_DIR}/tests/integration/harness/ + LIBRARY_PREFIX_DIRECTORIES ${CMAKE_PREFIX_PATH} +) + +# define CTest tests for functional test groups +gkfs_add_python_test( + NAME test_directories + PYTHON_VERSION 3.6 + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/tests/integration + SOURCE directories/test_directories.py +) + +gkfs_add_python_test( + NAME test_shell + PYTHON_VERSION 3.6 + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/tests/integration + SOURCE shell/ +) + +if(GKFS_INSTALL_TESTS) + install(DIRECTORY harness + DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/gkfs/tests/integration + FILES_MATCHING + REGEX ".*\\.py" + PATTERN "__pycache__" EXCLUDE + PATTERN ".pytest_cache" EXCLUDE + PATTERN "gkfs.io" EXCLUDE + ) + + install(DIRECTORY directories + DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/gkfs/tests/integration + FILES_MATCHING + REGEX ".*\\.py" + PATTERN "__pycache__" EXCLUDE + PATTERN ".pytest_cache" EXCLUDE + ) + + install(DIRECTORY shell + DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/gkfs/tests/integration + FILES_MATCHING + REGEX ".*\\.py" + PATTERN "__pycache__" EXCLUDE + PATTERN ".pytest_cache" EXCLUDE + ) +endif() diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..91d1ffe2af12c11d79ece510b07f74a749892bd2 --- /dev/null +++ b/tests/integration/conftest.py @@ -0,0 +1,120 @@ +################################################################################ +# Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# SPDX-License-Identifier: MIT # +################################################################################ + +import pytest +import logging +from collections import namedtuple +from _pytest.logging import caplog as _caplog +from pathlib import Path +from harness.logger import logger, initialize_logging, finalize_logging +from harness.cli import add_cli_options, set_default_log_formatter +from harness.workspace import Workspace, FileCreator +from harness.gkfs import Daemon, Client, ShellClient +from harness.reporter import report_test_status, report_test_headline, report_assertion_pass + +def pytest_configure(config): + """ + Some configurations for our particular usage of pytest + """ + set_default_log_formatter(config, "%(message)s") + +def pytest_assertion_pass(item, lineno, orig, expl): + + location = namedtuple( + 'Location', ['path', 'module', 'function', 'lineno'])( + str(item.parent.fspath), item.parent.name, item.name, lineno) + + report_assertion_pass(logger, location, orig, expl) + +def pytest_addoption(parser): + """ + Adds extra options from the GKFS harness to the py.test CLI. 
+ """ + add_cli_options(parser) + +@pytest.fixture(autouse=True) +def caplog(test_workspace, request, _caplog): + + # we don't need to see the logs from sh + _caplog.set_level(logging.CRITICAL, 'sh.command') + _caplog.set_level(logging.CRITICAL, 'sh.command.process') + _caplog.set_level(logging.CRITICAL, 'sh.command.process.streamreader') + _caplog.set_level(logging.CRITICAL, 'sh.stream_bufferer') + _caplog.set_level(logging.CRITICAL, 'sh.streamreader') + + test_log_path = test_workspace.logdir / (request.node.name + ".log") + + h = initialize_logging(logger, test_log_path) + report_test_headline(logger, request.node.nodeid, request.config, test_workspace) + + yield _caplog + + finalize_logging(logger, h) + +def pytest_runtest_logreport(report): + """ + Pytest hook called after a test phase (setup, call, teardownd) + has completed. + """ + + report_test_status(logger, report) + +@pytest.fixture +def test_workspace(tmp_path, request): + """ + Initializes a test workspace by creating a temporary directory for it. + """ + + yield Workspace(tmp_path, + request.config.getoption('--bin-dir'), + request.config.getoption('--lib-dir')) + +@pytest.fixture +def gkfs_daemon(test_workspace, request): + """ + Initializes a local gekkofs daemon + """ + + interface = request.config.getoption('--interface') + + daemon = Daemon(interface, test_workspace) + yield daemon.run() + daemon.shutdown() + +@pytest.fixture +def gkfs_client(test_workspace): + """ + Sets up a gekkofs client environment so that + operations (system calls, library calls, ...) can + be requested from a co-running daemon. + """ + + return Client(test_workspace) + +@pytest.fixture +def gkfs_shell(test_workspace): + """ + Sets up a gekkofs environment so that shell commands + (stat, ls, mkdir, etc.) can be issued to a co-running daemon. + """ + + return ShellClient(test_workspace) + +@pytest.fixture +def file_factory(test_workspace): + """ + Returns a factory that can create custom input files + in the test workspace. + """ + + return FileCreator(test_workspace) diff --git a/tests/integration/conftest.py.in b/tests/integration/conftest.py.in new file mode 100644 index 0000000000000000000000000000000000000000..8eaf4cafcbb3bd7c61d20e5f981d0afdf5e78eb7 --- /dev/null +++ b/tests/integration/conftest.py.in @@ -0,0 +1,27 @@ +################################################################################ +# Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# SPDX-License-Identifier: MIT # +################################################################################ + +from harness.cli import add_cli_options, set_default_log_formatter +from pathlib import Path + +def pytest_configure(config): + """ + Some configurations for our particularusage of pytest + """ + set_default_log_formatter(config, "%(message)s") + +def pytest_addoption(parser): + """ + Adds extra options from the GKFS harness to the py.test CLI. 
+ """ + add_cli_options(parser) diff --git a/tests/integration/directories/README.md b/tests/integration/directories/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1ea36f3eed1a1cf8a941678ed980219c820e9e27 --- /dev/null +++ b/tests/integration/directories/README.md @@ -0,0 +1,4 @@ +# README + +This directory contains functional tests for any directory-related +functionalities in GekkoFS. diff --git a/tests/integration/directories/test_directories.py b/tests/integration/directories/test_directories.py new file mode 100644 index 0000000000000000000000000000000000000000..fc342f4d0487a2301815c9cd50938ed5984f3a2c --- /dev/null +++ b/tests/integration/directories/test_directories.py @@ -0,0 +1,213 @@ +################################################################################ +# Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# SPDX-License-Identifier: MIT # +################################################################################ + +import harness +from pathlib import Path +import errno +import stat +import os +import ctypes +import sh +import sys +import pytest +from harness.logger import logger + +nonexisting = "nonexisting" + + +#@pytest.mark.xfail(reason="invalid errno returned on success") +def test_mkdir(gkfs_daemon, gkfs_client): + """Create a new directory in the FS's root""" + + topdir = gkfs_daemon.mountdir / "top" + longer = Path(topdir.parent, topdir.name + "_plus") + dir_a = topdir / "dir_a" + dir_b = topdir / "dir_b" + file_a = topdir / "file_a" + subdir_a = dir_a / "subdir_a" + + # create topdir + ret = gkfs_client.mkdir( + topdir, + stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) + + assert ret.retval == 0 + assert ret.errno == 115 #FIXME: Should be 0! + + # test stat on existing dir + ret = gkfs_client.stat(topdir) + + assert ret.retval == 0 + assert ret.errno == 115 #FIXME: Should be 0! + assert stat.S_ISDIR(ret.statbuf.st_mode) + + # open topdir + ret = gkfs_client.open(topdir, os.O_DIRECTORY) + assert ret.retval != -1 + assert ret.errno == 115 #FIXME: Should be 0! + + + # read and write should be impossible on directories + ret = gkfs_client.read(topdir, 1) + + assert ret.buf is None + assert ret.retval == -1 + assert ret.errno == errno.EISDIR + + # buf = bytes('42', sys.stdout.encoding) + # print(buf.hex()) + buf = b'42' + ret = gkfs_client.write(topdir, buf, 1) + + assert ret.retval == -1 + assert ret.errno == errno.EISDIR + + + # read top directory that is empty + ret = gkfs_client.opendir(topdir) + + assert ret.dirp is not None + assert ret.errno == 115 #FIXME: Should be 0! + + ret = gkfs_client.readdir(topdir) + + # XXX: This might change in the future if we add '.' and '..' + assert len(ret.dirents) == 0 + assert ret.errno == 115 #FIXME: Should be 0! + + # close directory + # TODO: disabled for now because we have no way to keep DIR* alive + # between gkfs.io executions + # ret = gkfs_client.opendir(XXX) + # assert ret.errno == 115 #FIXME: Should be 0! + + + # populate top directory + for d in [dir_a, dir_b]: + ret = gkfs_client.mkdir( + d, + stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) + + assert ret.retval == 0 + assert ret.errno == 115 #FIXME: Should be 0! 
+ + ret = gkfs_client.open(file_a, + os.O_CREAT, + stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) + + assert ret.retval != -1 + assert ret.errno == 115 #FIXME: Should be 0! + + ret = gkfs_client.readdir(gkfs_daemon.mountdir) + + # XXX: This might change in the future if we add '.' and '..' + assert len(ret.dirents) == 1 + assert ret.dirents[0].d_name == 'top' + assert ret.dirents[0].d_type == 4 # DT_DIR + assert ret.errno == 115 #FIXME: Should be 0! + + expected = [ + ( dir_a.name, 4 ), # DT_DIR + ( dir_b.name, 4 ), + ( file_a.name, 8 ) # DT_REG + ] + + ret = gkfs_client.readdir(topdir) + assert len(ret.dirents) == len(expected) + assert ret.errno == 115 #FIXME: Should be 0! + + for d,e in zip(ret.dirents, expected): + assert d.d_name == e[0] + assert d.d_type == e[1] + + # remove file using rmdir should produce an error + ret = gkfs_client.rmdir(file_a) + assert ret.retval == -1 + assert ret.errno == errno.ENOTDIR + + # create a directory with the same prefix as topdir but longer name + ret = gkfs_client.mkdir( + longer, + stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) + + assert ret.retval == 0 + assert ret.errno == 115 #FIXME: Should be 0! + + expected = [ + ( topdir.name, 4 ), # DT_DIR + ( longer.name, 4 ), # DT_DIR + ] + + ret = gkfs_client.readdir(gkfs_daemon.mountdir) + assert len(ret.dirents) == len(expected) + assert ret.errno == 115 #FIXME: Should be 0! + + for d,e in zip(ret.dirents, expected): + assert d.d_name == e[0] + assert d.d_type == e[1] + + # create 2nd level subdir and check it's not included in readdir() + ret = gkfs_client.mkdir( + subdir_a, + stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) + + assert ret.retval == 0 + assert ret.errno == 115 #FIXME: Should be 0! + + expected = [ + ( topdir.name, 4 ), # DT_DIR + ( longer.name, 4 ), # DT_DIR + ] + + ret = gkfs_client.readdir(gkfs_daemon.mountdir) + assert len(ret.dirents) == len(expected) + assert ret.errno == 115 #FIXME: Should be 0! + + for d,e in zip(ret.dirents, expected): + assert d.d_name == e[0] + assert d.d_type == e[1] + + expected = [ + ( subdir_a.name, 4 ), # DT_DIR + ] + + ret = gkfs_client.readdir(dir_a) + + assert len(ret.dirents) == len(expected) + assert ret.errno == 115 #FIXME: Should be 0! 
+ + for d,e in zip(ret.dirents, expected): + assert d.d_name == e[0] + assert d.d_type == e[1] + + + return + +@pytest.mark.skip(reason="invalid errno returned on success") +@pytest.mark.parametrize("directory_path", + [ nonexisting ]) +def test_opendir(gkfs_daemon, gkfs_client, directory_path): + + ret = gkfs_client.opendir(gkfs_daemon.mountdir / directory_path) + + assert ret.dirp is None + assert ret.errno == errno.ENOENT + +# def test_stat(gkfs_daemon): +# pass +# +# def test_rmdir(gkfs_daemon): +# pass +# +# def test_closedir(gkfs_daemon): +# pass diff --git a/tests/integration/harness/CMakeLists.txt b/tests/integration/harness/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..282d7bb75878d18d9cd8a3a9907c45d0d0782289 --- /dev/null +++ b/tests/integration/harness/CMakeLists.txt @@ -0,0 +1,80 @@ +cmake_minimum_required(VERSION 3.11) + +project(gkfs.io + VERSION 0.1 + LANGUAGES CXX +) + +add_executable(gkfs.io + gkfs.io/main.cpp + gkfs.io/commands.hpp + gkfs.io/mkdir.cpp + gkfs.io/open.cpp + gkfs.io/opendir.cpp + gkfs.io/read.cpp + gkfs.io/readdir.cpp + gkfs.io/reflection.hpp + gkfs.io/rmdir.cpp + gkfs.io/serialize.hpp + gkfs.io/stat.cpp + gkfs.io/write.cpp +) + +include(FetchContent) + +set(FETCHCONTENT_QUIET OFF) +FetchContent_Declare(cli11 + GIT_REPOSITORY https://github.com/CLIUtils/CLI11.git + GIT_TAG dd0d8e4fe729e5b1110232c7a5c9566dad884686 # v1.9.0 + GIT_SHALLOW ON + GIT_PROGRESS ON +) + +FetchContent_Declare(nlohmann_json + GIT_REPOSITORY https://github.com/nlohmann/json + GIT_TAG e7b3b40b5a95bc74b9a7f662830a27c49ffc01b4 # v3.7.3 + GIT_SHALLOW ON + GIT_PROGRESS ON +) + +FetchContent_GetProperties(cli11) + +if(NOT cli11_POPULATED) + FetchContent_Populate(cli11) + message(STATUS "[gkfs.io] CLI11 source dir: ${cli11_SOURCE_DIR}") + message(STATUS "[gkfs.io] CLI11 binary dir: ${cli11_BINARY_DIR}") + add_subdirectory(${cli11_SOURCE_DIR} ${cli11_BINARY_DIR}) +endif() + +FetchContent_GetProperties(nlohmann_json) + +if(NOT nlohmann_json_POPULATED) + FetchContent_Populate(nlohmann_json) + message(STATUS "[gkfs.io] Nlohmann JSON source dir: ${nlohmann_json_SOURCE_DIR}") + message(STATUS "[gkfs.io] Nlohmann JSON binary dir: ${nlohmann_json_BINARY_DIR}") + + # we don't really care so much about a third party library's tests to be + # run from our own project's code + set(JSON_BuildTests OFF CACHE INTERNAL "") + + # we also don't need to install it when our main project gets installed + set(JSON_Install OFF CACHE INTERNAL "") + + add_subdirectory(${nlohmann_json_SOURCE_DIR} ${nlohmann_json_BINARY_DIR}) +endif() + +target_include_directories(gkfs.io PRIVATE + $ +) + +target_link_libraries(gkfs.io + CLI11::CLI11 + nlohmann_json::nlohmann_json + fmt::fmt +) + +if(GKFS_INSTALL_TESTS) + install(TARGETS gkfs.io + DESTINATION ${CMAKE_INSTALL_BINDIR} + ) +endif() diff --git a/tests/integration/harness/README.md b/tests/integration/harness/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c78fdbcfcc34924ec4fb64dfb783cf998f97eb3a --- /dev/null +++ b/tests/integration/harness/README.md @@ -0,0 +1,5 @@ +# README + +This directory contains the code for the testing harness used for running +functional and integration tests. Please refer to the wiki page about [testing +GekkoFS](../-/wikis/Testing) for more information. 
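As a usage note, the `CommandParser` added in `tests/integration/harness/cmd.py` below converts the output of a few shell commands into named tuples. A minimal sketch, assuming the harness directory is importable as the `harness` package (as the other harness modules are in `conftest.py`):

```python
# Minimal sketch: parse md5sum output with the harness's CommandParser.
# The example string matches the one documented in Md5sumOutputSchema.
from harness.cmd import CommandParser

parser = CommandParser()
out = parser.parse('md5sum', '7f45c62700402ce5f9abe5b8d70d2844  foobar')
assert out.digest == '7f45c62700402ce5f9abe5b8d70d2844'
assert out.filename == 'foobar'
```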
diff --git a/tests/integration/harness/cli.py b/tests/integration/harness/cli.py new file mode 100644 index 0000000000000000000000000000000000000000..3144c9b8cc2d98365adf2c099826e5c5a737fc79 --- /dev/null +++ b/tests/integration/harness/cli.py @@ -0,0 +1,81 @@ +################################################################################ +# Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# SPDX-License-Identifier: MIT # +################################################################################ + +import _pytest +import logging +from pathlib import Path + +### This code is meant to be included automatically by CMake in the build +### directory's top-level conftest.py as well as the source directory's +### conftest.py, so that tests can be correctly run from both directories +def add_cli_options(parser): + """ + Adds extra options to the py.test CLI so that we can provide + search directories for libraries and helper programs. + """ + + try: + parser.addoption( + '--interface', + action='store', + type=str, + default='lo', + help="network interface used for communications (default: 'lo')." + ) + + parser.addoption( + "--bin-dir", + action='append', + default=[Path.cwd()], + help="directory that should be considered when searching " + "for programs (multi-allowed)." + ) + + parser.addoption( + "--lib-dir", + action='append', + default=[Path.cwd()], + help="directory that should be considered when searching " + "for libraries (multi-allowed)." + ) + except ValueError: + # if the CLI args have already been added, we have been called both + # from the build directory's conftest.py and from the source + # directory's conftest.py through automatic finding, ignore the error + pass + + +def set_default_log_formatter(config, fmt): + + plugin_class = config.pluginmanager.get_plugin('logging').LoggingPlugin + + if not isinstance(plugin_class, LoggingPlugin): + config.pluginmanager.get_plugin('logging').LoggingPlugin = LoggingPlugin + + +class LoggingPlugin(_pytest.logging.LoggingPlugin): + """ + Replacement logging plugin that rewrites py.test default log formatter + """ + + def _create_formatter(self, log_format, + log_date_format, auto_indent) -> logging.Formatter: + """ + Patch pytest default logger to always return our formatter + + Returns: + logging.Formatter: Our formatter + """ + + # since we use loguru for formatting, we just want the message + return logging.Formatter("%(message)s") diff --git a/tests/integration/harness/cmd.py b/tests/integration/harness/cmd.py new file mode 100644 index 0000000000000000000000000000000000000000..9285493deddcd69d9c8a1afc964c64b35febe5c0 --- /dev/null +++ b/tests/integration/harness/cmd.py @@ -0,0 +1,89 @@ +################################################################################ +# Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. 
# +# # +# SPDX-License-Identifier: MIT # +################################################################################ + +from collections import namedtuple + +class Md5sumOutputSchema: + """ + Schema to deserialize the results of a md5sum command: + + $ md5sum foobar + 7f45c62700402ce5f9abe5b8d70d2844 foobar + + """ + + _field_names = [ 'digest', 'filename' ] + + def loads(self, input): + values = input.split() + return namedtuple('md5sumOutput', self._field_names)(*values) + +class StatOutputSchema: + """ + Schema to deserialize the results of a stat --terse command: + + $ stat --terse foobar + foobar 913 8 81b4 1000 1000 10308 7343758 1 0 0 1583160824 1583160634 1583160634 0 4096 + + Output for the command follows the format below: + + %n %s %b %f %u %g %D %i %h %t %T %X %Y %Z %W %o %C + + %n: file name + %s: total size, in bytes + %b: number of blocks + %f: raw mode in hex + %u: owner UID + %g: owner GID + %D: device numer in hex + %i: inode number + %h: number of hard links + %t: major device in hex + %T: minor device in hex + %X: time of last access, seconds since Epoch + %Y: time of last data modification, seconds since Epoch + %Z: time of last status change, seconds since Epoch + %W: time of file birth, seconds since Epoch; 0 if unknown + %o: optimal I/O transfer size hint + """ + + _field_names = [ + 'filename', 'size', 'blocks', 'raw_mode', 'uid', 'gid', 'device', + 'inode', 'hard_links', 'major', 'minor', 'last_access', + 'last_modification', 'last_status_change', 'creation', + 'transfer_size' ] + + _field_types = [ + str, int, int, str, int, int, str, int, int, str, str, int, int, int, + int, int, str ] + + def loads(self, input): + values = [ t(s) for t,s in zip(self._field_types, input.split()) ] + return namedtuple('statOutput', self._field_names)(*values) + +class CommandParser: + """ + A helper parser to transform the output of some shell commands to native + Python objects. + """ + + OutputSchemas = { + 'md5sum' : Md5sumOutputSchema(), + 'stat' : StatOutputSchema(), + } + + def parse(self, command, output): + if command not in self.OutputSchemas: + raise NotImplementedError( + f"Output parser for '{command}' not implemented") + return self.OutputSchemas[command].loads(output) diff --git a/tests/integration/harness/gkfs.io/binary_buffer.hpp b/tests/integration/harness/gkfs.io/binary_buffer.hpp new file mode 100644 index 0000000000000000000000000000000000000000..b09fff2db88e633dff6b5c690b253a770ca7ee9b --- /dev/null +++ b/tests/integration/harness/gkfs.io/binary_buffer.hpp @@ -0,0 +1,77 @@ +/* + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany + + This software was partially supported by the + EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). + + This software was partially supported by the + ADA-FS project under the SPPEXA project funded by the DFG. 
+ + SPDX-License-Identifier: MIT +*/ + +#ifndef IO_BINARY_BUFFER_HPP +#define IO_BINARY_BUFFER_HPP + +#include +#include + +namespace io { + +struct buffer { + buffer(::size_t size = 0) : + m_data(size) { } + + buffer(nullptr_t p) { } + + buffer(const std::string& s) { + m_data.clear(); + std::copy(s.begin(), s.end(), std::back_inserter(m_data)); + } + + bool + operator==(nullptr_t) const { + return m_data.size() == 0; + } + + bool + operator!=(nullptr_t) const { + return m_data.size() != 0; + } + + auto + data() { + return m_data.data(); + } + + auto + storage() const { + return m_data; + } + + std::size_t + size() const { + return m_data.size(); + } + + std::vector m_data; +}; + +inline void +to_json(nlohmann::json& record, + const buffer& out) { + + if(out == nullptr) { + record = nullptr; + } + else { + +// record = fmt::format("x{:2x}", fmt::join(out, "x")); + record = out.storage(); + } +} + +} // namespace io + +#endif // IO_BINARY_BUFFER_HPP diff --git a/tests/integration/harness/gkfs.io/command.hpp b/tests/integration/harness/gkfs.io/command.hpp new file mode 100644 index 0000000000000000000000000000000000000000..a14442575cb36a9c046a0baa8d9476691175cdcd --- /dev/null +++ b/tests/integration/harness/gkfs.io/command.hpp @@ -0,0 +1,23 @@ +/* + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany + + This software was partially supported by the + EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). + + This software was partially supported by the + ADA-FS project under the SPPEXA project funded by the DFG. + + SPDX-License-Identifier: MIT +*/ + +#ifndef GKFS_IO_COMMAND_HPP +#define GKFS_IO_COMMAND_HPP + +struct command { + + std::string name; + std::string alt_name; +}; + +#endif // GKFS_IO_COMMAND_HPP diff --git a/tests/integration/harness/gkfs.io/commands.hpp b/tests/integration/harness/gkfs.io/commands.hpp new file mode 100644 index 0000000000000000000000000000000000000000..f91c696f5f4b7b8b4c5afbd02dc789fa15915be4 --- /dev/null +++ b/tests/integration/harness/gkfs.io/commands.hpp @@ -0,0 +1,44 @@ +/* + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany + + This software was partially supported by the + EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). + + This software was partially supported by the + ADA-FS project under the SPPEXA project funded by the DFG. + + SPDX-License-Identifier: MIT +*/ + +#ifndef IO_COMMANDS_HPP +#define IO_COMMANDS_HPP + +// forward declare CLI::App +namespace CLI { struct App; } + +void +mkdir_init(CLI::App& app); + +void +open_init(CLI::App& app); + +void +opendir_init(CLI::App& app); + +void +read_init(CLI::App& app); + +void +readdir_init(CLI::App& app); + +void +rmdir_init(CLI::App& app); + +void +stat_init(CLI::App& app); + +void +write_init(CLI::App& app); + +#endif // IO_COMMANDS_HPP diff --git a/tests/integration/harness/gkfs.io/main.cpp b/tests/integration/harness/gkfs.io/main.cpp new file mode 100644 index 0000000000000000000000000000000000000000..50f5b3210b7dadcb2cd2b2e51696d6abb2fd0338 --- /dev/null +++ b/tests/integration/harness/gkfs.io/main.cpp @@ -0,0 +1,46 @@ +/* + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany + + This software was partially supported by the + EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). 
+ + This software was partially supported by the + ADA-FS project under the SPPEXA project funded by the DFG. + + SPDX-License-Identifier: MIT +*/ + +//#include + +#include +#include +#include +#include + +void +init_commands(CLI::App& app) { + open_init(app); + opendir_init(app); + mkdir_init(app); + read_init(app); + readdir_init(app); + rmdir_init(app); + stat_init(app); + write_init(app); +} + + + +int +main(int argc, char* argv[]) { + + CLI::App app{"GekkoFS I/O client"}; + app.require_subcommand(1); + app.get_formatter()->label("REQUIRED", ""); + app.set_help_all_flag("--help-all", "Expand all help"); + init_commands(app); + CLI11_PARSE(app, argc, argv); + + return EXIT_SUCCESS; +} diff --git a/tests/integration/harness/gkfs.io/mkdir.cpp b/tests/integration/harness/gkfs.io/mkdir.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f542c03a094030838a775248cbfec6e977a23799 --- /dev/null +++ b/tests/integration/harness/gkfs.io/mkdir.cpp @@ -0,0 +1,107 @@ +/* + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany + + This software was partially supported by the + EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). + + This software was partially supported by the + ADA-FS project under the SPPEXA project funded by the DFG. + + SPDX-License-Identifier: MIT +*/ + +/* C++ includes */ +#include +#include +#include +#include +#include +#include +#include + +/* C includes */ +#include +#include + +using json = nlohmann::json; + +struct mkdir_options { + bool verbose; + std::string pathname; + ::mode_t mode; + + REFL_DECL_STRUCT(mkdir_options, + REFL_DECL_MEMBER(bool, verbose), + REFL_DECL_MEMBER(std::string, pathname), + REFL_DECL_MEMBER(::mode_t, mode) + ); +}; + +struct mkdir_output { + int retval; + int errnum; + + REFL_DECL_STRUCT(mkdir_output, + REFL_DECL_MEMBER(int, retval), + REFL_DECL_MEMBER(int, errnum) + ); +}; + +void +to_json(json& record, + const mkdir_output& out) { + record = serialize(out); +} + +void +mkdir_exec(const mkdir_options& opts) { + + int rv = ::mkdir(opts.pathname.c_str(), opts.mode); + + if(opts.verbose) { + fmt::print("mkdir(pathname=\"{}\", mode={:#04o}) = {}, errno: {} [{}]\n", + opts.pathname, opts.mode, rv, errno, ::strerror(errno)); + return; + } + + json out = mkdir_output{rv, errno}; + fmt::print("{}\n", out.dump(2)); +} + +void +mkdir_init(CLI::App& app) { + + // Create the option and subcommand objects + auto opts = std::make_shared(); + auto* cmd = app.add_subcommand( + "mkdir", + "Execute the mkdir() system call"); + + // Add options to cmd, binding them to opts + cmd->add_flag( + "-v,--verbose", + opts->verbose, + "Produce human readable output" + ); + + cmd->add_option( + "pathname", + opts->pathname, + "Directory name" + ) + ->required() + ->type_name(""); + + cmd->add_option( + "mode", + opts->mode, + "Octal mode specified for the new directory (e.g. 
0664)" + ) + ->required() + ->type_name(""); + + cmd->callback([opts]() { + mkdir_exec(*opts); + }); +} diff --git a/tests/integration/harness/gkfs.io/open.cpp b/tests/integration/harness/gkfs.io/open.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e2d40306d35b01bd3af3bf480045d8e1fdffc549 --- /dev/null +++ b/tests/integration/harness/gkfs.io/open.cpp @@ -0,0 +1,128 @@ +/* + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany + + This software was partially supported by the + EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). + + This software was partially supported by the + ADA-FS project under the SPPEXA project funded by the DFG. + + SPDX-License-Identifier: MIT +*/ + +/* C++ includes */ +#include +#include +#include +#include +#include +#include +#include +#include + +/* C includes */ +#include +#include +#include +#include + +using json = nlohmann::json; + +struct open_options { + bool verbose; + std::string pathname; + int flags; + ::mode_t mode; + + REFL_DECL_STRUCT(open_options, + REFL_DECL_MEMBER(bool, verbose), + REFL_DECL_MEMBER(std::string, pathname), + REFL_DECL_MEMBER(int, flags), + REFL_DECL_MEMBER(::mode_t, mode) + ); +}; + +struct open_output { + int retval; + int errnum; + + REFL_DECL_STRUCT(open_output, + REFL_DECL_MEMBER(int, retval), + REFL_DECL_MEMBER(int, errnum) + ); +}; + +void +to_json(json& record, + const open_output& out) { + record = serialize(out); +} + +void +open_exec(const open_options& opts) { + + int fd = ::open(opts.pathname.c_str(), opts.flags, opts.mode); + + if(opts.verbose) { + fmt::print("open(pathname=\"{}\", flags={}, mode={:#04o}) = {}, errno: {} [{}]\n", + opts.pathname, opts.flags, opts.mode, fd, errno, ::strerror(errno)); + return; + } + + json out = open_output{fd, errno}; + fmt::print("{}\n", out.dump(2)); + + return; +} + +void +open_init(CLI::App& app) { + + // Create the option and subcommand objects + auto opts = std::make_shared(); + auto* cmd = app.add_subcommand( + "open", + "Execute the open() system call"); + + // Add options to cmd, binding them to opts + cmd->add_flag( + "-v,--verbose", + opts->verbose, + "Produce human readable output" + ); + + cmd->add_option( + "pathname", + opts->pathname, + "Directory name" + ) + ->required() + ->type_name(""); + + cmd->add_option( + "flags", + opts->flags, + "Open flags" + ) + ->required() + ->type_name("") + ->check(CLI::NonNegativeNumber); + + cmd->add_option( + "mode", + opts->mode, + "Octal mode specified for the new directory (e.g. 0664)" + ) + //->required() + ->default_val(0) + ->type_name("") + ->check(CLI::NonNegativeNumber); + + + cmd->callback([opts]() { + open_exec(*opts); + }); +} + + diff --git a/tests/integration/harness/gkfs.io/opendir.cpp b/tests/integration/harness/gkfs.io/opendir.cpp new file mode 100644 index 0000000000000000000000000000000000000000..069c0bf14b0d11b1d7cb8096925fb628cf284786 --- /dev/null +++ b/tests/integration/harness/gkfs.io/opendir.cpp @@ -0,0 +1,97 @@ +/* + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany + + This software was partially supported by the + EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). + + This software was partially supported by the + ADA-FS project under the SPPEXA project funded by the DFG. 
+ + SPDX-License-Identifier: MIT +*/ + +/* C++ includes */ +#include +#include +#include +#include +#include +#include +#include + +/* C includes */ +#include +#include + +using json = nlohmann::json; + +struct opendir_options { + bool verbose; + std::string dirname; + + REFL_DECL_STRUCT(opendir_options, + REFL_DECL_MEMBER(bool, verbose), + REFL_DECL_MEMBER(std::string, dirname) + ); +}; + +struct opendir_output { + ::DIR* dirp; + int errnum; + + REFL_DECL_STRUCT(opendir_output, + REFL_DECL_MEMBER(::DIR*, dirp), + REFL_DECL_MEMBER(int, errnum) + ); +}; + +void +to_json(json& record, + const opendir_output& out) { + record = serialize(out); +} + +void +opendir_exec(const opendir_options& opts) { + + ::DIR* dirp = ::opendir(opts.dirname.c_str()); + + if(opts.verbose) { + fmt::print("opendir(name=\"{}\") = {}, errno: {} [{}]\n", + opts.dirname, fmt::ptr(dirp), errno, ::strerror(errno)); + return; + } + + json j = opendir_output{dirp, errno}; + fmt::print("{}\n", j.dump(2)); +} + +void +opendir_init(CLI::App& app) { + // Create the option and subcommand objects + auto opts = std::make_shared(); + auto* cmd = app.add_subcommand( + "opendir", + "Execute the opendir() glibc function"); + + // Add options to cmd, binding them to opts + cmd->add_flag( + "-v,--verbose", + opts->verbose, + "Produce human readable output" + ); + + cmd->add_option( + "dirname", + opts->dirname, + "Directory name" + ) + ->required() + ->type_name(""); + + cmd->callback([opts]() { + opendir_exec(*opts); + }); +} + diff --git a/tests/integration/harness/gkfs.io/read.cpp b/tests/integration/harness/gkfs.io/read.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fe5057df18606bdf1ea6337c0c4a8444e9e2ef4f --- /dev/null +++ b/tests/integration/harness/gkfs.io/read.cpp @@ -0,0 +1,130 @@ +/* + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany + + This software was partially supported by the + EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). + + This software was partially supported by the + ADA-FS project under the SPPEXA project funded by the DFG. 
+ + SPDX-License-Identifier: MIT +*/ + +/* C++ includes */ +#include +#include +#include +#include +#include +#include +#include +#include + +/* C includes */ +#include +#include +#include +#include + +using json = nlohmann::json; + +struct read_options { + bool verbose; + std::string pathname; + ::size_t count; + + REFL_DECL_STRUCT(read_options, + REFL_DECL_MEMBER(bool, verbose), + REFL_DECL_MEMBER(std::string, pathname), + REFL_DECL_MEMBER(::size_t, count) + ); +}; + +struct read_output { + ::ssize_t retval; + io::buffer buf; + int errnum; + + REFL_DECL_STRUCT(read_output, + REFL_DECL_MEMBER(::size_t, retval), + REFL_DECL_MEMBER(void*, buf), + REFL_DECL_MEMBER(int, errnum) + ); +}; + +void +to_json(json& record, + const read_output& out) { + record = serialize(out); +} + +void +read_exec(const read_options& opts) { + + int fd = ::open(opts.pathname.c_str(), O_RDONLY); + + if(fd == -1) { + if(opts.verbose) { + fmt::print("read(pathname=\"{}\", count={}) = {}, errno: {} [{}]\n", + opts.pathname, opts.count, fd, errno, ::strerror(errno)); + return; + } + + json out = read_output{fd, nullptr, errno}; + fmt::print("{}\n", out.dump(2)); + + return; + } + + io::buffer buf(opts.count); + + int rv = ::read(fd, buf.data(), opts.count); + + if(opts.verbose) { + fmt::print("read(pathname=\"{}\", count={}) = {}, errno: {} [{}]\n", + opts.pathname, opts.count, rv, errno, ::strerror(errno)); + return; + } + + json out = read_output{rv, (rv != -1 ? buf : nullptr), errno}; + fmt::print("{}\n", out.dump(2)); +} + +void +read_init(CLI::App& app) { + + // Create the option and subcommand objects + auto opts = std::make_shared(); + auto* cmd = app.add_subcommand( + "read", + "Execute the read() system call"); + + // Add options to cmd, binding them to opts + cmd->add_flag( + "-v,--verbose", + opts->verbose, + "Produce human readable output" + ); + + cmd->add_option( + "pathname", + opts->pathname, + "Directory name" + ) + ->required() + ->type_name(""); + + cmd->add_option( + "count", + opts->count, + "Number of bytes to read" + ) + ->required() + ->type_name(""); + + cmd->callback([opts]() { + read_exec(*opts); + }); +} + diff --git a/tests/integration/harness/gkfs.io/readdir.cpp b/tests/integration/harness/gkfs.io/readdir.cpp new file mode 100644 index 0000000000000000000000000000000000000000..163bf7723d25068e9eb2c0329ce6b0ecbb76e7e7 --- /dev/null +++ b/tests/integration/harness/gkfs.io/readdir.cpp @@ -0,0 +1,124 @@ +/* + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany + + This software was partially supported by the + EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). + + This software was partially supported by the + ADA-FS project under the SPPEXA project funded by the DFG. 
+ + SPDX-License-Identifier: MIT +*/ + +/* C++ includes */ +#include +#include +#include +#include +#include +#include +#include +#include + +/* C includes */ +#include +#include + +using json = nlohmann::json; + +struct readdir_options { + bool verbose; + std::string pathname; + ::size_t count; + + REFL_DECL_STRUCT(readdir_options, + REFL_DECL_MEMBER(bool, verbose), + REFL_DECL_MEMBER(std::string, pathname), + REFL_DECL_MEMBER(::size_t, count) + ); +}; + +struct readdir_output { + std::vector dirents; + int errnum; + + REFL_DECL_STRUCT(readdir_output, + REFL_DECL_MEMBER(std::vector, dirents), + REFL_DECL_MEMBER(int, errnum) + ); +}; + +void +to_json(json& record, + const readdir_output& out) { + record = serialize(out); +} + +void +readdir_exec(const readdir_options& opts) { + + ::DIR* dirp = ::opendir(opts.pathname.c_str()); + + if(dirp == NULL) { + if(opts.verbose) { + fmt::print("readdir(pathname=\"{}\") = {}, errno: {} [{}]\n", + opts.pathname, "NULL", errno, ::strerror(errno)); + return; + } + + json out = readdir_output{{}, errno}; + fmt::print("{}\n", out.dump(2)); + + return; + } + + io::buffer buf(opts.count); + + std::vector entries; + struct ::dirent* entry; + + while((entry = ::readdir(dirp)) != NULL) { + entries.push_back(*entry); + } + + if(opts.verbose) { + fmt::print("readdir(pathname=\"{}\") = [\n{}],\nerrno: {} [{}]\n", + opts.pathname, fmt::join(entries, ",\n"), errno, ::strerror(errno)); + return; + } + + json out = readdir_output{entries, errno}; + fmt::print("{}\n", out.dump(2)); +} + +void +readdir_init(CLI::App& app) { + + // Create the option and subcommand objects + auto opts = std::make_shared(); + auto* cmd = app.add_subcommand( + "readdir", + "Execute the readdir() system call"); + + // Add options to cmd, binding them to opts + cmd->add_flag( + "-v,--verbose", + opts->verbose, + "Produce human readable output" + ); + + cmd->add_option( + "pathname", + opts->pathname, + "Directory name" + ) + ->required() + ->type_name(""); + + cmd->callback([opts]() { + readdir_exec(*opts); + }); +} + + diff --git a/tests/integration/harness/gkfs.io/reflection.hpp b/tests/integration/harness/gkfs.io/reflection.hpp new file mode 100644 index 0000000000000000000000000000000000000000..91431afc749e522afa46dbd8eb8a7eea57c0c531 --- /dev/null +++ b/tests/integration/harness/gkfs.io/reflection.hpp @@ -0,0 +1,89 @@ +/* + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany + + This software was partially supported by the + EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). + + This software was partially supported by the + ADA-FS project under the SPPEXA project funded by the DFG. 
+ + SPDX-License-Identifier: MIT +*/ + +#ifndef GKFS_IO_REFLECTION_HPP +#define GKFS_IO_REFLECTION_HPP + +#include +#include +#include // required by DIR* + +#include + + +namespace refl { +namespace detail { + +template +struct property_impl { + constexpr property_impl(T Class::*aMember, + const char* aType, + const char* aName) : + member{aMember}, + type{aType}, + name{aName} {} + + using Type = T; + + T Class::*member; + const char* type; + const char* name; +}; + +} // namespace detail + +template +constexpr auto +property(T Class::*member, const char* type, const char* name) { + return detail::property_impl{member, type, name}; +} + +template +constexpr void for_sequence(std::integer_sequence, F&& f) { + using unpack_t = int[]; + (void) unpack_t{(static_cast(f(std::integral_constant{})), 0)..., 0}; +} + +} // namespace refl + + +/* private helper macros, do not call directly */ +#define _REFL_STRUCT_NAME(t) BOOST_PP_TUPLE_ELEM(0, t) +#define _REFL_STRUCT_MEMBER_COUNT(t) BOOST_PP_TUPLE_ELEM(1, t) +#define _REFL_MEMBER_TYPE(t) BOOST_PP_TUPLE_ELEM(0, t) +#define _REFL_MEMBER_NAME(t) BOOST_PP_TUPLE_ELEM(1, t) + +#define _REFL_GEN_FIELD(r, data, index, elem) \ + refl::property( \ + &_REFL_STRUCT_NAME(data)::_REFL_MEMBER_NAME(elem), \ + BOOST_PP_STRINGIZE(_REFL_MEMBER_TYPE(elem)), \ + BOOST_PP_STRINGIZE(_REFL_MEMBER_NAME(elem)) \ + ) \ + BOOST_PP_COMMA_IF( \ + BOOST_PP_NOT_EQUAL(index, \ + BOOST_PP_DEC(_REFL_STRUCT_MEMBER_COUNT(data)))) + +/* public interface */ +#define REFL_DECL_MEMBER(type, name) \ + (type, name) + +#define REFL_DECL_STRUCT(struct_name, ...) \ + constexpr static auto properties = std::make_tuple( \ + BOOST_PP_SEQ_FOR_EACH_I( \ + _REFL_GEN_FIELD, \ + ( struct_name, BOOST_PP_VARIADIC_SIZE(__VA_ARGS__ ) ), \ + BOOST_PP_TUPLE_TO_SEQ( ( __VA_ARGS__ ) )) \ + );\ + static_assert(true, "") + +#endif // GKFS_IO_REFLECTION_HPP diff --git a/tests/integration/harness/gkfs.io/rmdir.cpp b/tests/integration/harness/gkfs.io/rmdir.cpp new file mode 100644 index 0000000000000000000000000000000000000000..00620c2c01150f5817ffe338b02f3fec9819616f --- /dev/null +++ b/tests/integration/harness/gkfs.io/rmdir.cpp @@ -0,0 +1,102 @@ +/* + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany + + This software was partially supported by the + EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). + + This software was partially supported by the + ADA-FS project under the SPPEXA project funded by the DFG. 
+ + SPDX-License-Identifier: MIT +*/ + +/* C++ includes */ +#include +#include +#include +#include +#include +#include +#include +#include + +/* C includes */ +#include + +using json = nlohmann::json; + +struct rmdir_options { + bool verbose; + std::string pathname; + + REFL_DECL_STRUCT(rmdir_options, + REFL_DECL_MEMBER(bool, verbose), + REFL_DECL_MEMBER(std::string, pathname) + ); +}; + +struct rmdir_output { + int retval; + int errnum; + + REFL_DECL_STRUCT(rmdir_output, + REFL_DECL_MEMBER(int, retval), + REFL_DECL_MEMBER(int, errnum) + ); +}; + +void +to_json(json& record, + const rmdir_output& out) { + record = serialize(out); +} + +void +rmdir_exec(const rmdir_options& opts) { + + int fd = ::rmdir(opts.pathname.c_str()); + + if(opts.verbose) { + fmt::print("rmdir(pathname=\"{}\") = {}, errno: {} [{}]\n", + opts.pathname, errno, ::strerror(errno)); + return; + } + + json out = rmdir_output{fd, errno}; + fmt::print("{}\n", out.dump(2)); + + return; +} + +void +rmdir_init(CLI::App& app) { + + // Create the option and subcommand objects + auto opts = std::make_shared(); + auto* cmd = app.add_subcommand( + "rmdir", + "Execute the rmdir() system call"); + + // Add options to cmd, binding them to opts + cmd->add_flag( + "-v,--verbose", + opts->verbose, + "Produce human readable output" + ); + + cmd->add_option( + "pathname", + opts->pathname, + "Directory name" + ) + ->required() + ->type_name(""); + + cmd->callback([opts]() { + rmdir_exec(*opts); + }); +} + + + diff --git a/tests/integration/harness/gkfs.io/serialize.hpp b/tests/integration/harness/gkfs.io/serialize.hpp new file mode 100644 index 0000000000000000000000000000000000000000..739b89ee82517d787aac4d2eda8e57d77ba03c4a --- /dev/null +++ b/tests/integration/harness/gkfs.io/serialize.hpp @@ -0,0 +1,199 @@ +/* + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany + + This software was partially supported by the + EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). + + This software was partially supported by the + ADA-FS project under the SPPEXA project funded by the DFG. 
diff --git a/tests/integration/harness/gkfs.io/serialize.hpp b/tests/integration/harness/gkfs.io/serialize.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..739b89ee82517d787aac4d2eda8e57d77ba03c4a
--- /dev/null
+++ b/tests/integration/harness/gkfs.io/serialize.hpp
@@ -0,0 +1,199 @@
+/*
+  Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain
+  Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany
+
+  This software was partially supported by the
+  EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu).
+
+  This software was partially supported by the
+  ADA-FS project under the SPPEXA project funded by the DFG.
+
+  SPDX-License-Identifier: MIT
+*/
+
+#ifndef GKFS_IO_SERIALIZE_HPP
+#define GKFS_IO_SERIALIZE_HPP
+
+#include <nlohmann/json.hpp>
+
+template <typename T>
+nlohmann::json
+serialize(const T& object) {
+
+    using json = nlohmann::json;
+
+    json j;
+
+    constexpr auto n = std::tuple_size<decltype(T::properties)>::value;
+
+    refl::for_sequence(std::make_index_sequence<n>{},
+        [&](auto i) {
+            constexpr auto p = std::get<i>(T::properties);
+
+            //j[p.name] = json {
+            //    { "type" , p.type },
+            //    { "value" , object.*(p.member) }
+            //};
+
+            j[p.name] = object.*(p.member);
+        }
+    );
+
+    return j;
+}
+
+namespace nlohmann {
+
+// ADL specialization for pointers to complete types that
+// we want serialized
+template <typename T>
+struct adl_serializer<T*> {
+    static void to_json(json& j, const T* opt) {
+        if (opt) {
+            j = *opt;
+        } else {
+            j = nullptr;
+        }
+    }
+};
+
+// ADL specialization for C strings
+template <>
+struct adl_serializer<const char*> {
+    static void to_json(json& j, const char* opt) {
+        if (opt) {
+            j = std::string{opt};
+        } else {
+            j = std::string{};
+        }
+    }
+};
+
+// base serializer for opaque pointers
+template <typename T>
+struct opaque_ptr_serializer {
+    static void to_json(json& j, const T opt) {
+        if (opt) {
+            j = reinterpret_cast<uint64_t>(opt);
+        } else {
+            j = nullptr;
+        }
+    }
+};
+
+// ADL specialization for opaque ::DIR* type
+template <>
+struct adl_serializer<::DIR*> : public opaque_ptr_serializer<::DIR*> {
+    using opaque_ptr_serializer<::DIR*>::to_json;
+};
+
+// ADL specialization for void* type
+template <>
+struct adl_serializer<void*> : public opaque_ptr_serializer<void*> {
+    using opaque_ptr_serializer<void*>::to_json;
+};
+
+// ADL specialization for struct ::timespec type
+template <>
+struct adl_serializer<struct ::timespec> {
+    static void to_json(json& j, const struct ::timespec opt) {
+
+        j = json {
+            { "tv_sec",  opt.tv_sec },
+            { "tv_nsec", opt.tv_nsec }
+        };
+    }
+};
+
+// ADL specialization for struct ::dirent type
+template <>
+struct adl_serializer<struct ::dirent> {
+    static void to_json(json& j, const struct ::dirent opt) {
+
+        j = json {
+            { "d_ino",    opt.d_ino },
+            { "d_off",    opt.d_off },
+            { "d_reclen", opt.d_reclen },
+            { "d_type",   opt.d_type },
+            { "d_name",   opt.d_name },
+        };
+    }
+};
+
+//std::ostream&
+//operator<<(std::ostream& os, const struct ::dirent& d) {
+//    return os << "hello there!\n";
+//}
+
+// ADL specialization for struct ::stat type
+template <>
+struct adl_serializer<struct ::stat> {
+    static void to_json(json& j, const struct ::stat opt) {
+
+        j = json {
+            { "st_dev",     opt.st_dev },
+            { "st_ino",     opt.st_ino },
+            { "st_mode",    opt.st_mode },
+            { "st_nlink",   opt.st_nlink },
+            { "st_uid",     opt.st_uid },
+            { "st_gid",     opt.st_gid },
+            { "st_rdev",    opt.st_rdev },
+            { "st_size",    opt.st_size },
+            { "st_blksize", opt.st_blksize },
+            { "st_blocks",  opt.st_blocks },
+            { "st_atim",    opt.st_atim },
+            { "st_mtim",    opt.st_mtim },
+            { "st_ctim",    opt.st_ctim }
+        };
+    }
+};
+
+} // namespace nlohmann
+
+namespace fmt {
+
+template <>
+struct formatter<struct ::dirent> {
+
+    constexpr auto parse(format_parse_context& ctx) {
+        // [ctx.begin(), ctx.end()) is a character range that contains a part of
+        // the format string starting from the format specifications to be parsed,
+        // e.g. in
+        //
+        //   fmt::format("{:f} - point of interest", point{1, 2});
+        //
+        // the range will contain "f} - point of interest". The formatter should
+        // parse specifiers until '}' or the end of the range. In this example
+        // the formatter should parse the 'f' specifier and return an iterator
+        // pointing to '}'.
+
+        // Parse the presentation format and store it in the formatter:
+        auto it = ctx.begin(), end = ctx.end();
+
+        // Check if reached the end of the range:
+        if (it != end && *it != '}')
+            throw format_error("invalid format");
+
+        // Return an iterator past the end of the parsed range:
+        return it;
+    }
+
+    template <typename FormatContext>
+    auto format(const struct ::dirent& dirent, FormatContext& ctx) {
+        return format_to(ctx.out(),
+                         "struct dirent {{\n"
+                         "  d_ino = {};\n"
+                         "  d_off = {};\n"
+                         "  d_reclen = {};\n"
+                         "  d_type = {};\n"
+                         "  d_name = {};\n"
+                         "}}",
+                         dirent.d_ino,
+                         dirent.d_off,
+                         dirent.d_reclen,
+                         dirent.d_type,
+                         dirent.d_name);
+    }
+};
+
+} // namespace fmt
+
+#endif // GKFS_IO_SERIALIZE_HPP
diff --git a/tests/integration/harness/gkfs.io/stat.cpp b/tests/integration/harness/gkfs.io/stat.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6f3d03740541d7039549be37da50967a2a9ee9d0
--- /dev/null
+++ b/tests/integration/harness/gkfs.io/stat.cpp
@@ -0,0 +1,103 @@
+/*
+  Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain
+  Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany
+
+  This software was partially supported by the
+  EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu).
+
+  This software was partially supported by the
+  ADA-FS project under the SPPEXA project funded by the DFG.
+
+  SPDX-License-Identifier: MIT
+*/
+
+/* C++ includes */
+#include <CLI/CLI.hpp>
+#include <nlohmann/json.hpp>
+#include <memory>
+#include <fmt/format.h>
+#include <commands.hpp>
+#include <reflection.hpp>
+#include <serialize.hpp>
+
+/* C includes */
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+using json = nlohmann::json;
+
+struct stat_options {
+    bool verbose;
+    std::string pathname;
+
+    REFL_DECL_STRUCT(stat_options,
+        REFL_DECL_MEMBER(bool, verbose),
+        REFL_DECL_MEMBER(std::string, pathname)
+    );
+};
+
+struct stat_output {
+    int retval;
+    int errnum;
+    struct ::stat statbuf;
+
+    REFL_DECL_STRUCT(stat_output,
+        REFL_DECL_MEMBER(int, retval),
+        REFL_DECL_MEMBER(int, errnum),
+        REFL_DECL_MEMBER(struct ::stat, statbuf)
+    );
+};
+
+void
+to_json(json& record,
+        const stat_output& out) {
+    record = serialize(out);
+}
+
+void
+stat_exec(const stat_options& opts) {
+
+    struct ::stat statbuf;
+
+    int rv = ::stat(opts.pathname.c_str(), &statbuf);
+
+    if(opts.verbose) {
+        fmt::print("stat(pathname=\"{}\") = {}, errno: {} [{}]\n",
+                   opts.pathname, rv, errno, ::strerror(errno));
+        return;
+    }
+
+    json out = stat_output{rv, errno, statbuf};
+    fmt::print("{}\n", out.dump(2));
+}
+
+void
+stat_init(CLI::App& app) {
+
+    // Create the option and subcommand objects
+    auto opts = std::make_shared<stat_options>();
+    auto* cmd = app.add_subcommand(
+            "stat",
+            "Execute the stat() system call");
+
+    // Add options to cmd, binding them to opts
+    cmd->add_flag(
+            "-v,--verbose",
+            opts->verbose,
+            "Produce human readable output"
+        );
+
+    cmd->add_option(
+            "pathname",
+            opts->pathname,
+            "File name"
+        )
+        ->required()
+        ->type_name("");
+
+    cmd->callback([opts]() {
+        stat_exec(*opts);
+    });
+}
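A sketch of how the harness turns this subcommand's JSON into Python objects, using the `IOParser`/`StatOutputSchema` machinery defined in harness/io.py later in this diff. The JSON literal is illustrative (an empty regular file with mode 0644):

```python
from harness.io import IOParser

# illustrative output of `gkfs.io stat <path>`
output = '''{
  "retval": 0,
  "errnum": 0,
  "statbuf": {
    "st_dev": 0, "st_ino": 42, "st_mode": 33188, "st_nlink": 1,
    "st_uid": 1000, "st_gid": 1000, "st_rdev": 0, "st_size": 0,
    "st_blksize": 524288, "st_blocks": 0,
    "st_atim": {"tv_sec": 0, "tv_nsec": 0},
    "st_mtim": {"tv_sec": 0, "tv_nsec": 0},
    "st_ctim": {"tv_sec": 0, "tv_nsec": 0}
  }
}'''

# marshmallow's Schema.loads() parses the JSON and the @post_load hooks
# turn each record into a namedtuple
ret = IOParser().parse('stat', output)
assert ret.retval == 0
assert ret.statbuf.st_size == 0
```
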
diff --git a/tests/integration/harness/gkfs.io/write.cpp b/tests/integration/harness/gkfs.io/write.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..8c819d82c019150a7ce78a3dc91dee5a2b155acd
--- /dev/null
+++ b/tests/integration/harness/gkfs.io/write.cpp
@@ -0,0 +1,138 @@
+/*
+  Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain
+  Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany
+
+  This software was partially supported by the
+  EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu).
+
+  This software was partially supported by the
+  ADA-FS project under the SPPEXA project funded by the DFG.
+
+  SPDX-License-Identifier: MIT
+*/
+
+/* C++ includes */
+#include <CLI/CLI.hpp>
+#include <nlohmann/json.hpp>
+#include <memory>
+#include <fmt/format.h>
+#include <commands.hpp>
+#include <reflection.hpp>
+#include <serialize.hpp>
+#include <binary_buffer.hpp>
+
+/* C includes */
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+using json = nlohmann::json;
+
+struct write_options {
+    bool verbose;
+    std::string pathname;
+    std::string data;
+    ::size_t count;
+
+    REFL_DECL_STRUCT(write_options,
+        REFL_DECL_MEMBER(bool, verbose),
+        REFL_DECL_MEMBER(std::string, pathname),
+        REFL_DECL_MEMBER(std::string, data),
+        REFL_DECL_MEMBER(::size_t, count)
+    );
+};
+
+struct write_output {
+    ::ssize_t retval;
+    int errnum;
+
+    REFL_DECL_STRUCT(write_output,
+        REFL_DECL_MEMBER(::ssize_t, retval),
+        REFL_DECL_MEMBER(int, errnum)
+    );
+};
+
+void
+to_json(json& record,
+        const write_output& out) {
+    record = serialize(out);
+}
+
+void
+write_exec(const write_options& opts) {
+
+    int fd = ::open(opts.pathname.c_str(), O_WRONLY);
+
+    if(fd == -1) {
+        if(opts.verbose) {
+            fmt::print("write(pathname=\"{}\", buf=\"{}\", count={}) = {}, errno: {} [{}]\n",
+                       opts.pathname, opts.data, opts.count, fd, errno, ::strerror(errno));
+            return;
+        }
+
+        json out = write_output{fd, errno};
+        fmt::print("{}\n", out.dump(2));
+
+        return;
+    }
+
+    io::buffer buf(opts.data);
+    ::ssize_t rv = ::write(fd, buf.data(), opts.count);
+
+    if(opts.verbose) {
+        fmt::print("write(pathname=\"{}\", count={}) = {}, errno: {} [{}]\n",
+                   opts.pathname, opts.count, rv, errno, ::strerror(errno));
+        return;
+    }
+
+    json out = write_output{rv, errno};
+    fmt::print("{}\n", out.dump(2));
+}
+
+void
+write_init(CLI::App& app) {
+
+    // Create the option and subcommand objects
+    auto opts = std::make_shared<write_options>();
+    auto* cmd = app.add_subcommand(
+            "write",
+            "Execute the write() system call");
+
+    // Add options to cmd, binding them to opts
+    cmd->add_flag(
+            "-v,--verbose",
+            opts->verbose,
+            "Produce human readable output"
+        );
+
+    cmd->add_option(
+            "pathname",
+            opts->pathname,
+            "File name"
+        )
+        ->required()
+        ->type_name("");
+
+    cmd->add_option(
+            "data",
+            opts->data,
+            "Data to write"
+        )
+        ->required()
+        ->type_name("");
+
+    cmd->add_option(
+            "count",
+            opts->count,
+            "Number of bytes to write"
+        )
+        ->required()
+        ->type_name("");
+
+    cmd->callback([opts]() {
+        write_exec(*opts);
+    });
+}
+
+
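With the gkfs.io subcommands in place, a typical test composes them through the client proxy. A sketch of a round trip using the subcommands shown above (the `mkdir` mode argument and the fixture names are assumptions; mkdir.cpp is not part of this excerpt):

```python
import stat as stat_h

def test_mkdir_then_stat(gkfs_daemon, gkfs_client):
    path = gkfs_daemon.mountdir / "dir01"

    # mode argument is an assumption; the mkdir subcommand is not shown here
    ret = gkfs_client.mkdir(path, "0755")
    assert ret.retval == 0

    # stat the directory back and check its type via statbuf.st_mode
    ret = gkfs_client.stat(path)
    assert ret.retval == 0
    assert stat_h.S_ISDIR(ret.statbuf.st_mode)
```
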
diff --git a/tests/integration/harness/gkfs.py b/tests/integration/harness/gkfs.py
new file mode 100644
index 0000000000000000000000000000000000000000..36abe40ec6828b031d7e3063ebbd3e7ac22014f3
--- /dev/null
+++ b/tests/integration/harness/gkfs.py
@@ -0,0 +1,584 @@
+################################################################################
+# Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain            #
+# Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany          #
+#                                                                              #
+# This software was partially supported by the                                 #
+# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu).    #
+#                                                                              #
+# This software was partially supported by the                                 #
+# ADA-FS project under the SPPEXA project funded by the DFG.                   #
+#                                                                              #
+# SPDX-License-Identifier: MIT                                                 #
+################################################################################
+
+import os, sh, sys, re, pytest, signal
+import random, socket, netifaces
+from pathlib import Path
+from itertools import islice
+from time import perf_counter
+from pprint import pformat
+from harness.logger import logger
+from harness.io import IOParser
+from harness.cmd import CommandParser
+
+### some definitions required to interface with the client/daemon
+gkfs_daemon_cmd = 'gkfs_daemon'
+gkfs_client_cmd = 'gkfs.io'
+gkfs_client_lib_file = 'libgkfs_intercept.so'
+gkfs_hosts_file = 'gkfs_hosts.txt'
+gkfs_daemon_log_file = 'gkfs_daemon.log'
+gkfs_daemon_log_level = '100'
+gkfs_client_log_file = 'gkfs_client.log'
+gkfs_client_log_level = 'all'
+gkfs_daemon_active_log_pattern = r'Startup successful. Daemon is ready.'
+
+def get_ip_addr(iface):
+    return netifaces.ifaddresses(iface)[netifaces.AF_INET][0]['addr']
+
+def get_ephemeral_host():
+    """
+    Returns a random loopback IP in the 127.0.0.0/8 range (other than
+    127.0.0.1). This decreases the likelihood of races for ports by a
+    factor of roughly 255^3.
+    """
+
+    res = '127.{}.{}.{}'.format(random.randrange(1, 255),
+                                random.randrange(1, 255),
+                                random.randrange(2, 255),)
+
+    return res
+
+def get_ephemeral_port(port=0, host=None):
+    """
+    Get an ephemeral port at random, verified to be bindable.
+
+    Parameters
+    ----------
+    port: `int`
+        If specified, use this port as a base and the next free port after
+        that base will be returned.
+
+    host: `str`
+        If specified, use this host. Otherwise a temporary IP in the
+        127.0.0.0/8 range will be used.
+
+    Returns
+    -------
+    Available port to use
+    """
+
+    if host is None:
+        host = get_ephemeral_host()
+
+    # Dynamic port-range:
+    # * cat /proc/sys/net/ipv4/ip_local_port_range
+    #   32768 61000
+    if port == 0:
+        port = random.randrange(1024, 32768)
+
+    while True:
+        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        try:
+            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+            s.bind((host, port))
+            port = s.getsockname()[1]
+            s.close()
+            return port
+        except socket.error:
+            port = random.randrange(1024, 32768)
+
+def get_ephemeral_address(iface):
+    """
+    Get an ephemeral network address (IPv4:port) from an interface
+    and a random port.
+
+    Parameters
+    ----------
+    iface: `str`
+        The interface that will be used to find out the IPv4 address
+        for the ephemeral address.
+
+    Returns
+    -------
+    A network address formed by iface's IPv4 address and an available
+    randomly selected port.
+    """
+
+    return f"{iface}:{get_ephemeral_port(host=get_ip_addr(iface))}"
+
+def _process_exists(pid):
+    """
+    Checks whether a given PID exists in the system
+
+    Parameters
+    ----------
+    pid: `int`
+        The PID to check for
+
+    Returns
+    -------
+    True if a process with the provided `pid` exists in the system,
+    False otherwise.
+    """
+
+    try:
+        sh.ps(['-h', '-p', pid])
+    except Exception:
+        # sh raises an Exception if the command doesn't return 0
+        return False
+
+    return True
+
+class Daemon:
+    def __init__(self, interface, workspace):
+
+        self._interface = interface
+        self._address = get_ephemeral_address(interface)
+        self._workspace = workspace
+
+        self._cmd = sh.Command(gkfs_daemon_cmd, self._workspace.bindirs)
+        self._env = os.environ.copy()
+
+        libdirs = ':'.join(
+                filter(None, [os.environ.get('LD_LIBRARY_PATH', '')] +
+                             [str(p) for p in self._workspace.libdirs]))
+
+        self._patched_env = {
+            'LD_LIBRARY_PATH'      : libdirs,
+            'GKFS_HOSTS_FILE'      : self.cwd / gkfs_hosts_file,
+            'GKFS_DAEMON_LOG_PATH' : self.logdir / gkfs_daemon_log_file,
+            'GKFS_LOG_LEVEL'       : gkfs_daemon_log_level,
+        }
+        self._env.update(self._patched_env)
+
+    def run(self):
+
+        args = [ '--mountdir', self.mountdir,
+                 '--rootdir', self.rootdir,
+                 '-l', self._address ]
+
+        logger.debug(f"spawning daemon")
+        logger.debug(f"cmdline: {self._cmd} " + " ".join(map(str, args)))
+        logger.debug(f"patched env:\n{pformat(self._patched_env)}")
+
+        self._proc = self._cmd(
+                args,
+                _env=self._env,
+#                _out=sys.stdout,
+#                _err=sys.stderr,
+                _bg=True,
+            )
+
+        logger.debug(f"daemon process spawned (PID={self._proc.pid})")
+        logger.debug("waiting for daemon to be ready")
+
+        try:
+            self.wait_until_active(self._proc.pid, 10.0)
+        except Exception as ex:
+            logger.error(f"daemon initialization failed: {ex}")
+
+            # if the daemon initialized correctly but took longer than
+            # `timeout`, we may be leaving running processes behind
+            if _process_exists(self._proc.pid):
+                self.shutdown()
+
+            logger.critical(f"daemon was shut down, exception was: {ex!r}")
+            raise ex
+
+        logger.debug("daemon is ready")
+
+        return self
+
+    def wait_until_active(self, pid, timeout, max_lines=50):
+        """
+        Waits until a GKFS daemon is active or until a certain timeout
+        has expired. Checks if the daemon is running by searching its
+        log for a pre-defined readiness message.
+
+        Parameters
+        ----------
+        pid: `int`
+            The PID of the daemon process to wait for.
+
+        timeout: `number`
+            The number of seconds to wait for
+
+        max_lines: `int`
+            The maximum number of log lines to check for a match.
+        """
+
+        init_time = perf_counter()
+
+        while perf_counter() - init_time < timeout:
+            try:
+                logger.debug(f"checking log file")
+                with open(self.logdir / gkfs_daemon_log_file) as log:
+                    for line in islice(log, max_lines):
+                        if re.search(gkfs_daemon_active_log_pattern, line) is not None:
+                            return
+            except FileNotFoundError:
+                # Log is missing, the daemon might have crashed...
+                logger.debug(f"daemon log file missing, checking if daemon is alive...")
+
+                pid = self._proc.pid
+
+                if not _process_exists(pid):
+                    raise RuntimeError(f"process {pid} is not running")
+
+                # ... or it might just be lazy, let's give it some more time
+                logger.debug(f"daemon {pid} found, retrying...")
+
+        raise RuntimeError("initialization timeout exceeded")
+
+    def shutdown(self):
+        logger.debug(f"terminating daemon")
+
+        try:
+            self._proc.terminate()
+            self._proc.wait()
+        except sh.SignalException_SIGTERM:
+            pass
+
+    @property
+    def cwd(self):
+        return self._workspace.twd
+
+    @property
+    def rootdir(self):
+        return self._workspace.rootdir
+
+    @property
+    def mountdir(self):
+        return self._workspace.mountdir
+
+    @property
+    def logdir(self):
+        return self._workspace.logdir
+
+    @property
+    def interface(self):
+        return self._interface
+
+class _proxy_exec():
+    def __init__(self, client, name):
+        self._client = client
+        self._name = name
+
+    def __call__(self, *args, **kwargs):
+        return self._client.run(self._name, *args, **kwargs)
+
+class Client:
+    """
+    A class to represent a GekkoFS client process with a patched LD_PRELOAD.
+    This class allows tests to interact with the file system using I/O-related
+    function calls, be they system calls (e.g. read()) or glibc I/O functions
+    (e.g. opendir()).
+    """
+    def __init__(self, workspace):
+        self._parser = IOParser()
+        self._workspace = workspace
+        self._cmd = sh.Command(gkfs_client_cmd, self._workspace.bindirs)
+        self._env = os.environ.copy()
+
+        libdirs = ':'.join(
+                filter(None, [os.environ.get('LD_LIBRARY_PATH', '')] +
+                             [str(p) for p in self._workspace.libdirs]))
+
+        # ensure the client interception library is available:
+        # to avoid running code with potentially installed libraries,
+        # it must be found in one (and only one) of the workspace's bindirs
+        preloads = []
+        for d in self._workspace.bindirs:
+            search_path = Path(d) / gkfs_client_lib_file
+            if search_path.exists():
+                preloads.append(search_path)
+
+        if len(preloads) == 0:
+            logger.error(f'No client library found in the test\'s binary directories:')
+            for d in self._workspace.bindirs:
+                logger.error(f'    {d}')
+            pytest.exit("Aborted due to initialization error. Check test logs.")
+
+        if len(preloads) != 1:
+            logger.error(f'Multiple client libraries found in the test\'s binary directories:')
+            for p in preloads:
+                logger.error(f'    {p}')
+            logger.error(f'Make sure that only one copy of the client library is available.')
+            pytest.exit("Aborted due to initialization error. Check test logs.")
+
+        self._preload_library = preloads[0]
+
+        self._patched_env = {
+            'LD_LIBRARY_PATH'    : libdirs,
+            'LD_PRELOAD'         : self._preload_library,
+            'LIBGKFS_HOSTS_FILE' : self.cwd / gkfs_hosts_file,
+            'LIBGKFS_LOG'        : gkfs_client_log_level,
+            'LIBGKFS_LOG_OUTPUT' : self._workspace.logdir / gkfs_client_log_file
+        }
+
+        self._env.update(self._patched_env)
+
+    @property
+    def preload_library(self):
+        """
+        Return the preload library detected for this client
+        """
+
+        return self._preload_library
+
+    def run(self, cmd, *args):
+
+        logger.debug(f"running client")
+        logger.debug(f"cmdline: {self._cmd} " + " ".join(map(str, list(args))))
+        logger.debug(f"patched env: {pformat(self._patched_env)}")
+
+        out = self._cmd(
+            [ cmd ] + list(args),
+            _env = self._env,
+#            _out=sys.stdout,
+#            _err=sys.stderr,
+            )
+
+        logger.debug(f"command output: {out.stdout}")
+        return self._parser.parse(cmd, out.stdout)
+
+    def __getattr__(self, name):
+        return _proxy_exec(self, name)
+
+    @property
+    def cwd(self):
+        return self._workspace.twd
+
+class ShellCommand:
+    """
+    A wrapper class for sh.RunningCommand that allows seamlessly using all
+    its methods and properties plus extending it with additional methods
+    for ease of use.
+ """ + + def __init__(self, cmd, proc): + self._parser = CommandParser() + self._cmd = cmd + self._wrapped_proc = proc + + @property + def parsed_stdout(self): + return self._parser.parse(self._cmd, self._wrapped_proc.stdout.decode()) + + @property + def parsed_stderr(self): + return self._parser.parse(self._cmd, self._wrapped_proc.stderr.decode()) + + def __getattr__(self, attr): + if attr in self.__dict__: + return getattr(self, attr) + return getattr(self._wrapped_proc, attr) + +class ShellClient: + """ + A class to represent a GekkoFS shell client process. + This class allows tests to execute shell commands or scripts via bash -c + on a GekkoFS instance. + """ + + def __init__(self, workspace): + self._workspace = workspace + self._cmd = sh.Command("bash") + self._env = os.environ.copy() + + libdirs = ':'.join( + filter(None, [os.environ.get('LD_LIBRARY_PATH', '')] + + [str(p) for p in self._workspace.libdirs])) + + # ensure the client interception library is available: + # to avoid running code with potentially installed libraries, + # it must be found in one (and only one) of the workspace's bindirs + preloads = [] + for d in self._workspace.bindirs: + search_path = Path(d) / gkfs_client_lib_file + if search_path.exists(): + preloads.append(search_path) + + if len(preloads) != 1: + logger.error(f'Multiple client libraries found in the test\'s binary directories:') + for p in preloads: + logger.error(f' {p}') + logger.error(f'Make sure that only one copy of the client library is available.') + pytest.exit("Aborted due to initialization error") + + self._preload_library = preloads[0] + + self._patched_env = { + 'LD_LIBRARY_PATH' : libdirs, + 'LD_PRELOAD' : self._preload_library, + 'LIBGKFS_HOSTS_FILE' : self.cwd / gkfs_hosts_file, + 'LIBGKFS_LOG' : gkfs_client_log_level, + 'LIBGKFS_LOG_OUTPUT' : self._workspace.logdir / gkfs_client_log_file + } + + self._env.update(self._patched_env) + + @property + def patched_environ(self): + """ + Return the patched environment required to run a test as a string that + can be prepended to a shell command. + """ + + return ' '.join(f'{k}="{v}"' for k,v in self._patched_env.items()) + + def script(self, code, intercept_shell=True, timeout=60, timeout_signal=signal.SIGKILL): + """ + Execute a shell script passed as an argument in bash. + + For instance, the following snippet: + + mountdir = pathlib.Path('/tmp') + file01 = 'file01' + + ShellClient().script( + f''' + expected_pathname={mountdir / file01} + if [[ -e ${{expected_pathname}} ]]; + then + exit 0 + fi + exit 1 + ''') + + transforms into: + + bash -c ' + expected_pathname=/tmp/file01 + if [[ -e ${expected_pathname} ]]; + then + exit 0 + fi + exit 1 + ' + + Note that since we are using Python's f-strings, for variable + expansions to work correctly, they need to be defined with double + braces, e.g. ${{expected_pathname}}. + + Parameters + ---------- + code: `str` + The script code to be passed to 'bash -c'. + + intercept_shell: `bool` + Controls whether the shell executing the script should be + executed with LD_PRELOAD=libgkfs_intercept.so (default: True). + + timeout: `int` + How much time, in seconds, we should give the process to complete. + If the process does not finish within the timeout, it will be sent + the signal defined by `timeout_signal`. + + Default value: 60 + + timeout_signal: `int` + The signal to be sent to the process if `timeout` is not None. 
+
+            Default value: signal.SIGKILL
+
+        Returns
+        -------
+        A sh.RunningCommand instance that allows interacting with
+        the finished process.
+        """
+
+        logger.debug(f"running bash")
+        logger.debug(f"cmd: bash -c '{code}'")
+        logger.debug(f"timeout: {timeout} seconds")
+        logger.debug(f"timeout_signal: {signal.Signals(timeout_signal).name}")
+
+        if intercept_shell:
+            logger.debug(f"patched env: {self._patched_env}")
+
+        # 'sh' raises an exception if the return code is not zero;
+        # since we'd rather check for return codes explicitly, we
+        # whitelist all exit codes from 0 to 255 as 'ok' using the
+        # _ok_code argument
+        return self._cmd('-c',
+            code,
+            _env = (self._env if intercept_shell else os.environ),
+#            _out=sys.stdout,
+#            _err=sys.stderr,
+            _timeout=timeout,
+            _timeout_signal=timeout_signal,
+#            _ok_code=list(range(0, 256))
+            )
+
+    def run(self, cmd, *args, timeout=60, timeout_signal=signal.SIGKILL):
+        """
+        Execute a shell command with arguments.
+
+        For example, the following snippet:
+
+            mountdir = pathlib.Path('/tmp')
+            file01 = 'file01'
+
+            ShellClient().stat('--terse', mountdir / file01)
+
+        transforms into:
+
+            bash -c 'stat --terse /tmp/file01'
+
+        Parameters
+        ----------
+        cmd: `str`
+            The command to execute.
+
+        args: `list`
+            The list of arguments for the command.
+
+        timeout: `number`
+            How much time, in seconds, we should give the process to complete.
+            If the process does not finish within the timeout, it will be sent
+            the signal defined by `timeout_signal`.
+
+            Default value: 60
+
+        timeout_signal: `int`
+            The signal to be sent to the process if `timeout` is not None.
+
+            Default value: signal.SIGKILL
+
+        Returns
+        -------
+        A ShellCommand instance that allows interacting with the finished
+        process. Note that ShellCommand wraps sh.RunningCommand and adds
+        some extra properties to it.
+        """
+
+        bash_c_args = f"{cmd} {' '.join(str(a) for a in args)}"
+        logger.debug(f"running bash")
+        logger.debug(f"cmd: bash -c '{bash_c_args}'")
+        logger.debug(f"timeout: {timeout} seconds")
+        logger.debug(f"timeout_signal: {signal.Signals(timeout_signal).name}")
+        logger.debug(f"patched env:\n{pformat(self._patched_env)}")
+
+        # 'sh' raises an exception if the return code is not zero;
+        # since we'd rather check for return codes explicitly, we
+        # whitelist all exit codes from 0 to 255 as 'ok' using the
+        # _ok_code argument
+        proc = self._cmd('-c',
+            bash_c_args,
+            _env = self._env,
+#            _out=sys.stdout,
+#            _err=sys.stderr,
+            _timeout=timeout,
+            _timeout_signal=timeout_signal,
+#            _ok_code=list(range(0, 256))
+            )
+
+        return ShellCommand(cmd, proc)
+
+    def __getattr__(self, name):
+        return _proxy_exec(self, name)
+
+    @property
+    def cwd(self):
+        return self._workspace.twd
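Since several shell tests below are skipped precisely because intercepted clients sometimes hang, the timeout plumbing above matters in practice: `sh` kills the process with `timeout_signal` once `_timeout` expires and raises `sh.TimeoutException`. A sketch of how a test helper might turn such a hang into an explicit failure (`shell` is assumed to be the `gkfs_shell` fixture):

```python
import signal
import sh

def stat_with_timeout(shell, pathname, timeout=10):
    """Run `stat` through a ShellClient, converting a hang into a failure.

    On timeout, sh sends SIGKILL to the process and raises
    sh.TimeoutException, which we translate into an assertion error.
    """
    try:
        return shell.stat('--terse', pathname,
                          timeout=timeout,
                          timeout_signal=signal.SIGKILL)
    except sh.TimeoutException:
        raise AssertionError(
            f"stat {pathname} did not complete within {timeout}s "
            "(intercepted client possibly hung)")
```
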
diff --git a/tests/integration/harness/io.py b/tests/integration/harness/io.py
new file mode 100644
index 0000000000000000000000000000000000000000..798227c9c58cee6af16e6fefc7774d55470c708e
--- /dev/null
+++ b/tests/integration/harness/io.py
@@ -0,0 +1,182 @@
+################################################################################
+# Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain            #
+# Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany          #
+#                                                                              #
+# This software was partially supported by the                                 #
+# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu).    #
+#                                                                              #
+# This software was partially supported by the                                 #
+# ADA-FS project under the SPPEXA project funded by the DFG.                   #
+#                                                                              #
+# SPDX-License-Identifier: MIT                                                 #
+################################################################################
+
+import ctypes
+from marshmallow import Schema, fields, pre_load, post_load
+from collections import namedtuple
+
+class DIR_p(fields.Field):
+    """Field that deserializes a ::DIR* return value"""
+
+    def _deserialize(self, value, attr, data, **kwargs):
+        return ctypes.c_void_p(value)
+
+class Errno(fields.Field):
+    """Field that deserializes an errno return value"""
+
+    def _deserialize(self, value, attr, data, **kwargs):
+        return int(value)
+
+class ByteList(fields.Field):
+    """Field that deserializes a list of bytes"""
+    def _deserialize(self, value, attr, data, **kwargs):
+        return bytes(value)
+
+class StructTimespecSchema(Schema):
+    """Schema that deserializes a struct timespec"""
+    tv_sec = fields.Integer(required=True)
+    tv_nsec = fields.Integer(required=True)
+
+    @post_load
+    def make_object(self, data, **kwargs):
+        return namedtuple('StructTimespec',
+                ['tv_sec', 'tv_nsec'])(**data)
+
+class StructStatSchema(Schema):
+    """Schema that deserializes a struct stat"""
+
+    st_dev = fields.Integer(required=True)
+    st_ino = fields.Integer(required=True)
+    st_mode = fields.Integer(required=True)
+    st_nlink = fields.Integer(required=True)
+    st_uid = fields.Integer(required=True)
+    st_gid = fields.Integer(required=True)
+    st_rdev = fields.Integer(required=True)
+    st_size = fields.Integer(required=True)
+    st_blksize = fields.Integer(required=True)
+    st_blocks = fields.Integer(required=True)
+
+    st_atim = fields.Nested(StructTimespecSchema)
+    st_mtim = fields.Nested(StructTimespecSchema)
+    st_ctim = fields.Nested(StructTimespecSchema)
+
+    @post_load
+    def make_object(self, data, **kwargs):
+        return namedtuple('StructStat',
+                ['st_dev', 'st_ino', 'st_mode', 'st_nlink', 'st_uid',
+                 'st_gid', 'st_rdev', 'st_size', 'st_blksize', 'st_blocks',
+                 'st_atim', 'st_mtim', 'st_ctim'])(**data)
+
+class DirentStruct(Schema):
+    """Schema that deserializes a struct dirent"""
+
+    d_ino = fields.Integer(required=True)
+    d_off = fields.Integer(required=True)
+    d_reclen = fields.Integer(required=True)
+    d_type = fields.Integer(required=True)
+    d_name = fields.Str(required=True)
+
+    @post_load
+    def make_object(self, data, **kwargs):
+        return namedtuple('DirentStruct',
+                ['d_ino', 'd_off', 'd_reclen', 'd_type', 'd_name'])(**data)
+
+class MkdirOutputSchema(Schema):
+    """Schema to deserialize the results of a mkdir() execution"""
+
+    retval = fields.Integer(required=True)
+    errno = Errno(data_key='errnum', required=True)
+
+    @post_load
+    def make_object(self, data, **kwargs):
+        return namedtuple('MkdirReturn', ['retval', 'errno'])(**data)
+
+class OpenOutputSchema(Schema):
+    """Schema to deserialize the results of an open() execution"""
+    retval = fields.Integer(required=True)
+    errno = Errno(data_key='errnum', required=True)
+
+    @post_load
+    def make_object(self, data, **kwargs):
+        return namedtuple('OpenReturn', ['retval', 'errno'])(**data)
+
+class OpendirOutputSchema(Schema):
+    """Schema to deserialize the results of an opendir() execution"""
+    dirp = DIR_p(required=True, allow_none=True)
+    errno = Errno(data_key='errnum', required=True)
+
+    @post_load
+    def make_object(self, data, **kwargs):
+        return namedtuple('OpendirReturn', ['dirp', 'errno'])(**data)
+
+class ReadOutputSchema(Schema):
+    """Schema to deserialize the results of a read() execution"""
+
+    buf = ByteList(allow_none=True)
+    retval = fields.Integer(required=True)
+    errno = Errno(data_key='errnum', required=True)
+
+    @post_load
+    def make_object(self, data, **kwargs):
+        return namedtuple('ReadReturn', ['buf', 'retval', 'errno'])(**data)
+
+class ReaddirOutputSchema(Schema):
+    """Schema to deserialize the results of a readdir() execution"""
+
+    dirents = fields.List(fields.Nested(DirentStruct), allow_none=True)
+    errno = Errno(data_key='errnum', required=True)
+
+    @post_load
+    def make_object(self, data, **kwargs):
+        return namedtuple('ReaddirReturn', ['dirents', 'errno'])(**data)
+
+class RmdirOutputSchema(Schema):
+    """Schema to deserialize the results of a rmdir() execution"""
+
+    retval = fields.Integer(required=True)
+    errno = Errno(data_key='errnum', required=True)
+
+    @post_load
+    def make_object(self, data, **kwargs):
+        return namedtuple('RmdirReturn', ['retval', 'errno'])(**data)
+
+class WriteOutputSchema(Schema):
+    """Schema to deserialize the results of a write() execution"""
+
+    retval = fields.Integer(required=True)
+    errno = Errno(data_key='errnum', required=True)
+
+    @post_load
+    def make_object(self, data, **kwargs):
+        return namedtuple('WriteReturn', ['retval', 'errno'])(**data)
+
+class StatOutputSchema(Schema):
+    """Schema to deserialize the results of a stat() execution"""
+
+    retval = fields.Integer(required=True)
+    statbuf = fields.Nested(StructStatSchema, required=True)
+    errno = Errno(data_key='errnum', required=True)
+
+    @post_load
+    def make_object(self, data, **kwargs):
+        return namedtuple('StatReturn', ['retval', 'statbuf', 'errno'])(**data)
+
+
+class IOParser:
+
+    OutputSchemas = {
+        'mkdir'   : MkdirOutputSchema(),
+        'open'    : OpenOutputSchema(),
+        'opendir' : OpendirOutputSchema(),
+        'read'    : ReadOutputSchema(),
+        'readdir' : ReaddirOutputSchema(),
+        'rmdir'   : RmdirOutputSchema(),
+        'write'   : WriteOutputSchema(),
+        'stat'    : StatOutputSchema(),
+    }
+
+    def parse(self, command, output):
+        if command in self.OutputSchemas:
+            return self.OutputSchemas[command].loads(output)
+        else:
+            raise ValueError(f"Unknown I/O command {command}")
diff --git a/tests/integration/harness/logger.py b/tests/integration/harness/logger.py
new file mode 100644
index 0000000000000000000000000000000000000000..be4623c694fbef90dc7ff82a5f35924a97165987
--- /dev/null
+++ b/tests/integration/harness/logger.py
@@ -0,0 +1,116 @@
+################################################################################
+# Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain            #
+# Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany          #
+#                                                                              #
+# This software was partially supported by the                                 #
+# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu).    #
+#                                                                              #
+# This software was partially supported by the                                 #
+# ADA-FS project under the SPPEXA project funded by the DFG.
# +# # +# SPDX-License-Identifier: MIT # +################################################################################ + +import re +import logging +from loguru import logger + +# remove loguru's default logger +logger.remove() + +class LogFormatter: + """ + Formatter class for our log messages + """ + + def __init__(self, colorize=True): + self._colorize = colorize + + # color + self._prefix_fmt = "{time:YYYY-MM-DD HH:mm:ss.SSS} | {level: <8} | {name}:{line} | " + self._suffix_fmt = "{message}\n" + + # raw + self._raw_prefix_fmt = "{time:YYYY-MM-DD HH:mm:ss.SSS} | {level: <8} | {name}:{line} | " + self._raw_suffix_fmt = "{extra[raw_message]}\n" + + if colorize: + self._short_fmt = self._suffix_fmt + self._fmt = self._prefix_fmt + self._suffix_fmt + else: + self._short_fmt = self._raw_suffix_fmt + self._fmt = self._raw_prefix_fmt + self._raw_suffix_fmt + + + def format(self, record): + + def _ansi_strip(msg): + # 7-bit C1 ANSI sequences + ansi_escape = re.compile(r''' + \x1B # ESC + (?: # 7-bit C1 Fe (except CSI) + [@-Z\\-_] + | # or [ for CSI, followed by a control sequence + \[ + [0-?]* # Parameter bytes + [ -/]* # Intermediate bytes + [@-~] # Final byte + ) + ''', re.VERBOSE) + return ansi_escape.sub('', msg) + + if not self._colorize: + record["extra"]["raw_message"] = _ansi_strip(record["message"]) + + patch_location = record["extra"].get("patch_location", None) + if patch_location: + record.update(file="foobar") + + if record["extra"].get("skip_prefix", False): + return self._short_fmt + + return self._fmt + +class PropagateHandler(logging.Handler): + """ + This class ensures that loguru messages are propagated to caplog + """ + def emit(self, record): + logging.getLogger(record.name).handle(record) + + +def initialize_logging(logger, test_log_path, propagate=False): + + handles = [] + + # remove loguru's default logger + logger.remove() + + # create loggers: + # 1. log to file with ansi color codes + h0 = logger.add( + test_log_path.with_suffix(".color.log"), + colorize=True, + format=LogFormatter().format) + handles.append(h0) + + # 2. log to file with plain text + h1 = logger.add(test_log_path, + colorize=False, + format=LogFormatter(False).format) + handles.append(h1) + + # 3. log propagator to pytest + if propagate: + h2 = logger.add( + PropagateHandler(), + colorize=True, + format=LogFormatter().format) + handles.append(h2) + + return handles + +def finalize_logging(logger, handles): + + for h in handles: + logger.remove(h) diff --git a/tests/integration/harness/reporter.py b/tests/integration/harness/reporter.py new file mode 100644 index 0000000000000000000000000000000000000000..0b4a8bdd97c48d446d0051fd66ecb995b4365218 --- /dev/null +++ b/tests/integration/harness/reporter.py @@ -0,0 +1,147 @@ +################################################################################ +# Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. 
# +# # +# SPDX-License-Identifier: MIT # +################################################################################ + +import sys, pytest, pluggy, py, platform +from collections import namedtuple +from pathlib import Path +from pprint import pformat + +report_width = 80 + +def _add_sep(sepchar, msg, sepcolor=None, msgcolor=None, fullwidth = report_width): + # we want 2 + 2*len(fill) + len(msg) <= fullwidth + # i.e. 2 + 2*len(sepchar)*N + len(msg) <= fullwidth + # 2*len(sepchar)*N <= fullwidth - len(msg) - 2 + # N <= (fullwidth - len(msg) - 2) // (2*len(sepchar)) + N = max((fullwidth - len(msg) - 2) // (2*len(sepchar)), 1) + fill = sepchar * N + + if sepcolor is not None: + fill = f"<{sepcolor}>{fill}" + + if msgcolor is not None: + msg = f"<{msgcolor}>{msg}" + line = "%s %s %s" % (fill, msg, fill) + + if len(line) + len(sepchar.rstrip()) <= fullwidth: + line += sepchar.rstrip() + + return line + +def _format_exception(report): + + if not report.failed: + return "" + + tw = py.io.TerminalWriter(file=None, stringio=True) + tw.hasmarkup = True + tw.fullwidth = report_width + report.toterminal(tw) + + return tw._file.getvalue() + +def report_test_headline(logger, testid, config, workspace): + """ + Emit a log message describing a test configuration + """ + + lg = logger.bind(skip_prefix=True).opt(depth=1, colors=True) + + lg.info(_add_sep("=", "Test session starts")) + + verinfo = platform.python_version() + msg = "platform {} -- Python {}".format(sys.platform, verinfo) + pypy_version_info = getattr(sys, "pypy_version_info", None) + if pypy_version_info: + verinfo = ".".join(map(str, pypy_version_info[:3])) + msg += "[pypy-{}-{}]".format(verinfo, pypy_version_info[3]) + msg += ", pytest-{}, py-{}, pluggy-{}".format( + pytest.__version__, py.__version__, pluggy.__version__ + ) + + lg.info(f"{msg}") + + msg = "rootdir: %s" % config.rootdir + + if config.inifile: + msg += ", inifile: " + config.rootdir.bestrelpath(config.inifile) + + testpaths = config.getini("testpaths") + if testpaths and config.args == testpaths: + rel_paths = [config.rootdir.bestrelpath(x) for x in testpaths] + msg += ", testpaths: {}".format(", ".join(rel_paths)) + + lg.info(f"{msg}") + lg.info(f"workspace: {workspace.twd}") + lg.info(f"\n{_add_sep('=', testid)}\n") + +def report_test_status(logger, report): + """ + Emit a log message describing a test report + """ + + lg_opts = logger.opt(colors=True).bind(skip_prefix=True) + + def get_phase(report): + if report.when == "setup": + return "SETUP" + elif report.when == "call": + return "TEST" + elif report.when == "teardown": + return "TEARDOWN" + else: + raise ValueError("Test report has unknown phase") + + def get_status(report): + TestReport = namedtuple( + 'TestReport', ['phase', 'color', 'status', 'logfun']) + + phase = get_phase(report) + was_xfail = hasattr(report, "wasxfail") + + if report.passed and not was_xfail: + return TestReport(phase, "green", "PASSED", lg_opts.info) + elif report.passed and was_xfail: + return TestReport(phase, "yellow", "PASSED", lg_opts.warning) + elif report.failed: + return TestReport(phase, "red", "FAILED", lg_opts.error) + elif report.skipped: + return TestReport(phase, "yellow", "SKIPPED", lg_opts.warning) + else: + raise ValueError("Test report has unknown state") + + + phase, color, status, log_fun = get_status(report) + msg = _add_sep("=", f"{phase} {status}") + log_fun(f"\n<{color}>{msg}\n") + + if report.failed: + location = f"{_add_sep('_', report.location[2])}" + exception = _format_exception(report) + 
lg_opts.info(f"{location}\n{{}}", exception) + log_fun(f"{'=' * report_width}") + +def report_assertion_pass(logger, location, orig, expl): + + def patch_record(r): + copy = r.copy() + copy["file"].name = Path(location.path).name + copy["file"].path = location.path + copy["function"] = location.function + copy["line"] = location.lineno + #copy["module"] = location.module + copy["name"] = Path(location.path).stem + r.update(copy) + + logger.patch(lambda r : patch_record(r)).info( + f"assertion \"{orig}\" passed") diff --git a/tests/integration/harness/workspace.py b/tests/integration/harness/workspace.py new file mode 100644 index 0000000000000000000000000000000000000000..319b55a906223302cb2e3b05cfe7b65cf6559515 --- /dev/null +++ b/tests/integration/harness/workspace.py @@ -0,0 +1,162 @@ +################################################################################ +# Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# SPDX-License-Identifier: MIT # +################################################################################ + +import os, re, hashlib +from harness.logger import logger + +class Workspace: + """ + Test workspace, implements a self-contained subdirectory where a test + can run and generate artifacts in a self-contained manner. + """ + + def __init__(self, twd, bindirs, libdirs): + """ + Initializes the test workspace by creating the following directories + under `twd`: + + ./twd/ + ├──logs/ + ├──mnt/ + ├──root/ + └──tmp/ + + Parameters + ---------- + twd: `pathlib.Path` + Path to the test working directory. Must exist. + bindirs: `list(pathlib.Path)` + List of paths where programs required for the test should + be searched for. + libdirs: `list(pathlib.Path)` + List of paths where libraries required for the test should be + searched for. + """ + + self._twd = twd + self._bindirs = bindirs + self._libdirs = libdirs + self._logdir = self._twd / 'logs' + self._rootdir = self._twd / 'root' + self._mountdir = self._twd / 'mnt' + self._tmpdir = self._twd / 'tmp' + + self._logdir.mkdir() + self._rootdir.mkdir() + self._mountdir.mkdir() + self._tmpdir.mkdir() + + @property + def twd(self): + return self._twd + + @property + def bindirs(self): + return self._bindirs + + @property + def libdirs(self): + return self._libdirs + + @property + def logdir(self): + return self._logdir + + @property + def rootdir(self): + return self._rootdir + + @property + def mountdir(self): + return self._mountdir + + @property + def tmpdir(self): + return self._tmpdir + +class File: + + def __init__(self, pathname, size): + self._pathname = pathname + self._size = size + + @property + def pathname(self): + return self._pathname + + @property + def size(self): + return self._size + + + def md5sum(self, blocksize=65536): + hash = hashlib.md5() + with open(self.pathname, "rb") as f: + for block in iter(lambda: f.read(blocksize), b""): + hash.update(block) + return hash.hexdigest() + +class FileCreator: + """ + Factory that allows tests to create files in a workspace. 
+ """ + + def __init__(self, workspace): + self._workspace = workspace + + def create(self, pathname, size, unit='c'): + """ + Create a random binary file in the tests workspace's temporary + directory. + + + Parameters + ---------- + pathname: `str` + The file pathname under the workspace's tmpdir. + size: `int` or `float` + The desired size for the created file, expressed as a number of + size units. + unit: `str` + The prefix of size unit to be used to compute the resulting + file size. May be one of the following multiplicative units: + c=1, w=2, b=512, kB=1000, K=1024, MB=1000*1000, M=1024*1024, + GB=1000*1000*1000, G=1024*1024*1024. + + If left unspecified, it defaults to 'c'=1. + + Returns + ------- + The full `pathlib.Path` pathname of the created file. + """ + + # allow some aliases for convenience + suffixes = { + 'c' : 1.0, + 'w' : 2.0, + 'b' : 512.0, + 'kB' : 1000.0, + 'K' : 1024.0, + 'MB' : 1000*1000.0, + 'M' : 1024*1024.0, + 'GB' : 1000*1000*1000.0, + 'G' : 1024*1024*1024.0 + } + + full_pathname = self._workspace.tmpdir / pathname + total_size = int(float(size)*suffixes.get(unit, 1.0)) + + with open(full_pathname, "wb") as out: + out.write(os.urandom(total_size)) + + return File(full_pathname, total_size) diff --git a/tests/integration/pytest.ini.in b/tests/integration/pytest.ini.in new file mode 100644 index 0000000000000000000000000000000000000000..d7901fa8f089a69b1604172972092fe3440165dd --- /dev/null +++ b/tests/integration/pytest.ini.in @@ -0,0 +1,17 @@ +################################################################################ +# Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# SPDX-License-Identifier: MIT # +################################################################################ + +[pytest] +testpaths = @CMAKE_CURRENT_SOURCE_DIR@ +addopts = @PYTEST_BINDIR_ARGS@ @PYTEST_LIBDIR_ARGS@ --interface=@GKFS_TESTS_INTERFACE@ +enable_assertion_pass_hook = true diff --git a/tests/integration/pytest.install.ini.in b/tests/integration/pytest.install.ini.in new file mode 100644 index 0000000000000000000000000000000000000000..23e87ec25a245c66b43c20037c70ee685e9b4214 --- /dev/null +++ b/tests/integration/pytest.install.ini.in @@ -0,0 +1,17 @@ +################################################################################ +# Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. 
# +# # +# SPDX-License-Identifier: MIT # +################################################################################ + +[pytest] +testpaths = @CMAKE_INSTALL_FULL_DATAROOTDIR@/gkfs/tests +addopts = --bin-dir=@CMAKE_INSTALL_FULL_BINDIR@ --bin-dir=@CMAKE_INSTALL_FULL_LIBDIR@ --lib-dir=@CMAKE_INSTALL_FULL_LIBDIR@ --interface=@GKFS_TESTS_INTERFACE@ +enable_assertion_pass_hook = true diff --git a/tests/integration/requirements.txt.in b/tests/integration/requirements.txt.in new file mode 100644 index 0000000000000000000000000000000000000000..d784b553e80b62af0d66fef1223b42e2a5d32c3a --- /dev/null +++ b/tests/integration/requirements.txt.in @@ -0,0 +1,36 @@ +apipkg==1.5 +attrs==19.3.0 +backcall==0.1.0 +decorator==4.4.1 +execnet==1.7.1 +importlib-metadata==1.5.0 +ipython==7.12.0 +ipython-genutils==0.2.0 +jedi==0.16.0 +loguru==0.4.1 +marshmallow==3.4.0 +more-itertools==8.2.0 +mypy-extensions==0.4.3 +netifaces==0.10.9 +packaging==20.1 +parso==0.6.1 +pexpect==4.8.0 +pickleshare==0.7.5 +pluggy==0.13.1 +prompt-toolkit==3.0.3 +ptyprocess==0.6.0 +py==1.8.1 +Pygments==2.5.2 +pyparsing==2.4.6 +pytest==5.3.5 +pytest-dependency==0.5.1 +pytest-forked==1.1.3 +pytest-xdist==1.31.0 +sh==1.12.14 +six==1.14.0 +traitlets==4.3.3 +typing-extensions==3.7.4.1 +typing-inspect==0.5.0 +typish==1.3.1 +wcwidth==0.1.8 +zipp==2.1.0 diff --git a/tests/integration/shell/README.md b/tests/integration/shell/README.md new file mode 100644 index 0000000000000000000000000000000000000000..84f91a1722ef85ecf9973904cf1ef58fdb8f28e2 --- /dev/null +++ b/tests/integration/shell/README.md @@ -0,0 +1,5 @@ +# README + +This directory contains all GekkoFS tests that require a shell interpreter in +order to run. The tests should exercise shell commands such as `cp` or `mv`, as +well as shell constructs such as *piping* and *redirection*. diff --git a/tests/integration/shell/test_concat.py b/tests/integration/shell/test_concat.py new file mode 100644 index 0000000000000000000000000000000000000000..3f86ea708d58a2fd02c16db7d7b59ec7b675361d --- /dev/null +++ b/tests/integration/shell/test_concat.py @@ -0,0 +1,53 @@ +################################################################################ +# Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain # +# Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany # +# # +# This software was partially supported by the # +# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). # +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. 
+#                                                                              #
+# SPDX-License-Identifier: MIT                                                 #
+################################################################################
+
+import pytest
+from harness.logger import logger
+
+large_file_01 = 'large_file_01'
+large_file_02 = 'large_file_02'
+
+@pytest.mark.skip(reason="using >> to concatenate files seems to hang clients")
+def test_concat(gkfs_daemon, gkfs_shell, file_factory):
+    """Concatenate two large files"""
+
+    lf01 = file_factory.create(large_file_01, size=4.0, unit='MB')
+    lf02 = file_factory.create(large_file_02, size=4.0, unit='MB')
+
+    cmd = gkfs_shell.cp(lf01.pathname, gkfs_daemon.mountdir)
+    assert cmd.exit_code == 0
+
+    cmd = gkfs_shell.cp(lf02.pathname, gkfs_daemon.mountdir)
+    assert cmd.exit_code == 0
+
+    cmd = gkfs_shell.stat('--terse', gkfs_daemon.mountdir / large_file_01)
+    assert cmd.exit_code == 0
+    out = cmd.parsed_stdout
+    assert out.size == lf01.size
+
+    cmd = gkfs_shell.stat('--terse', gkfs_daemon.mountdir / large_file_02)
+    assert cmd.exit_code == 0
+    out = cmd.parsed_stdout
+    assert out.size == lf02.size
+
+    cmd = gkfs_shell.md5sum(gkfs_daemon.mountdir / large_file_01)
+    assert cmd.exit_code == 0
+    assert cmd.parsed_stdout.digest == lf01.md5sum()
+
+    cmd = gkfs_shell.md5sum(gkfs_daemon.mountdir / large_file_02)
+    assert cmd.exit_code == 0
+    assert cmd.parsed_stdout.digest == lf02.md5sum()
+
+    ##XXX hangs!
+    cmd = gkfs_shell.script(
+        f"cat {gkfs_daemon.mountdir / large_file_01} >> "
+        f"{gkfs_daemon.mountdir / large_file_02}")
+    assert cmd.exit_code == 0
diff --git a/tests/integration/shell/test_cp.py b/tests/integration/shell/test_cp.py
new file mode 100644
index 0000000000000000000000000000000000000000..e44fbbfeb63ad9e1b042983e4ea62467720f0e11
--- /dev/null
+++ b/tests/integration/shell/test_cp.py
@@ -0,0 +1,30 @@
+################################################################################
+# Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain            #
+# Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany          #
+#                                                                              #
+# This software was partially supported by the                                 #
+# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu).    #
+#                                                                              #
+# This software was partially supported by the                                 #
+# ADA-FS project under the SPPEXA project funded by the DFG.                   #
+#                                                                              #
+# SPDX-License-Identifier: MIT                                                 #
+################################################################################
+
+import pytest
+from harness.logger import logger
+
+file01 = 'file01'
+
+@pytest.mark.skip(reason="shell tests seem to hang clients at times")
+def test_cp(gkfs_daemon, gkfs_shell, file_factory):
+    """Copy a file into gkfs using the shell"""
+
+    logger.info("creating input file")
+    lf01 = file_factory.create(file01, size=4.0, unit='MB')
+
+    logger.info("copying into gkfs")
+    cmd = gkfs_shell.cp(lf01.pathname, gkfs_daemon.mountdir)
+    assert cmd.exit_code == 0
+    assert cmd.stdout.decode() == ''
+    assert cmd.stderr.decode() == ''
diff --git a/tests/integration/shell/test_stat.py b/tests/integration/shell/test_stat.py
new file mode 100644
index 0000000000000000000000000000000000000000..feb19845a33500b7880884df6372b27b620010e6
--- /dev/null
+++ b/tests/integration/shell/test_stat.py
@@ -0,0 +1,88 @@
+################################################################################
+# Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain            #
+# Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany          #
+#                                                                              #
+# This software was partially supported by the                                 #
+# EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu).
# +# # +# This software was partially supported by the # +# ADA-FS project under the SPPEXA project funded by the DFG. # +# # +# SPDX-License-Identifier: MIT # +################################################################################ + +import pytest +from harness.logger import logger + +file01 = 'file01' + +@pytest.mark.skip(reason="shell tests seem to hang clients at times") +def test_shell_if_e(gkfs_daemon, gkfs_shell, file_factory): + """ + Copy a file into gkfs using the shell and check that it + exists using `if [[ -e ]]`. + """ + + logger.info("creating input file") + lf01 = file_factory.create(file01, size=4.0, unit='MB') + + logger.info("copying into gkfs") + cmd = gkfs_shell.cp(lf01.pathname, gkfs_daemon.mountdir) + assert cmd.exit_code == 0 + + logger.info("checking if file exists") + cmd = gkfs_shell.script( + f""" + expected_pathname={gkfs_daemon.mountdir / file01} + if [[ -e ${{expected_pathname}} ]]; + then + exit 0 + fi + exit 1 + """) + + assert cmd.exit_code == 0 + +@pytest.mark.skip(reason="shell tests seem to hang clients at times") +def test_stat_script(gkfs_daemon, gkfs_shell, file_factory): + """ + Copy a file into gkfs using the shell and check that it + exists using stat in a script. + """ + + logger.info("creating input file") + lf01 = file_factory.create(file01, size=4.0, unit='MB') + + logger.info("copying into gkfs") + cmd = gkfs_shell.cp(lf01.pathname, gkfs_daemon.mountdir) + assert cmd.exit_code == 0 + + logger.info("checking metadata") + cmd = gkfs_shell.script( + f""" + expected_pathname={gkfs_daemon.mountdir / file01} + {gkfs_shell.patched_environ} stat ${{expected_pathname}} + exit $? + """, + intercept_shell=False) + + assert cmd.exit_code == 0 + +@pytest.mark.skip(reason="shell tests seem to hang clients at times") +def test_stat_command(gkfs_daemon, gkfs_shell, file_factory): + """ + Copy a file into gkfs using the shell and check that it + exists using stat as a command. 
+ """ + + logger.info("creating input file") + lf01 = file_factory.create(file01, size=4.0, unit='MB') + + logger.info("copying into gkfs") + cmd = gkfs_shell.cp(lf01.pathname, gkfs_daemon.mountdir) + assert cmd.exit_code == 0 + + logger.info("checking metadata") + cmd = gkfs_shell.stat('--terse', gkfs_daemon.mountdir / file01) + assert cmd.exit_code == 0 + assert cmd.parsed_stdout.size == lf01.size diff --git a/tests/unit/CMakeLists.txt b/tests/unit/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..08a6f139e615e4d63da65b4f6cc21ceb82aeba13 --- /dev/null +++ b/tests/unit/CMakeLists.txt @@ -0,0 +1,54 @@ +include(FetchContent) + +# get Catch2 +set(FETCHCONTENT_QUIET OFF) +FetchContent_Declare(catch2 + GIT_REPOSITORY https://github.com/catchorg/Catch2.git + GIT_TAG 255aa5f2afe1a622c97422f65ace6ca915be0d8d # v2.11.3 + GIT_SHALLOW ON + GIT_PROGRESS ON +) + +FetchContent_GetProperties(catch2) + +if(NOT catch2_POPULATED) + FetchContent_Populate(catch2) + message(STATUS "[gkfs] Catch2 source dir: ${catch2_SOURCE_DIR}") + message(STATUS "[gkfs] Catch2 binary dir: ${catch2_BINARY_DIR}") + set(CATCH_BUILD_TESTING OFF CACHE INTERNAL "") + add_subdirectory(${catch2_SOURCE_DIR} ${catch2_BINARY_DIR}) +endif() + +# create a convenience library with Catch2's main +# to speed up test compilation +add_library(catch2_main + STATIC catch_main.cpp +) + +target_link_libraries(catch2_main + Catch2::Catch2 +) + +# define executables for tests and make them depend on the convenience +# library (and Catch2 transitively) and fmt +add_executable(tests + test_example_00.cpp + test_example_01.cpp +) + +target_link_libraries(tests + catch2_main + fmt::fmt +) + +# Catch2's contrib folder includes some helper functions +# to auto-discover Catch tests and register them in CTest +set(CMAKE_MODULE_PATH "${catch2_SOURCE_DIR}/contrib" ${CMAKE_MODULE_PATH}) +include(Catch) +catch_discover_tests(tests) + +if(GKFS_INSTALL_TESTS) + install(TARGETS tests + DESTINATION ${CMAKE_INSTALL_BINDIR} + ) +endif() diff --git a/tests/unit/catch_main.cpp b/tests/unit/catch_main.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4ed06df1f7bea8cc18ee161389b9c3e2741b08a0 --- /dev/null +++ b/tests/unit/catch_main.cpp @@ -0,0 +1,2 @@ +#define CATCH_CONFIG_MAIN +#include diff --git a/tests/unit/test_example_00.cpp b/tests/unit/test_example_00.cpp new file mode 100644 index 0000000000000000000000000000000000000000..63d01fd08e38a2a388f833a02edf019493cc3ba0 --- /dev/null +++ b/tests/unit/test_example_00.cpp @@ -0,0 +1,30 @@ +/* + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany + + This software was partially supported by the + EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). + + This software was partially supported by the + ADA-FS project under the SPPEXA project funded by the DFG. + + SPDX-License-Identifier: MIT +*/ + +#include +#include + +unsigned int Factorial( unsigned int number ) { + return number <= 1 ? 
number : Factorial(number-1)*number; +} + +TEST_CASE( "Factorials are computed", "[factorial]" ) { + REQUIRE( Factorial(1) == 1 ); + REQUIRE( Factorial(2) == 2 ); + REQUIRE( Factorial(3) == 6 ); + REQUIRE( Factorial(10) == 3628800 ); +} + +TEST_CASE( "Two and Two is Four", "[2+2=4]" ) { + REQUIRE( 2+2 == 4 ); +} diff --git a/tests/unit/test_example_01.cpp b/tests/unit/test_example_01.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f061d6186ea40c83e7df952779a039d837737fb8 --- /dev/null +++ b/tests/unit/test_example_01.cpp @@ -0,0 +1,58 @@ +/* + Copyright 2018-2020, Barcelona Supercomputing Center (BSC), Spain + Copyright 2015-2020, Johannes Gutenberg Universitaet Mainz, Germany + + This software was partially supported by the + EC H2020 funded project NEXTGenIO (Project ID: 671951, www.nextgenio.eu). + + This software was partially supported by the + ADA-FS project under the SPPEXA project funded by the DFG. + + SPDX-License-Identifier: MIT +*/ + +#include +#include + +SCENARIO( "vectors can be sized and resized", "[vector]" ) { + + GIVEN( "A vector with some items" ) { + std::vector v( 5 ); + + REQUIRE( v.size() == 5 ); + REQUIRE( v.capacity() >= 5 ); + + WHEN( "the size is increased" ) { + v.resize( 10 ); + + THEN( "the size and capacity change" ) { + REQUIRE( v.size() == 10 ); + REQUIRE( v.capacity() >= 10 ); + } + } + WHEN( "the size is reduced" ) { + v.resize( 0 ); + + THEN( "the size changes but not capacity" ) { + REQUIRE( v.size() == 0 ); + REQUIRE( v.capacity() >= 5 ); + } + } + WHEN( "more capacity is reserved" ) { + v.reserve( 10 ); + + THEN( "the capacity changes but not the size" ) { + REQUIRE( v.size() == 5 ); + REQUIRE( v.capacity() >= 10 ); + } + } + WHEN( "less capacity is reserved" ) { + v.reserve( 0 ); + + THEN( "neither size nor capacity are changed" ) { + REQUIRE( v.size() == 5 ); + REQUIRE( v.capacity() >= 5 ); + } + } + } +}
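Since `catch_discover_tests()` registers each `TEST_CASE` with CTest, both suites added by this change can be driven from a build tree. A sketch of doing so from Python; the build-tree paths are assumptions about a typical out-of-source layout:

```python
import subprocess

# unit tests: catch_discover_tests() registered each TEST_CASE with CTest,
# so a plain ctest invocation from the unit-test build directory runs them
subprocess.run(["ctest", "--output-on-failure"],
               cwd="build/tests/unit", check=True)

# integration tests: pytest picks up testpaths/addopts from the pytest.ini
# generated from pytest.ini.in in the integration-test build directory
subprocess.run(["py.test", "-v"],
               cwd="build/tests/integration", check=True)
```
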