Commit fb57de99 authored by Marc Vef's avatar Marc Vef
Browse files

Merge branch '32-io-exceeding-segment-count' into 'master'

Resolve "IO exceeding segment count"

Closes #32

See merge request zdvresearch_bsc/adafs!13
parents 7f74be02 29cc8203
Loading
Loading
Loading
Loading
+0 −6
Original line number Diff line number Diff line
@@ -31,10 +31,4 @@ void read_file_abt(void* _arg);

void write_file_abt(void* _arg);

// Writes the buffers in buf_ptrs (lengths in buf_sizes) into the chunk storage
// backing `path`, starting at byte `offset`; the total number of bytes written
// is reported through `write_size`.
// NOTE(review): return-code convention (0 == success?) is not visible in this
// header — confirm at the definition.
int write_chunks(const std::string& path, const std::vector<void*>& buf_ptrs, const std::vector<hg_size_t>& buf_sizes,
                 off_t offset, size_t& write_size);

// Reads chunk data of `path` starting at byte `offset` into the buffers in
// buf_ptrs (capacities in buf_sizes); the total number of bytes read is
// reported through `read_size`.
// NOTE(review): return-code convention mirrors write_chunks — confirm at the
// definition.
int read_chunks(const std::string& path, off_t offset, const std::vector<void*>& buf_ptrs,
                const std::vector<hg_size_t>& buf_sizes, size_t& read_size);

#endif //IFS_DATA_HPP
+0 −1
Original line number Diff line number Diff line
@@ -3,7 +3,6 @@
#define LFS_RPC_DATA_HPP

#include <daemon/adafs_daemon.hpp>
#include <extern/lrucache/LRUCache11.hpp>

class RPCData {

+0 −228
Original line number Diff line number Diff line
/*
 * LRUCache11 - a templated C++11 based LRU cache class that allows
 * specification of
 * key, value and optionally the map container type (defaults to
 * std::unordered_map)
 * By using the std::map and a linked list of keys it allows O(1) insert, delete
 * and
 * refresh operations.
 *
 * This is a header-only library and all you need is the LRUCache11.hpp file
 *
 * Github: https://github.com/mohaps/lrucache11
 *
 * This is a follow-up to the LRUCache project -
 * https://github.com/mohaps/lrucache
 *
 * Copyright (c) 2012-22 SAURAV MOHAPATRA <mohaps@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#pragma once

#include <algorithm>
#include <cstdint>
#include <list>
#include <mutex>
#include <stdexcept>
#include <thread>
#include <unordered_map>
#include <utility>

namespace lru11 {
/**
 * Mixin base class that disables copying.
 * Use as `class Name : private NoCopy {}` to delete Name's copy
 * constructor and copy assignment.
 * The deleted operations are declared public so misuse is diagnosed by the
 * compiler as "call to deleted function" rather than "member is private"
 * (C++ Core Guidelines C.81); behavior for valid code is unchanged.
 */
    class NoCopy {
    public:
        NoCopy(const NoCopy&) = delete;

        NoCopy& operator=(const NoCopy&) = delete;

        // virtual: NoCopy is intended as a (private) base class
        virtual ~NoCopy() = default;

    protected:
        // only derived classes may construct
        NoCopy() = default;
    };

/*
 * A do-nothing lockable type, usable wherever std::mutex is expected,
 * for callers that want no synchronization at all.
 */
    class NullLock {
    public:
        // BasicLockable: acquire and release are both no-ops.
        void lock() {}

        void unlock() {}

        // Lockable: "acquisition" unconditionally succeeds.
        bool try_lock() { return true; }
    };

/**
 * Exception raised by Cache::get() when the requested key is not cached.
 */
    class KeyNotFound : public std::invalid_argument {
    public:
        KeyNotFound()
                : std::invalid_argument("key_not_found") {}
    };

    /**
     * Node type stored by the cache: a key paired with its cached value.
     */
    template<typename K, typename V>
    struct KeyValuePair {
    public:
        K key;
        V value;

        KeyValuePair(const K& the_key, const V& the_value)
                : key(the_key), value(the_value) {}
    };

/**
 *	The LRU Cache class templated by
 *		Key - key type
 *		Value - value type
 *		Lock - a Lockable type (default: NullLock = no synchronization;
 *	pass std::mutex to make the cache thread-safe)
 *		Map - an associative container mapping Key to an iterator into the
 *	key list (defaults to std::unordered_map)
 *
 *	The most recently used entry is kept at the front of keys_; eviction
 *	removes entries from the back (least recently used).
 */
    template<class Key, class Value, class Lock = NullLock,
            class Map = std::unordered_map<
                    Key, typename std::list<KeyValuePair<Key, Value>>::iterator>>
    class Cache : private NoCopy {
    public:
        typedef KeyValuePair<Key, Value> node_type;
        typedef std::list<KeyValuePair<Key, Value>> list_type;
        typedef Map map_type;
        typedef Lock lock_type;
        using Guard = std::lock_guard<lock_type>;

        /**
         * maxSize is the hard limit of keys and (maxSize + elasticity) is
         * the soft limit: the cache is allowed to grow until
         * maxSize + elasticity entries and is then pruned back to maxSize.
         * Set maxSize = 0 for an unbounded cache (but in that case a plain
         * std::unordered_map would serve you better anyway! :)
         */
        explicit Cache(size_t maxSize = 64, size_t elasticity = 10)
                : maxSize_(maxSize), elasticity_(elasticity) {}

        virtual ~Cache() = default;

        /// number of entries currently cached
        size_t size() const {
            Guard g(lock_);
            return cache_.size();
        }

        /// true iff the cache holds no entries
        bool empty() const {
            Guard g(lock_);
            return cache_.empty();
        }

        /// drop all entries
        void clear() {
            Guard g(lock_);
            cache_.clear();
            keys_.clear();
        }

        /**
         * insert (or overwrite) the value for key k and mark it most
         * recently used; may evict least-recently-used entries (see prune())
         */
        void insert(const Key& k, const Value& v) {
            Guard g(lock_);
            const auto iter = cache_.find(k);
            if (iter != cache_.end()) {
                // existing key: update in place and move node to MRU position
                iter->second->value = v;
                keys_.splice(keys_.begin(), keys_, iter->second);
                return;
            }

            keys_.emplace_front(k, v);
            cache_[k] = keys_.begin();
            prune();
        }

        /**
         * copy the value for kIn into vOut and mark the entry most recently
         * used; returns false (vOut untouched) when the key is absent
         */
        bool tryGet(const Key& kIn, Value& vOut) {
            Guard g(lock_);
            const auto iter = cache_.find(kIn);
            if (iter == cache_.end()) {
                return false;
            }
            // splice() relinks the node; iter->second stays valid
            keys_.splice(keys_.begin(), keys_, iter->second);
            vOut = iter->second->value;
            return true;
        }

        /**
         * return a reference to the value for k and mark the entry most
         * recently used; throws KeyNotFound when the key is absent.
         * NOTE: the reference is returned after the internal lock is
         * released — with a real Lock it is only safe to use while no other
         * thread may mutate the cache.
         */
        const Value& get(const Key& k) {
            Guard g(lock_);
            const auto iter = cache_.find(k);
            if (iter == cache_.end()) {
                throw KeyNotFound();
            }
            keys_.splice(keys_.begin(), keys_, iter->second);
            return iter->second->value;
        }

        /// erase the entry for k; returns false when the key was absent
        bool remove(const Key& k) {
            Guard g(lock_);
            auto iter = cache_.find(k);
            if (iter == cache_.end()) {
                return false;
            }
            keys_.erase(iter->second);
            cache_.erase(iter);
            return true;
        }

        /// true iff k is cached; does not affect LRU order
        bool contains(const Key& k) const {
            Guard g(lock_);
            return cache_.find(k) != cache_.end();
        }

        size_t getMaxSize() const { return maxSize_; }

        size_t getElasticity() const { return elasticity_; }

        size_t getMaxAllowedSize() const { return maxSize_ + elasticity_; }

        /**
         * invoke f on every cached node in MRU-to-LRU order while holding
         * the lock; accepts lvalue and rvalue (e.g. lambda literal)
         * callables alike
         */
        template<typename F>
        void cwalk(F&& f) const {
            Guard g(lock_);
            std::for_each(keys_.begin(), keys_.end(), std::forward<F>(f));
        }

    protected:
        /**
         * evict LRU entries once the size reaches maxSize_ + elasticity_,
         * shrinking back to maxSize_ entries; returns the number of
         * evictions (always 0 for an unbounded cache, maxSize_ == 0)
         */
        size_t prune() {
            size_t maxAllowed = maxSize_ + elasticity_;
            if (maxSize_ == 0 || cache_.size() < maxAllowed) {
                return 0;
            }
            size_t count = 0;
            while (cache_.size() > maxSize_) {
                cache_.erase(keys_.back().key);
                keys_.pop_back();
                ++count;
            }
            return count;
        }

    private:
        mutable Lock lock_;   // mutable so const accessors can still lock
        Map cache_;           // key -> iterator into keys_
        list_type keys_;      // MRU at the front, LRU at the back
        size_t maxSize_;
        size_t elasticity_;
    };

}  // namespace lru11
+8 −1
Original line number Diff line number Diff line
@@ -68,8 +68,11 @@ MERCURY_GEN_PROC(rpc_get_metadentry_size_out_t, ((hg_int32_t) (err))
// data
// Input arguments of the read-data RPC: target path, requested size and
// offset, the chunk window (chunk_n, chunk_start, chunk_end,
// total_chunk_size), and the bulk handle for transferring the payload.
// NOTE(review): field semantics inferred from names — confirm against the
// daemon-side handler. Comments must stay outside the macro because of the
// line continuations.
MERCURY_GEN_PROC(rpc_read_data_in_t,
                 ((hg_const_string_t) (path))\
((hg_size_t) (size))\
((int64_t) (offset))\
((hg_uint64_t) (chunk_n))\
((hg_uint64_t) (chunk_start))\
((hg_uint64_t) (chunk_end))\
((hg_uint64_t) (total_chunk_size))\
((hg_bulk_t) (bulk_handle)))

MERCURY_GEN_PROC(rpc_data_out_t,
@@ -79,6 +82,10 @@ MERCURY_GEN_PROC(rpc_data_out_t,
// Input arguments of the write-data RPC: target path, offset, the chunk
// window (chunk_n, chunk_start, chunk_end, total_chunk_size), and the bulk
// handle for transferring the payload. Unlike the read RPC there is no
// `size` field here.
// NOTE(review): field semantics inferred from names — confirm against the
// daemon-side handler.
MERCURY_GEN_PROC(rpc_write_data_in_t,
                 ((hg_const_string_t) (path))\
((int64_t) (offset))\
((hg_uint64_t) (chunk_n))\
((hg_uint64_t) (chunk_start))\
((hg_uint64_t) (chunk_end))\
((hg_uint64_t) (total_chunk_size))\
((hg_bulk_t) (bulk_handle)))

#endif //LFS_RPC_TYPES_HPP
+4 −0
Original line number Diff line number Diff line
@@ -51,4 +51,8 @@ hg_bool_t bool_to_merc_bool(bool state);

bool is_handle_sm(margo_instance_id mid, const hg_addr_t& addr);

// Hashes `to_hash` — presumably returning a host index in [0, host_size) —
// TODO(review): confirm range and hash function at the definition.
size_t adafs_hash_path(const std::string& to_hash, const size_t host_size);

// Hashes the (path, chunk_id) pair — presumably distributing a file's chunks
// across hosts by returning an index in [0, host_size) — verify against the
// definition.
size_t adafs_hash_path_chunk(const std::string& path, const size_t chunk_id, const size_t host_size);

#endif //IFS_RPC_UTILS_HPP
Loading