Merge branch 'develop' of github.com:smessmer/blockstore into develop

Sebastian Messmer 2015-06-16 16:52:31 +02:00
commit 123ac44f24
3 changed files with 26 additions and 2 deletions

View File

@@ -5,7 +5,9 @@ ADD_BII_TARGETS()
 ADD_BOOST(filesystem system thread)
 # This is needed by boost thread
-TARGET_LINK_LIBRARIES(${BII_BLOCK_TARGET} INTERFACE rt)
+IF(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
+    TARGET_LINK_LIBRARIES(${BII_BLOCK_TARGET} INTERFACE rt)
+ENDIF(CMAKE_SYSTEM_NAME)
 ACTIVATE_CPP14()
 ENABLE_STYLE_WARNINGS()

View File

@@ -8,6 +8,7 @@
 #include <memory>
 #include <mutex>
 #include <boost/optional.hpp>
+#include <future>
 
 namespace blockstore {
 namespace caching {
@ -29,6 +30,7 @@ public:
private: private:
void _popOldEntries(); void _popOldEntries();
static void _destructElementsInParallel(std::vector<CacheEntry<Key, Value>> *list);
mutable std::mutex _mutex; mutable std::mutex _mutex;
QueueMap<Key, CacheEntry<Key, Value>> _cachedBlocks; QueueMap<Key, CacheEntry<Key, Value>> _cachedBlocks;
@@ -75,9 +77,28 @@ void Cache<Key, Value>::push(const Key &key, Value value) {
 template<class Key, class Value>
 void Cache<Key, Value>::_popOldEntries() {
   std::lock_guard<std::mutex> lock(_mutex);
+  std::vector<CacheEntry<Key, Value>> entriesToDelete;
   while(_cachedBlocks.size() > 0 && _cachedBlocks.peek()->ageSeconds() > PURGE_LIFETIME_SEC) {
-    _cachedBlocks.pop();
+    entriesToDelete.push_back(*_cachedBlocks.pop());
   }
+  _destructElementsInParallel(&entriesToDelete);
+}
+
+template<class Key, class Value>
+void Cache<Key, Value>::_destructElementsInParallel(std::vector<CacheEntry<Key, Value>> *list) {
+  //TODO Check whether the parallel destruction below works (just comment it in, but keep the list->clear()) and measure the performance impact. Would a lower parallelism level, i.e. one thread per core, be better?
+  /*
+  std::vector<std::future<void>> waitHandles;
+  for (auto & entry : *list) {
+    waitHandles.push_back(std::async(std::launch::async, [&entry] {
+      entry.releaseValue();
+    }));
+  }
+  for (auto & waitHandle : waitHandles) {
+    waitHandle.wait();
+  }
+  */
+  list->clear();
 }
 
 }
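For context, the commented-out block above is the idea the TODO asks to evaluate: release each evicted entry in its own std::async task and wait for all tasks before clearing the list. Below is a minimal, standalone sketch of that pattern; the Entry type and its releaseValue() member are hypothetical stand-ins for CacheEntry<Key, Value>, which is defined elsewhere in this repository.

// Standalone sketch (not part of the commit): release the elements of a vector
// in parallel using std::async, then clear the vector. "Entry" stands in for
// CacheEntry<Key, Value>; its destruction is assumed to be expensive (e.g.
// flushing a block), which is what makes doing it in parallel interesting.
#include <future>
#include <memory>
#include <vector>

struct Entry {
  std::unique_ptr<int> value;              // placeholder for an expensive-to-destruct member
  void releaseValue() { value.reset(); }   // mirrors the releaseValue() call in the TODO
};

void destructElementsInParallel(std::vector<Entry> *list) {
  std::vector<std::future<void>> waitHandles;
  for (auto &entry : *list) {
    // One task per element; each task only touches its own element,
    // so no synchronization between tasks is needed.
    waitHandles.push_back(std::async(std::launch::async, [&entry] {
      entry.releaseValue();
    }));
  }
  for (auto &waitHandle : waitHandles) {
    waitHandle.wait();   // all values are released before the entries themselves go away
  }
  list->clear();
}

int main() {
  std::vector<Entry> entries(100);
  for (auto &e : entries) { e.value = std::make_unique<int>(42); }
  destructElementsInParallel(&entries);
}

Whether this pays off depends on how expensive an entry's destruction is compared to thread start-up cost; capping the number of tasks at the core count, as the TODO suggests, would mean splitting the vector into chunks and handing each chunk to one task instead of launching one task per element.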

View File

@@ -14,6 +14,7 @@ class AES256_GCM {
 public:
   BOOST_CONCEPT_ASSERT((CipherConcept<AES256_GCM>));
 
+  //TODO Does EncryptionKey::GenerateRandom() use a PseudoRandomGenerator? It would be better to use real randomness. This applies to all ciphers - we should offer a CreateKey() method in Ciphers.
   using EncryptionKey = cpputils::FixedSizeData<32>;
   static_assert(32 == CryptoPP::AES::MAX_KEYLENGTH, "If AES offered larger keys, we should offer a variant with it");
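As a point of comparison for that TODO, here is a hypothetical sketch of what a key-creation helper backed by real (OS-seeded) randomness could look like. CryptoPP::AutoSeededRandomPool is an actual Crypto++ class; the plain 32-byte array is a stand-in for cpputils::FixedSizeData<32>, whose construction API is not shown in this diff.

// Hypothetical sketch (not part of the commit): generate a key from an
// OS-seeded CSPRNG instead of a PseudoRandomGenerator. The std::array is a
// stand-in for cpputils::FixedSizeData<32>.
#include <cryptopp/osrng.h>
#include <array>

std::array<unsigned char, 32> createRandomKey() {
  CryptoPP::AutoSeededRandomPool randomPool;          // seeds itself from the OS entropy source
  std::array<unsigned char, 32> key;
  randomPool.GenerateBlock(key.data(), key.size());   // fill the key with cryptographically secure random bytes
  return key;
}

A CreateKey() method on each cipher could wrap something like this and return the cipher's own EncryptionKey type.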