Make cache MAX_SIZE configurable
parent 810c2c5b48
commit 260bc1056a
@@ -26,7 +26,7 @@ public:
 
 private:
   cpputils::unique_ref<BlockStore> _baseBlockStore;
-  Cache<Key, cpputils::unique_ref<Block>> _cache;
+  Cache<Key, cpputils::unique_ref<Block>, 1000> _cache;
   uint32_t _numNewBlocks;
 
   DISALLOW_COPY_AND_ASSIGN(CachingBlockStore);
implementations/caching/cache/Cache.h (51 lines changed, vendored)
@@ -14,10 +14,9 @@
 namespace blockstore {
 namespace caching {
 
-template<class Key, class Value>
+template<class Key, class Value, uint32_t MAX_ENTRIES>
 class Cache {
 public:
-  static constexpr uint32_t MAX_ENTRIES = 1000;
   //TODO Experiment with good values
   static constexpr double PURGE_LIFETIME_SEC = 0.5; //When an entry has this age, it will be purged from the cache
   static constexpr double PURGE_INTERVAL = 0.5; // With this interval, we check for entries to purge
@@ -44,24 +43,23 @@ private:
   std::unique_ptr<PeriodicTask> _timeoutFlusher;
 };
 
-template<class Key, class Value> constexpr uint32_t Cache<Key, Value>::MAX_ENTRIES;
-template<class Key, class Value> constexpr double Cache<Key, Value>::PURGE_LIFETIME_SEC;
-template<class Key, class Value> constexpr double Cache<Key, Value>::PURGE_INTERVAL;
-template<class Key, class Value> constexpr double Cache<Key, Value>::MAX_LIFETIME_SEC;
+template<class Key, class Value, uint32_t MAX_ENTRIES> constexpr double Cache<Key, Value, MAX_ENTRIES>::PURGE_LIFETIME_SEC;
+template<class Key, class Value, uint32_t MAX_ENTRIES> constexpr double Cache<Key, Value, MAX_ENTRIES>::PURGE_INTERVAL;
+template<class Key, class Value, uint32_t MAX_ENTRIES> constexpr double Cache<Key, Value, MAX_ENTRIES>::MAX_LIFETIME_SEC;
 
-template<class Key, class Value>
-Cache<Key, Value>::Cache(): _cachedBlocks(), _timeoutFlusher(nullptr) {
+template<class Key, class Value, uint32_t MAX_ENTRIES>
+Cache<Key, Value, MAX_ENTRIES>::Cache(): _cachedBlocks(), _timeoutFlusher(nullptr) {
   //Don't initialize timeoutFlusher in the initializer list,
   //because it then might already call Cache::popOldEntries() before Cache is done constructing.
   _timeoutFlusher = std::make_unique<PeriodicTask>(std::bind(&Cache::_deleteOldEntriesParallel, this), PURGE_INTERVAL);
 }
 
-template<class Key, class Value>
-Cache<Key, Value>::~Cache() {
+template<class Key, class Value, uint32_t MAX_ENTRIES>
+Cache<Key, Value, MAX_ENTRIES>::~Cache() {
 }
 
-template<class Key, class Value>
-boost::optional<Value> Cache<Key, Value>::pop(const Key &key) {
+template<class Key, class Value, uint32_t MAX_ENTRIES>
+boost::optional<Value> Cache<Key, Value, MAX_ENTRIES>::pop(const Key &key) {
   std::unique_lock<std::mutex> lock(_mutex);
   cpputils::MutexPoolLock<Key> lockEntryFromBeingPopped(&_currentlyFlushingEntries, key, &lock);
 
@@ -72,16 +70,17 @@ boost::optional<Value> Cache<Key, Value>::pop(const Key &key) {
   return found->releaseValue();
 }
 
-template<class Key, class Value>
-void Cache<Key, Value>::push(const Key &key, Value value) {
+template<class Key, class Value, uint32_t MAX_ENTRIES>
+void Cache<Key, Value, MAX_ENTRIES>::push(const Key &key, Value value) {
   std::unique_lock<std::mutex> lock(_mutex);
+  //std::cout << "Pushing " << key.ToString() << "\n";
   ASSERT(_cachedBlocks.size() <= MAX_ENTRIES, "Cache too full");
   _makeSpaceForEntry(&lock);
   _cachedBlocks.push(key, CacheEntry<Key, Value>(std::move(value)));
 }
 
-template<class Key, class Value>
-void Cache<Key, Value>::_makeSpaceForEntry(std::unique_lock<std::mutex> *lock) {
+template<class Key, class Value, uint32_t MAX_ENTRIES>
+void Cache<Key, Value, MAX_ENTRIES>::_makeSpaceForEntry(std::unique_lock<std::mutex> *lock) {
   // _deleteEntry releases the lock while the Value destructor is running.
   // So we can destruct multiple entries in parallel and also call pop() or push() while doing so.
   // However, if another thread calls push() before we get the lock back, the cache is full again.
@@ -92,8 +91,8 @@ void Cache<Key, Value>::_makeSpaceForEntry(std::unique_lock<std::mutex> *lock) {
   ASSERT(_cachedBlocks.size() < MAX_ENTRIES, "Removing entry from cache didn't work");
 };
 
-template<class Key, class Value>
-void Cache<Key, Value>::_deleteEntry(std::unique_lock<std::mutex> *lock) {
+template<class Key, class Value, uint32_t MAX_ENTRIES>
+void Cache<Key, Value, MAX_ENTRIES>::_deleteEntry(std::unique_lock<std::mutex> *lock) {
   auto key = _cachedBlocks.peekKey();
   ASSERT(key != boost::none, "There was no entry to delete");
   cpputils::MutexPoolLock<Key> lockEntryFromBeingPopped(&_currentlyFlushingEntries, *key);
@@ -105,8 +104,8 @@ void Cache<Key, Value>::_deleteEntry(std::unique_lock<std::mutex> *lock) {
   lock->lock();
 };
 
-template<class Key, class Value>
-void Cache<Key, Value>::_deleteOldEntriesParallel() {
+template<class Key, class Value, uint32_t MAX_ENTRIES>
+void Cache<Key, Value, MAX_ENTRIES>::_deleteOldEntriesParallel() {
   unsigned int numThreads = std::max(1u, std::thread::hardware_concurrency());
   std::vector<std::future<void>> waitHandles;
   for (unsigned int i = 0; i < numThreads; ++i) {
@@ -119,13 +118,13 @@ void Cache<Key, Value>::_deleteOldEntriesParallel() {
   }
 };
 
-template<class Key, class Value>
-void Cache<Key, Value>::_deleteOldEntries() {
+template<class Key, class Value, uint32_t MAX_ENTRIES>
+void Cache<Key, Value, MAX_ENTRIES>::_deleteOldEntries() {
   while (_deleteOldEntry()) {}
 }
 
-template<class Key, class Value>
-bool Cache<Key, Value>::_deleteOldEntry() {
+template<class Key, class Value, uint32_t MAX_ENTRIES>
+bool Cache<Key, Value, MAX_ENTRIES>::_deleteOldEntry() {
   // This function can be called in parallel by multiple threads and will then cause the Value destructors
   // to be called in parallel. The call to _deleteEntry() releases the lock while the Value destructor is running.
   std::unique_lock<std::mutex> lock(_mutex);
@@ -137,8 +136,8 @@ bool Cache<Key, Value>::_deleteOldEntry() {
   }
 };
 
-template<class Key, class Value>
-uint32_t Cache<Key, Value>::size() const {
+template<class Key, class Value, uint32_t MAX_ENTRIES>
+uint32_t Cache<Key, Value, MAX_ENTRIES>::size() const {
   std::unique_lock<std::mutex> lock(_mutex);
   return _cachedBlocks.size();
 };
implementations/caching/cache/QueueMap.h (4 lines changed, vendored)
@@ -25,9 +25,7 @@ public:
 
   void push(const Key &key, Value value) {
     auto newEntry = _entries.emplace(std::piecewise_construct, std::forward_as_tuple(key), std::forward_as_tuple(_sentinel.prev, &_sentinel));
-    if(newEntry.second != true) {
-      throw std::logic_error("There is already an element with this key");
-    }
+    ASSERT(newEntry.second == true, "There is already an element with this key");
     newEntry.first->second.init(&newEntry.first->first, std::move(value));
     //The following is ok, because std::unordered_map never invalidates pointers to its entries
     _sentinel.prev->next = &newEntry.first->second;
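
For context, the pattern this commit adopts is a compile-time capacity: MAX_ENTRIES moves from a constant hardcoded inside Cache to a non-type template parameter, so each user of the cache (such as CachingBlockStore with its value of 1000 above) fixes the bound in the type. The standalone sketch below illustrates the same idiom under stated assumptions; BoundedCache, its std::unordered_map backing, and the main() driver are placeholders for illustration, not the blockstore implementation.

// Standalone sketch of the idiom introduced by this commit: the maximum
// number of entries is a non-type template parameter, fixed per
// instantiation at compile time rather than hardcoded in the class.
// BoundedCache is a placeholder, not the blockstore Cache.
#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>
#include <utility>

template<class Key, class Value, uint32_t MAX_ENTRIES>
class BoundedCache {
public:
  // Returns false instead of evicting when the bound is reached;
  // the real Cache makes space by evicting its oldest entry.
  bool push(const Key &key, Value value) {
    if (_entries.size() >= MAX_ENTRIES) {
      return false;
    }
    _entries.emplace(key, std::move(value));
    return true;
  }

  uint32_t size() const {
    return static_cast<uint32_t>(_entries.size());
  }

private:
  std::unordered_map<Key, Value> _entries;
};

int main() {
  // Each instantiation chooses its own bound, mirroring
  // Cache<Key, cpputils::unique_ref<Block>, 1000> in the diff above.
  BoundedCache<std::string, int, 1000> cache;
  cache.push("block1", 42);
  std::cout << cache.size() << std::endl;  // prints 1
  return 0;
}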