Fix a potential deadlock in the cache
parent 2ff7d34980
commit c403ec6b48
@@ -3,6 +3,7 @@ Version 0.9.4 (unreleased)
 Fixed Bugs:
 * Renaming a file to an existing file (i.e. overwriting an existing file) didn't free the allocated memory for the overwritten file
 * Renaming a file to an existing file could violate an invariant of the directory layout (directory entries have to be sorted), which could cause files to seemingly disappear.
+* Fix a potential deadlock in the cache

 Compatibility:
 * The generated .deb packages work for Ubuntu 16.04
@@ -102,6 +102,7 @@ void Cache<Key, Value, MAX_ENTRIES>::_makeSpaceForEntry(std::unique_lock<std::mu

 template<class Key, class Value, uint32_t MAX_ENTRIES>
 void Cache<Key, Value, MAX_ENTRIES>::_deleteEntry(std::unique_lock<std::mutex> *lock) {
+    ASSERT(lock->owns_lock(), "The operations in this function require a locked mutex");
     auto key = _cachedBlocks.peekKey();
     ASSERT(key != boost::none, "There was no entry to delete");
     cpputils::MutexPoolLock<Key> lockEntryFromBeingPopped(&_currentlyFlushingEntries, *key);
@@ -134,7 +135,7 @@ void Cache<Key, Value, MAX_ENTRIES>::_deleteMatchingEntriesAtBeginningParallel(s
     std::vector<std::future<void>> waitHandles;
     for (unsigned int i = 0; i < numThreads; ++i) {
         waitHandles.push_back(std::async(std::launch::async, [this, matches] {
-            _deleteMatchingEntriesAtBeginning(matches);
+            _deleteMatchingEntriesAtBeginning(matches);
         }));
     }
     for (auto & waitHandle : waitHandles) {
@@ -154,6 +155,7 @@ bool Cache<Key, Value, MAX_ENTRIES>::_deleteMatchingEntryAtBeginning(std::functi
     std::unique_lock<std::mutex> lock(_mutex);
     if (_cachedBlocks.size() > 0 && matches(*_cachedBlocks.peek())) {
         _deleteEntry(&lock);
+        ASSERT(lock.owns_lock(), "Something strange happened with the lock. It should be locked again when we come back.");
         return true;
     } else {
         return false;
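The two new assertions spell out the locking contract around _deleteEntry: it must be called with the cache mutex held, it may temporarily give up that mutex while it waits for an entry that is currently being flushed, and it has to return with the mutex locked again, which _deleteMatchingEntryAtBeginning now re-checks after the call. A minimal standalone sketch of that contract (illustrative only; deleteEntryLike is a made-up stand-in, not the CryFS code):

// Illustrative sketch, not code from this commit.
#include <cassert>
#include <mutex>

void deleteEntryLike(std::unique_lock<std::mutex> *lock) {
    assert(lock->owns_lock());   // precondition: the caller must hold the mutex

    lock->unlock();              // slow work (e.g. flushing) must not run under the mutex
    // ... do the slow work here ...
    lock->lock();                // hand the mutex back locked

    assert(lock->owns_lock());   // postcondition the caller relies on
}

int main() {
    std::mutex m;
    std::unique_lock<std::mutex> lock(m);
    deleteEntryLike(&lock);
    assert(lock.owns_lock());    // mirrors the new ASSERT in _deleteMatchingEntryAtBeginning
    return 0;
}

If the callee ever returned with the mutex released, the caller would go on to touch the shared cache state unprotected; the added ASSERTs make that failure visible immediately.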
src/cpp-utils/lock/CombinedLock.h (new file, 36 lines)
@@ -0,0 +1,36 @@
+#ifndef MESSMER_CPPUTILS_LOCK_COMBINEDLOCK_H
+#define MESSMER_CPPUTILS_LOCK_COMBINEDLOCK_H
+
+#include "../macros.h"
+
+namespace cpputils {
+
+    /**
+     * This class combines multiple locks into one, taking care that they are locked
+     * in the order they were given to the constructor and unlocked in reverse order.
+     */
+    class CombinedLock final {
+    public:
+        CombinedLock(std::unique_lock<std::mutex> *outer, std::unique_lock<std::mutex> *inner)
+            : _outer(outer), _inner(inner) {
+        }
+
+        void lock() {
+            _outer->lock();
+            _inner->lock();
+        }
+
+        void unlock() {
+            _inner->unlock();
+            _outer->unlock();
+        }
+
+    private:
+        std::unique_lock<std::mutex> *_outer;
+        std::unique_lock<std::mutex> *_inner;
+
+        DISALLOW_COPY_AND_ASSIGN(CombinedLock);
+    };
+}
+
+#endif
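CombinedLock only needs lock() and unlock(), so it satisfies the standard BasicLockable requirements and can be handed to anything that expects such a type, in particular std::condition_variable_any in the LockPool change below. A small usage sketch (illustrative only; the include path and the mutex names are assumptions):

// Illustrative sketch, not code from this commit.
#include <mutex>
#include "cpp-utils/lock/CombinedLock.h"   // include path assumed

std::mutex outerMutex;
std::mutex innerMutex;

void lockBothInOrder() {
    // Both unique_locks start out unlocked; CombinedLock only stores pointers to them.
    std::unique_lock<std::mutex> outer(outerMutex, std::defer_lock);
    std::unique_lock<std::mutex> inner(innerMutex, std::defer_lock);

    cpputils::CombinedLock combined(&outer, &inner);
    combined.lock();    // locks outerMutex first, then innerMutex
    // ... critical section that needs both mutexes ...
    combined.unlock();  // unlocks innerMutex first, then outerMutex
}

Keeping one fixed order for taking the two locks, and the reverse order for releasing them, is what makes the class useful for the deadlock fix below.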
src/cpp-utils/lock/LockPool.h

@@ -8,11 +8,13 @@
 #include <algorithm>
 #include "../assert/assert.h"
 #include "../macros.h"
+#include "CombinedLock.h"

 //TODO Test
 //TODO Rename package to synchronization
 //TODO Rename to MutexPool
 namespace cpputils {

     template<class LockName>
     class LockPool final {
     public:
@@ -26,7 +28,7 @@ namespace cpputils {

         std::vector<LockName> _lockedLocks;
         std::mutex _mutex;
-        std::condition_variable _cv;
+        std::condition_variable_any _cv;

         DISALLOW_COPY_AND_ASSIGN(LockPool);
     };
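The member type changes because std::condition_variable::wait only accepts a std::unique_lock<std::mutex>, while std::condition_variable_any::wait is templated over any BasicLockable, which is exactly what CombinedLock provides. A sketch of the combination (illustrative only; names and the include path are assumptions):

// Illustrative sketch, not code from this commit.
#include <condition_variable>
#include <mutex>
#include "cpp-utils/lock/CombinedLock.h"   // include path assumed

std::mutex outerMutex;
std::mutex innerMutex;
std::condition_variable_any cv;
bool ready = false;

void waiter() {
    std::unique_lock<std::mutex> outer(outerMutex);   // locked first
    std::unique_lock<std::mutex> inner(innerMutex);   // locked second
    cpputils::CombinedLock combined(&outer, &inner);
    // wait() unlocks combined before blocking (inner, then outer) and re-locks it
    // before returning (outer, then inner), so the lock order stays consistent.
    cv.wait(combined, []{ return ready; });
}

void notifier() {
    std::lock_guard<std::mutex> outer(outerMutex);    // same locking order as the waiter
    std::lock_guard<std::mutex> inner(innerMutex);
    ready = true;
    cv.notify_all();
}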
@@ -42,15 +44,14 @@
     inline void LockPool<LockName>::lock(const LockName &lock, std::unique_lock<std::mutex> *lockToFreeWhileWaiting) {
         std::unique_lock<std::mutex> mutexLock(_mutex); // TODO Is shared_lock enough here?
         if (_isLocked(lock)) {
-            if(lockToFreeWhileWaiting != nullptr) {
-                lockToFreeWhileWaiting->unlock();
-            }
-            _cv.wait(mutexLock, [this, &lock]{
+            // Order of locking/unlocking is important and should be the same order as everywhere else to prevent deadlocks.
+            // Since when entering the function, lockToFreeWhileWaiting is already locked and mutexLock is locked afterwards,
+            // the condition variable should do it in the same order. We use combinedLock for this.
+            CombinedLock combinedLock(lockToFreeWhileWaiting, &mutexLock);
+            _cv.wait(combinedLock, [this, &lock]{
                 return !_isLocked(lock);
             });
-            if(lockToFreeWhileWaiting != nullptr) {
-                lockToFreeWhileWaiting->lock();
-            }
+            ASSERT(mutexLock.owns_lock() && lockToFreeWhileWaiting->owns_lock(), "Locks haven't been correctly relocked");
         }
         _lockedLocks.push_back(lock);
     }
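The comment added here states the rule the whole commit enforces: every path has to acquire the caller's lock (lockToFreeWhileWaiting) before the pool's _mutex. The old code woke up from the wait holding only _mutex and then re-locked the caller's lock, i.e. it took the two mutexes in the opposite order from a thread just entering lock(), which is the classic recipe for a deadlock; routing the wait through CombinedLock keeps one order everywhere. A minimal sketch of that ordering rule (hypothetical names, not the CryFS code):

// Illustrative sketch, not code from this commit. mutexA stands in for the
// caller's lock, mutexB for the pool's _mutex; both paths take A before B,
// so no cycle of "holds one mutex, waits for the other" can form.
#include <mutex>
#include <thread>

std::mutex mutexA;
std::mutex mutexB;

void consistentOrder() {
    std::lock_guard<std::mutex> a(mutexA);   // always A first...
    std::lock_guard<std::mutex> b(mutexB);   // ...then B
    // ... work that needs both mutexes ...
}

int main() {
    std::thread t1(consistentOrder);
    std::thread t2(consistentOrder);
    t1.join();
    t2.join();
    return 0;
}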