Merge branch 'release/0.10' into develop
commit d86fcf27c6
@@ -22,6 +22,8 @@ Version 0.9.10 (unreleased)
 --------------
 Fixed bugs:
 * Fixed occasional deadlock (https://github.com/cryfs/cryfs/issues/64)
+* Fix for reading empty files out of bounds
+* Fixed race condition (https://github.com/cryfs/cryfs/issues/224)
 
 
 Version 0.9.9
@@ -11,6 +11,7 @@ set(SOURCES
     implementations/onblocks/datanodestore/DataInnerNode.cpp
     implementations/onblocks/datanodestore/DataNodeStore.cpp
     implementations/onblocks/datatreestore/impl/algorithms.cpp
+    implementations/onblocks/datatreestore/impl/CachedValue.cpp
     implementations/onblocks/datatreestore/impl/LeafTraverser.cpp
     implementations/onblocks/datatreestore/LeafHandle.cpp
     implementations/onblocks/datatreestore/DataTree.cpp
@@ -8,13 +8,9 @@
 #include <cpp-utils/assert/assert.h>
 #include "datatreestore/LeafHandle.h"
 
-using std::function;
-using std::unique_lock;
-using std::mutex;
 using cpputils::unique_ref;
 using cpputils::Data;
 using blockstore::BlockId;
-using blobstore::onblocks::datatreestore::LeafHandle;
 
 namespace blobstore {
 namespace onblocks {
@@ -22,128 +18,34 @@ namespace onblocks {
 using parallelaccessdatatreestore::DataTreeRef;
 
 BlobOnBlocks::BlobOnBlocks(unique_ref<DataTreeRef> datatree)
-: _datatree(std::move(datatree)), _sizeCache(boost::none), _mutex() {
+: _datatree(std::move(datatree)) {
 }
 
 BlobOnBlocks::~BlobOnBlocks() {
 } // NOLINT (workaround https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82481 )
 
 uint64_t BlobOnBlocks::size() const {
-  if (_sizeCache == boost::none) {
-    _sizeCache = _datatree->numStoredBytes();
-  }
-  return *_sizeCache;
+  return _datatree->numBytes();
 }
 
 void BlobOnBlocks::resize(uint64_t numBytes) {
   _datatree->resizeNumBytes(numBytes);
-  _sizeCache = numBytes;
-}
-
-void BlobOnBlocks::_traverseLeaves(uint64_t beginByte, uint64_t sizeBytes, function<void (uint64_t leafOffset, LeafHandle leaf, uint32_t begin, uint32_t count)> onExistingLeaf, function<Data (uint64_t beginByte, uint32_t count)> onCreateLeaf) const {
-  unique_lock<mutex> lock(_mutex); // TODO Multiple traverse calls in parallel?
-  uint64_t endByte = beginByte + sizeBytes;
-  uint64_t maxBytesPerLeaf = _datatree->maxBytesPerLeaf();
-  uint32_t firstLeaf = beginByte / maxBytesPerLeaf;
-  uint32_t endLeaf = utils::ceilDivision(endByte, maxBytesPerLeaf);
-  bool blobIsGrowingFromThisTraversal = false;
-  auto _onExistingLeaf = [&onExistingLeaf, beginByte, endByte, endLeaf, maxBytesPerLeaf, &blobIsGrowingFromThisTraversal] (uint32_t leafIndex, bool isRightBorderLeaf, LeafHandle leafHandle) {
-    uint64_t indexOfFirstLeafByte = leafIndex * maxBytesPerLeaf;
-    ASSERT(endByte > indexOfFirstLeafByte, "Traversal went too far right");
-    uint32_t dataBegin = utils::maxZeroSubtraction(beginByte, indexOfFirstLeafByte);
-    uint32_t dataEnd = std::min(maxBytesPerLeaf, endByte - indexOfFirstLeafByte);
-    // If we are traversing exactly until the last leaf, then the last leaf wasn't resized by the traversal and might have a wrong size. We have to fix it.
-    if (isRightBorderLeaf) {
-      ASSERT(leafIndex == endLeaf-1, "If we traversed further right, this wouldn't be the right border leaf.");
-      auto leaf = leafHandle.node();
-      if (leaf->numBytes() < dataEnd) {
-        leaf->resize(dataEnd);
-        blobIsGrowingFromThisTraversal = true;
-      }
-    }
-    onExistingLeaf(indexOfFirstLeafByte, std::move(leafHandle), dataBegin, dataEnd-dataBegin);
-  };
-  auto _onCreateLeaf = [&onCreateLeaf, maxBytesPerLeaf, beginByte, firstLeaf, endByte, endLeaf, &blobIsGrowingFromThisTraversal] (uint32_t leafIndex) -> Data {
-    blobIsGrowingFromThisTraversal = true;
-    uint64_t indexOfFirstLeafByte = leafIndex * maxBytesPerLeaf;
-    ASSERT(endByte > indexOfFirstLeafByte, "Traversal went too far right");
-    uint32_t dataBegin = utils::maxZeroSubtraction(beginByte, indexOfFirstLeafByte);
-    uint32_t dataEnd = std::min(maxBytesPerLeaf, endByte - indexOfFirstLeafByte);
-    ASSERT(leafIndex == firstLeaf || dataBegin == 0, "Only the leftmost leaf can have a gap on the left.");
-    ASSERT(leafIndex == endLeaf-1 || dataEnd == maxBytesPerLeaf, "Only the rightmost leaf can have a gap on the right");
-    Data data = onCreateLeaf(indexOfFirstLeafByte + dataBegin, dataEnd-dataBegin);
-    ASSERT(data.size() == dataEnd-dataBegin, "Returned leaf data with wrong size");
-    // If this leaf is created but only partly in the traversed region (i.e. dataBegin > leafBegin), we have to fill the data before the traversed region with zeroes.
-    if (dataBegin != 0) {
-      Data actualData(dataBegin + data.size());
-      std::memset(actualData.data(), 0, dataBegin);
-      std::memcpy(actualData.dataOffset(dataBegin), data.data(), data.size());
-      data = std::move(actualData);
-    }
-    return data;
-  };
-  _datatree->traverseLeaves(firstLeaf, endLeaf, _onExistingLeaf, _onCreateLeaf);
-  if (blobIsGrowingFromThisTraversal) {
-    ASSERT(_datatree->numStoredBytes() == endByte, "Writing didn't grow by the correct number of bytes");
-    _sizeCache = endByte;
-  }
 }
 
 Data BlobOnBlocks::readAll() const {
-  //TODO Querying size is inefficient. Is this possible without a call to size()?
-  uint64_t count = size();
-  Data result(count);
-  _read(result.data(), 0, count);
-  return result;
+  return _datatree->readAllBytes();
 }
 
 void BlobOnBlocks::read(void *target, uint64_t offset, uint64_t count) const {
-  uint64_t _size = size();
-  ASSERT(offset <= _size && offset + count <= _size, "BlobOnBlocks::read() read outside blob. Use BlobOnBlocks::tryRead() if this should be allowed.");
-  uint64_t read = tryRead(target, offset, count);
-  ASSERT(read == count, "BlobOnBlocks::read() couldn't read all requested bytes. Use BlobOnBlocks::tryRead() if this should be allowed.");
+  return _datatree->readBytes(target, offset, count);
 }
 
 uint64_t BlobOnBlocks::tryRead(void *target, uint64_t offset, uint64_t count) const {
-  //TODO Quite inefficient to call size() here, because that has to traverse the tree
-  uint64_t realCount = std::max(INT64_C(0), std::min(static_cast<int64_t>(count), static_cast<int64_t>(size())-static_cast<int64_t>(offset)));
-  _read(target, offset, realCount);
-  return realCount;
-}
-
-void BlobOnBlocks::_read(void *target, uint64_t offset, uint64_t count) const {
-  auto onExistingLeaf = [target, offset, count] (uint64_t indexOfFirstLeafByte, LeafHandle leaf, uint32_t leafDataOffset, uint32_t leafDataSize) {
-    ASSERT(indexOfFirstLeafByte+leafDataOffset>=offset && indexOfFirstLeafByte-offset+leafDataOffset <= count && indexOfFirstLeafByte-offset+leafDataOffset+leafDataSize <= count, "Writing to target out of bounds");
-    //TODO Simplify formula, make it easier to understand
-    leaf.node()->read(static_cast<uint8_t*>(target) + indexOfFirstLeafByte - offset + leafDataOffset, leafDataOffset, leafDataSize);
-  };
-  auto onCreateLeaf = [] (uint64_t /*beginByte*/, uint32_t /*count*/) -> Data {
-    ASSERT(false, "Reading shouldn't create new leaves.");
-  };
-  _traverseLeaves(offset, count, onExistingLeaf, onCreateLeaf);
+  return _datatree->tryReadBytes(target, offset, count);
 }
 
 void BlobOnBlocks::write(const void *source, uint64_t offset, uint64_t count) {
-  auto onExistingLeaf = [source, offset, count] (uint64_t indexOfFirstLeafByte, LeafHandle leaf, uint32_t leafDataOffset, uint32_t leafDataSize) {
-    ASSERT(indexOfFirstLeafByte+leafDataOffset>=offset && indexOfFirstLeafByte-offset+leafDataOffset <= count && indexOfFirstLeafByte-offset+leafDataOffset+leafDataSize <= count, "Reading from source out of bounds");
-    if (leafDataOffset == 0 && leafDataSize == leaf.nodeStore()->layout().maxBytesPerLeaf()) {
-      Data leafData(leafDataSize);
-      std::memcpy(leafData.data(), static_cast<const uint8_t*>(source) + indexOfFirstLeafByte - offset, leafDataSize);
-      leaf.nodeStore()->overwriteLeaf(leaf.blockId(), std::move(leafData));
-    } else {
-      //TODO Simplify formula, make it easier to understand
-      leaf.node()->write(static_cast<const uint8_t*>(source) + indexOfFirstLeafByte - offset + leafDataOffset, leafDataOffset,
-                         leafDataSize);
-    }
-  };
-  auto onCreateLeaf = [source, offset, count] (uint64_t beginByte, uint32_t numBytes) -> Data {
-    ASSERT(beginByte >= offset && beginByte-offset <= count && beginByte-offset+numBytes <= count, "Reading from source out of bounds");
-    Data result(numBytes);
-    //TODO Simplify formula, make it easier to understand
-    std::memcpy(result.data(), static_cast<const uint8_t*>(source) + beginByte - offset, numBytes);
-    return result;
-  };
-  _traverseLeaves(offset, count, onExistingLeaf, onCreateLeaf);
+  _datatree->writeBytes(source, offset, count);
 }
 
 void BlobOnBlocks::flush() {
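With this hunk, BlobOnBlocks no longer keeps its own size cache or mutex; size(), readAll(), read(), tryRead() and write() just delegate to the corresponding DataTree methods, which now own the locking and caching. The caller-visible contract stays the same: read() requires the whole range to lie inside the blob, while tryRead() clamps the request and reports how many bytes were actually copied. A minimal caller-side sketch (not from the commit; BlobT stands in for the blobstore Blob interface that BlobOnBlocks implements):

    #include <cstdint>
    #include <vector>

    // Read up to `count` bytes from the start of the blob.
    // tryRead() clamps to the blob size, so this also works on an empty blob.
    template<class BlobT>
    std::vector<uint8_t> readPrefix(const BlobT &blob, uint64_t count) {
      std::vector<uint8_t> buffer(count);
      const uint64_t read = blob.tryRead(buffer.data(), /*offset=*/0, count);
      buffer.resize(read);  // keep only the bytes that were actually read
      return buffer;
    }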
@@ -7,6 +7,7 @@
 
 #include <memory>
 #include <boost/optional.hpp>
+#include <boost/thread/shared_mutex.hpp>
 
 namespace blobstore {
 namespace onblocks {
@@ -38,12 +39,11 @@ public:
 
 private:
 
+  uint64_t _tryRead(void *target, uint64_t offset, uint64_t size) const;
   void _read(void *target, uint64_t offset, uint64_t count) const;
   void _traverseLeaves(uint64_t offsetBytes, uint64_t sizeBytes, std::function<void (uint64_t leafOffset, datatreestore::LeafHandle leaf, uint32_t begin, uint32_t count)> onExistingLeaf, std::function<cpputils::Data (uint64_t beginByte, uint32_t count)> onCreateLeaf) const;
 
   cpputils::unique_ref<parallelaccessdatatreestore::DataTreeRef> _datatree;
-  mutable boost::optional<uint64_t> _sizeCache;
-  mutable std::mutex _mutex;
 
   DISALLOW_COPY_AND_ASSIGN(BlobOnBlocks);
 };
@@ -12,6 +12,7 @@
 #include <cmath>
 #include <cpp-utils/assert/assert.h>
 #include "impl/LeafTraverser.h"
+#include <boost/thread.hpp>
 
 using blockstore::BlockId;
 using blobstore::onblocks::datanodestore::DataNodeStore;
@@ -24,17 +25,21 @@ using boost::shared_mutex;
 using boost::shared_lock;
 using boost::unique_lock;
 using boost::none;
+using boost::optional;
 
 using cpputils::optional_ownership_ptr;
 using cpputils::unique_ref;
 using cpputils::Data;
+using namespace cpputils::logging;
 
+//TODO shared_lock currently not enough for traverse because of root replacement. Can be fixed while keeping shared?
+
 namespace blobstore {
 namespace onblocks {
 namespace datatreestore {
 
 DataTree::DataTree(DataNodeStore *nodeStore, unique_ref<DataNode> rootNode)
-  : _mutex(), _nodeStore(nodeStore), _rootNode(std::move(rootNode)), _blockId(_rootNode->blockId()), _numLeavesCache(none) {
+  : _treeStructureMutex(), _nodeStore(nodeStore), _rootNode(std::move(rootNode)), _blockId(_rootNode->blockId()), _sizeCache() {
 }
 
 DataTree::~DataTree() {
@@ -45,102 +50,143 @@ const BlockId &DataTree::blockId() const {
 }
 
 void DataTree::flush() const {
-  // By grabbing a lock, we ensure that all modifying functions don't run currently and are therefore flushed
-  unique_lock<shared_mutex> lock(_mutex);
+  // By grabbing a lock, we ensure that all modifying functions don't run currently and are therefore flushed.
+  // It's only a shared lock, because this doesn't modify the tree structure.
+  shared_lock<shared_mutex> lock(_treeStructureMutex);
   // We also have to flush the root node
   _rootNode->flush();
 }
 
 unique_ref<DataNode> DataTree::releaseRootNode() {
-  unique_lock<shared_mutex> lock(_mutex); // Lock ensures that the root node is currently set (traversing unsets it temporarily)
+  // Lock also ensures that the root node is currently set (traversing unsets it temporarily)
+  // It's a unique lock because this "modifies" tree structure by changing _rootNode.
+  unique_lock<shared_mutex> lock(_treeStructureMutex);
   return std::move(_rootNode);
 }
 
 //TODO Test numLeaves(), for example also two configurations with same number of bytes but different number of leaves (last leaf has 0 bytes)
 uint32_t DataTree::numLeaves() const {
-  shared_lock<shared_mutex> lock(_mutex);
-  return _numLeaves();
+  shared_lock<shared_mutex> lock(_treeStructureMutex);
+  return _getOrComputeSizeCache().numLeaves;
 }
 
-uint32_t DataTree::_numLeaves() const {
-  if (_numLeavesCache == none) {
-    _numLeavesCache = _computeNumLeaves(*_rootNode);
-  }
-  return *_numLeavesCache;
+uint64_t DataTree::numBytes() const {
+  shared_lock<shared_mutex> lock(_treeStructureMutex);
+  return _numBytes();
 }
 
-uint32_t DataTree::_forceComputeNumLeaves() const {
-  unique_lock<shared_mutex> lock(_mutex); // Lock ensures that the root node is currently set (traversing unsets it temporarily)
-  _numLeavesCache = _computeNumLeaves(*_rootNode);
-  return *_numLeavesCache;
+uint64_t DataTree::_numBytes() const {
+  return _getOrComputeSizeCache().numBytes;
 }
 
-uint32_t DataTree::_computeNumLeaves(const DataNode &node) const {
+DataTree::SizeCache DataTree::_getOrComputeSizeCache() const {
+  return _sizeCache.getOrCompute([this] () {
+    return _computeSizeCache(*_rootNode);
+  });
+}
+
+uint32_t DataTree::forceComputeNumLeaves() const {
+  _sizeCache.clear();
+  return numLeaves();
+}
+
+DataTree::SizeCache DataTree::_computeSizeCache(const DataNode &node) const {
   const DataLeafNode *leaf = dynamic_cast<const DataLeafNode*>(&node);
   if (leaf != nullptr) {
-    return 1;
+    return {1, leaf->numBytes()};
   }
 
   const DataInnerNode &inner = dynamic_cast<const DataInnerNode&>(node);
-  uint64_t numLeavesInLeftChildren = static_cast<uint64_t>(inner.numChildren()-1) * leavesPerFullChild(inner);
+  uint32_t numLeavesInLeftChildren = static_cast<uint32_t>(inner.numChildren()-1) * _leavesPerFullChild(inner);
+  uint64_t numBytesInLeftChildren = numLeavesInLeftChildren * _nodeStore->layout().maxBytesPerLeaf();
   auto lastChild = _nodeStore->load(inner.readLastChild().blockId());
   ASSERT(lastChild != none, "Couldn't load last child");
-  uint64_t numLeavesInRightChild = _computeNumLeaves(**lastChild);
+  SizeCache sizeInRightChild = _computeSizeCache(**lastChild);
 
-  return numLeavesInLeftChildren + numLeavesInRightChild;
+  return SizeCache {
+    numLeavesInLeftChildren + sizeInRightChild.numLeaves,
+    numBytesInLeftChildren + sizeInRightChild.numBytes
+  };
 }
 
-void DataTree::traverseLeaves(uint32_t beginIndex, uint32_t endIndex, function<void (uint32_t index, bool isRightBorderLeaf, LeafHandle leaf)> onExistingLeaf, function<Data (uint32_t index)> onCreateLeaf) {
-  //TODO Can we allow multiple runs of traverseLeaves() in parallel? Also in parallel with resizeNumBytes()?
-  std::unique_lock<shared_mutex> lock(_mutex);
-  ASSERT(beginIndex <= endIndex, "Invalid parameters");
-
-  auto onBacktrackFromSubtree = [] (DataInnerNode* /*node*/) {};
-
-  _traverseLeaves(beginIndex, endIndex, onExistingLeaf, onCreateLeaf, onBacktrackFromSubtree);
-
-  if (_numLeavesCache != none && *_numLeavesCache < endIndex) {
-    _numLeavesCache = endIndex;
-  }
-}
-
-void DataTree::_traverseLeaves(uint32_t beginIndex, uint32_t endIndex,
+void DataTree::_traverseLeavesByLeafIndices(uint32_t beginIndex, uint32_t endIndex, bool readOnlyTraversal,
     function<void (uint32_t index, bool isRightBorderLeaf, LeafHandle leaf)> onExistingLeaf,
     function<Data (uint32_t index)> onCreateLeaf,
-    function<void (DataInnerNode *node)> onBacktrackFromSubtree) {
-  LeafTraverser(_nodeStore).traverseAndUpdateRoot(&_rootNode, beginIndex, endIndex, onExistingLeaf, onCreateLeaf, onBacktrackFromSubtree);
+    function<void (DataInnerNode *node)> onBacktrackFromSubtree) const {
+  if(endIndex <= beginIndex) {
+    return;
+  }
+
+  // TODO no const cast
+  LeafTraverser(_nodeStore, readOnlyTraversal).traverseAndUpdateRoot(&const_cast<DataTree*>(this)->_rootNode, beginIndex, endIndex, onExistingLeaf, onCreateLeaf, onBacktrackFromSubtree);
 }
 
-uint32_t DataTree::leavesPerFullChild(const DataInnerNode &root) const {
+void DataTree::_traverseLeavesByByteIndices(uint64_t beginByte, uint64_t sizeBytes, bool readOnlyTraversal, function<void (uint64_t leafOffset, LeafHandle leaf, uint32_t begin, uint32_t count)> onExistingLeaf, function<Data (uint64_t beginByte, uint32_t count)> onCreateLeaf) const {
+  if (sizeBytes == 0) {
+    return;
+  }
+
+  uint64_t endByte = beginByte + sizeBytes;
+  uint64_t _maxBytesPerLeaf = maxBytesPerLeaf();
+  uint32_t firstLeaf = beginByte / _maxBytesPerLeaf;
+  uint32_t endLeaf = utils::ceilDivision(endByte, _maxBytesPerLeaf);
+  bool blobIsGrowingFromThisTraversal = false;
+  auto _onExistingLeaf = [&onExistingLeaf, beginByte, endByte, endLeaf, _maxBytesPerLeaf, &blobIsGrowingFromThisTraversal] (uint32_t leafIndex, bool isRightBorderLeaf, LeafHandle leafHandle) {
+    uint64_t indexOfFirstLeafByte = leafIndex * _maxBytesPerLeaf;
+    ASSERT(endByte > indexOfFirstLeafByte, "Traversal went too far right");
+    uint32_t dataBegin = utils::maxZeroSubtraction(beginByte, indexOfFirstLeafByte);
+    uint32_t dataEnd = std::min(_maxBytesPerLeaf, endByte - indexOfFirstLeafByte);
+    // If we are traversing exactly until the last leaf, then the last leaf wasn't resized by the traversal and might have a wrong size. We have to fix it.
+    if (isRightBorderLeaf) {
+      ASSERT(leafIndex == endLeaf-1, "If we traversed further right, this wouldn't be the right border leaf.");
+      auto leaf = leafHandle.node();
+      if (leaf->numBytes() < dataEnd) {
+        leaf->resize(dataEnd);
+        blobIsGrowingFromThisTraversal = true;
+      }
+    }
+    onExistingLeaf(indexOfFirstLeafByte, std::move(leafHandle), dataBegin, dataEnd-dataBegin);
+  };
+  auto _onCreateLeaf = [&onCreateLeaf, _maxBytesPerLeaf, beginByte, firstLeaf, endByte, endLeaf, &blobIsGrowingFromThisTraversal, readOnlyTraversal] (uint32_t leafIndex) -> Data {
+    ASSERT(!readOnlyTraversal, "Cannot create leaves in a read-only traversal");
+    blobIsGrowingFromThisTraversal = true;
+    uint64_t indexOfFirstLeafByte = leafIndex * _maxBytesPerLeaf;
+    ASSERT(endByte > indexOfFirstLeafByte, "Traversal went too far right");
+    uint32_t dataBegin = utils::maxZeroSubtraction(beginByte, indexOfFirstLeafByte);
+    uint32_t dataEnd = std::min(_maxBytesPerLeaf, endByte - indexOfFirstLeafByte);
+    ASSERT(leafIndex == firstLeaf || dataBegin == 0, "Only the leftmost leaf can have a gap on the left.");
+    ASSERT(leafIndex == endLeaf-1 || dataEnd == _maxBytesPerLeaf, "Only the rightmost leaf can have a gap on the right");
+    Data data = onCreateLeaf(indexOfFirstLeafByte + dataBegin, dataEnd-dataBegin);
+    ASSERT(data.size() == dataEnd-dataBegin, "Returned leaf data with wrong size");
+    // If this leaf is created but only partly in the traversed region (i.e. dataBegin > leafBegin), we have to fill the data before the traversed region with zeroes.
+    if (dataBegin != 0) {
+      Data actualData(dataBegin + data.size());
+      std::memset(actualData.data(), 0, dataBegin);
+      std::memcpy(actualData.dataOffset(dataBegin), data.data(), data.size());
+      data = std::move(actualData);
+    }
+    return data;
+  };
+  auto _onBacktrackFromSubtree = [] (DataInnerNode* /*node*/) {};
+
+  _traverseLeavesByLeafIndices(firstLeaf, endLeaf, readOnlyTraversal, _onExistingLeaf, _onCreateLeaf, _onBacktrackFromSubtree);
+
+  ASSERT(!readOnlyTraversal || !blobIsGrowingFromThisTraversal, "Blob grew from traversal that didn't allow growing (i.e. reading)");
+
+  if (blobIsGrowingFromThisTraversal) {
+    _sizeCache.update([endLeaf, endByte] (optional<SizeCache>* cache) {
+      *cache = SizeCache{endLeaf, endByte};
+    });
+  }
+}
+
+uint32_t DataTree::_leavesPerFullChild(const DataInnerNode &root) const {
   return utils::intPow(_nodeStore->layout().maxChildrenPerInnerNode(), static_cast<uint64_t>(root.depth())-1);
 }
 
-uint64_t DataTree::numStoredBytes() const {
-  shared_lock<shared_mutex> lock(_mutex);
-  return _numStoredBytes();
-}
-
-uint64_t DataTree::_numStoredBytes() const {
-  return _numStoredBytes(*_rootNode);
-}
-
-uint64_t DataTree::_numStoredBytes(const DataNode &root) const {
-  const DataLeafNode *leaf = dynamic_cast<const DataLeafNode*>(&root);
-  if (leaf != nullptr) {
-    return leaf->numBytes();
-  }
-
-  const DataInnerNode &inner = dynamic_cast<const DataInnerNode&>(root);
-  uint64_t numBytesInLeftChildren = (inner.numChildren()-1) * leavesPerFullChild(inner) * _nodeStore->layout().maxBytesPerLeaf();
-  auto lastChild = _nodeStore->load(inner.readLastChild().blockId());
-  ASSERT(lastChild != none, "Couldn't load last child");
-  uint64_t numBytesInRightChild = _numStoredBytes(**lastChild);
-
-  return numBytesInLeftChildren + numBytesInRightChild;
-}
-
 void DataTree::resizeNumBytes(uint64_t newNumBytes) {
-  std::unique_lock<shared_mutex> lock(_mutex); // TODO Multiple ones in parallel? Also in parallel with traverseLeaves()?
+  std::unique_lock<shared_mutex> lock(_treeStructureMutex);
 
   uint32_t newNumLeaves = std::max(UINT64_C(1), utils::ceilDivision(newNumBytes, _nodeStore->layout().maxBytesPerLeaf()));
   uint32_t newLastLeafSize = newNumBytes - (newNumLeaves-1) * _nodeStore->layout().maxBytesPerLeaf();
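The new _computeSizeCache() computes leaf count and byte count in one pass and only recurses into the last child: every child of an inner node except the last one is by construction a full subtree, so it contributes _leavesPerFullChild(inner) leaves and that many times maxBytesPerLeaf() bytes. A small worked example of that arithmetic (standalone; the layout numbers are made up and are not CryFS's real block sizes):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Hypothetical layout: 1024 bytes per leaf, one depth-1 inner node with 3 children.
      const uint64_t maxBytesPerLeaf = 1024;

      // For a depth-1 inner node each full child is a single leaf
      // (leavesPerFullChild == intPow(maxChildrenPerInnerNode, depth - 1) == 1).
      const uint32_t leavesPerFullChild = 1;
      const uint32_t numChildren = 3;

      const uint32_t numLeavesInLeftChildren = (numChildren - 1) * leavesPerFullChild;    // 2
      const uint64_t numBytesInLeftChildren = numLeavesInLeftChildren * maxBytesPerLeaf;  // 2048

      // Recursing into the last child: it is a leaf holding 100 bytes, i.e. {1 leaf, 100 bytes}.
      const uint32_t numLeaves = numLeavesInLeftChildren + 1;   // 3
      const uint64_t numBytes  = numBytesInLeftChildren + 100;  // 2148

      assert(numLeaves == 3);
      assert(numBytes == 2148);
      return 0;
    }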
@@ -171,8 +217,11 @@ void DataTree::resizeNumBytes(uint64_t newNumBytes) {
     }
   };
 
-  _traverseLeaves(newNumLeaves - 1, newNumLeaves, onExistingLeaf, onCreateLeaf, onBacktrackFromSubtree);
-  _numLeavesCache = newNumLeaves;
+  _traverseLeavesByLeafIndices(newNumLeaves - 1, newNumLeaves, false, onExistingLeaf, onCreateLeaf, onBacktrackFromSubtree);
+  _sizeCache.update([newNumLeaves, newNumBytes] (boost::optional<SizeCache>* cache) {
+    *cache = SizeCache{newNumLeaves, newNumBytes};
+  });
+
 }
 
 uint64_t DataTree::maxBytesPerLeaf() const {
@@ -180,9 +229,87 @@ uint64_t DataTree::maxBytesPerLeaf() const {
 }
 
 uint8_t DataTree::depth() const {
+  shared_lock<shared_mutex> lock(_treeStructureMutex);
   return _rootNode->depth();
 }
 
+void DataTree::readBytes(void *target, uint64_t offset, uint64_t count) const {
+  shared_lock<shared_mutex> lock(_treeStructureMutex);
+
+  const uint64_t _size = _numBytes();
+  if(offset > _size || offset + count > _size) {
+    throw std::runtime_error("BlobOnBlocks::read() read outside blob. Use BlobOnBlocks::tryRead() if this should be allowed.");
+  }
+  const uint64_t read = _tryReadBytes(target, offset, count);
+  if (read != count) {
+    throw std::runtime_error("BlobOnBlocks::read() couldn't read all requested bytes. Use BlobOnBlocks::tryRead() if this should be allowed.");
+  }
+}
+
+Data DataTree::readAllBytes() const {
+  shared_lock<shared_mutex> lock(_treeStructureMutex);
+
+  //TODO Querying numBytes can be inefficient. Is this possible without a call to size()?
+  uint64_t count = _numBytes();
+  Data result(count);
+  _doReadBytes(result.data(), 0, count);
+
+  return result;
+}
+
+uint64_t DataTree::tryReadBytes(void *target, uint64_t offset, uint64_t count) const {
+  shared_lock<shared_mutex> lock(_treeStructureMutex);
+  auto result = _tryReadBytes(target, offset, count);
+  return result;
+}
+
+uint64_t DataTree::_tryReadBytes(void *target, uint64_t offset, uint64_t count) const {
+  //TODO Quite inefficient to call size() here, because that has to traverse the tree
+  const uint64_t _size = _numBytes();
+  const uint64_t realCount = std::max(INT64_C(0), std::min(static_cast<int64_t>(count), static_cast<int64_t>(_size)-static_cast<int64_t>(offset)));
+  _doReadBytes(target, offset, realCount);
+  return realCount;
+}
+
+void DataTree::_doReadBytes(void *target, uint64_t offset, uint64_t count) const {
+  auto onExistingLeaf = [target, offset, count] (uint64_t indexOfFirstLeafByte, LeafHandle leaf, uint32_t leafDataOffset, uint32_t leafDataSize) {
+    ASSERT(indexOfFirstLeafByte+leafDataOffset>=offset && indexOfFirstLeafByte-offset+leafDataOffset <= count && indexOfFirstLeafByte-offset+leafDataOffset+leafDataSize <= count, "Writing to target out of bounds");
+    //TODO Simplify formula, make it easier to understand
+    leaf.node()->read(static_cast<uint8_t*>(target) + indexOfFirstLeafByte - offset + leafDataOffset, leafDataOffset, leafDataSize);
+  };
+  auto onCreateLeaf = [] (uint64_t /*beginByte*/, uint32_t /*count*/) -> Data {
+    ASSERT(false, "Reading shouldn't create new leaves.");
+  };
+
+  _traverseLeavesByByteIndices(offset, count, true, onExistingLeaf, onCreateLeaf);
+}
+
+void DataTree::writeBytes(const void *source, uint64_t offset, uint64_t count) {
+  unique_lock<shared_mutex> lock(_treeStructureMutex);
+
+  auto onExistingLeaf = [source, offset, count] (uint64_t indexOfFirstLeafByte, LeafHandle leaf, uint32_t leafDataOffset, uint32_t leafDataSize) {
+    ASSERT(indexOfFirstLeafByte+leafDataOffset>=offset && indexOfFirstLeafByte-offset+leafDataOffset <= count && indexOfFirstLeafByte-offset+leafDataOffset+leafDataSize <= count, "Reading from source out of bounds");
+    if (leafDataOffset == 0 && leafDataSize == leaf.nodeStore()->layout().maxBytesPerLeaf()) {
+      Data leafData(leafDataSize);
+      std::memcpy(leafData.data(), static_cast<const uint8_t*>(source) + indexOfFirstLeafByte - offset, leafDataSize);
+      leaf.nodeStore()->overwriteLeaf(leaf.blockId(), std::move(leafData));
+    } else {
+      //TODO Simplify formula, make it easier to understand
+      leaf.node()->write(static_cast<const uint8_t*>(source) + indexOfFirstLeafByte - offset + leafDataOffset, leafDataOffset,
+                         leafDataSize);
+    }
+  };
+  auto onCreateLeaf = [source, offset, count] (uint64_t beginByte, uint32_t numBytes) -> Data {
+    ASSERT(beginByte >= offset && beginByte-offset <= count && beginByte-offset+numBytes <= count, "Reading from source out of bounds");
+    Data result(numBytes);
+    //TODO Simplify formula, make it easier to understand
+    std::memcpy(result.data(), static_cast<const uint8_t*>(source) + beginByte - offset, numBytes);
+    return result;
+  };
+
+  _traverseLeavesByByteIndices(offset, count, false, onExistingLeaf, onCreateLeaf);
+}
+
 }
 }
 }
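The read path (readBytes, tryReadBytes, readAllBytes) now takes only a shared lock on _treeStructureMutex and traverses with readOnlyTraversal set to true, while writeBytes() takes a unique lock and may grow the tree. The clamping in _tryReadBytes() is what fixes out-of-bounds reads of empty files: the effective byte count is max(0, min(count, size - offset)), evaluated in signed arithmetic so an offset past the end yields 0 bytes instead of wrapping around. A standalone sketch of just that rule (not the member function itself):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    // Mirrors the clamping in DataTree::_tryReadBytes(): never read past the end,
    // and an offset beyond the end yields 0 bytes.
    uint64_t clampReadCount(uint64_t size, uint64_t offset, uint64_t count) {
      return static_cast<uint64_t>(std::max<int64_t>(
          0, std::min<int64_t>(static_cast<int64_t>(count),
                               static_cast<int64_t>(size) - static_cast<int64_t>(offset))));
    }

    int main() {
      assert(clampReadCount(/*size=*/0, /*offset=*/3, /*count=*/5) == 0);   // empty blob
      assert(clampReadCount(/*size=*/10, /*offset=*/8, /*count=*/5) == 2);  // partial read
      assert(clampReadCount(/*size=*/10, /*offset=*/2, /*count=*/5) == 5);  // full read
      return 0;
    }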
@@ -10,6 +10,7 @@
 #include <boost/thread/shared_mutex.hpp>
 #include <blockstore/utils/BlockId.h>
 #include "LeafHandle.h"
+#include "impl/CachedValue.h"
 
 namespace blobstore {
 namespace onblocks {
@@ -31,39 +32,57 @@ public:
   //Returning uint64_t, because calculations handling this probably need to be done in 64bit to support >4GB blobs.
   uint64_t maxBytesPerLeaf() const;
 
-  void traverseLeaves(uint32_t beginIndex, uint32_t endIndex, std::function<void (uint32_t index, bool isRightBorderLeaf, LeafHandle leaf)> onExistingLeaf, std::function<cpputils::Data (uint32_t index)> onCreateLeaf);
+  uint64_t tryReadBytes(void *target, uint64_t offset, uint64_t count) const;
+  void readBytes(void *target, uint64_t offset, uint64_t count) const;
+  cpputils::Data readAllBytes() const;
+
+  void writeBytes(const void *source, uint64_t offset, uint64_t count);
 
   void resizeNumBytes(uint64_t newNumBytes);
 
   uint32_t numLeaves() const;
-  uint64_t numStoredBytes() const;
+  uint64_t numBytes() const;
 
   uint8_t depth() const;
 
   // only used by test cases
-  uint32_t _forceComputeNumLeaves() const;
+  uint32_t forceComputeNumLeaves() const;
 
   void flush() const;
 
 private:
-  mutable boost::shared_mutex _mutex;
+  // This mutex must protect the tree structure, i.e. which nodes exist and how they're connected.
+  // Also protects total number of bytes (i.e. number of leaves + size of last leaf).
+  // It also protects the data in leaf nodes, because writing bytes might grow the blob and change the structure.
+  mutable boost::shared_mutex _treeStructureMutex;
 
   datanodestore::DataNodeStore *_nodeStore;
   cpputils::unique_ref<datanodestore::DataNode> _rootNode;
   blockstore::BlockId _blockId; // BlockId is stored in a member variable, since _rootNode is nullptr while traversing, but we still want to be able to return the blockId.
-  mutable boost::optional<uint32_t> _numLeavesCache;
+  struct SizeCache final {
+    uint32_t numLeaves;
+    uint64_t numBytes;
+  };
+  mutable CachedValue<SizeCache> _sizeCache;
 
   cpputils::unique_ref<datanodestore::DataNode> releaseRootNode();
   friend class DataTreeStore;
 
-  //TODO Use underscore for private methods
-  void _traverseLeaves(uint32_t beginIndex, uint32_t endIndex,
+  void _traverseLeavesByLeafIndices(uint32_t beginIndex, uint32_t endIndex, bool readOnlyTraversal,
       std::function<void (uint32_t index, bool isRightBorderLeaf, LeafHandle leaf)> onExistingLeaf,
       std::function<cpputils::Data (uint32_t index)> onCreateLeaf,
-      std::function<void (datanodestore::DataInnerNode *node)> onBacktrackFromSubtree);
-  uint32_t leavesPerFullChild(const datanodestore::DataInnerNode &root) const;
-  uint64_t _numStoredBytes() const;
-  uint64_t _numStoredBytes(const datanodestore::DataNode &root) const;
-  uint32_t _numLeaves() const;
-  uint32_t _computeNumLeaves(const datanodestore::DataNode &node) const;
+      std::function<void (datanodestore::DataInnerNode *node)> onBacktrackFromSubtree) const;
+  void _traverseLeavesByByteIndices(uint64_t beginByte, uint64_t sizeBytes, bool readOnlyTraversal, std::function<void (uint64_t leafOffset, LeafHandle leaf, uint32_t begin, uint32_t count)> onExistingLeaf, std::function<cpputils::Data (uint64_t beginByte, uint32_t count)> onCreateLeaf) const;
+  uint32_t _leavesPerFullChild(const datanodestore::DataInnerNode &root) const;
+  SizeCache _getOrComputeSizeCache() const;
+  SizeCache _computeSizeCache(const datanodestore::DataNode &node) const;
+
+  uint64_t _tryReadBytes(void *target, uint64_t offset, uint64_t count) const;
+  void _doReadBytes(void *target, uint64_t offset, uint64_t count) const;
+  uint64_t _numBytes() const;
 
   DISALLOW_COPY_AND_ASSIGN(DataTree);
 };
@@ -0,0 +1,2 @@
+#include "CachedValue.h"
+
@@ -0,0 +1,46 @@
+#pragma once
+#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_IMPL_CACHEDVALUE_H_
+#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_IMPL_CACHEDVALUE_H_
+
+#include <boost/optional.hpp>
+#include <boost/thread/shared_mutex.hpp>
+#include <functional>
+
+namespace blobstore {
+namespace onblocks {
+
+// TODO Test
+template<class T>
+class CachedValue final {
+public:
+  CachedValue() :_cache(boost::none), _mutex() {}
+
+  T getOrCompute(std::function<T ()> compute) {
+    boost::upgrade_lock<boost::shared_mutex> readLock(_mutex);
+    if (_cache == boost::none) {
+      boost::upgrade_to_unique_lock<boost::shared_mutex> writeLock(readLock);
+      _cache = compute();
+    }
+    return *_cache;
+  }
+
+  void update(std::function<void (boost::optional<T>*)> func) {
+    boost::unique_lock<boost::shared_mutex> writeLock(_mutex);
+    func(&_cache);
+  }
+
+  void clear() {
+    update([] (boost::optional<T>* cache) {
+      *cache = boost::none;
+    });
+  }
+
+private:
+  boost::optional<T> _cache;
+  boost::shared_mutex _mutex;
+};
+
+}
+}
+
+#endif
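CachedValue<T> guards a lazily computed value with a boost::shared_mutex: getOrCompute() holds an upgrade lock and only upgrades to a unique lock when the cache is still empty, update() takes a unique lock, and clear() resets the cache so the next getOrCompute() recomputes. DataTree instantiates it as CachedValue<SizeCache>. A minimal usage sketch (not from the commit; the Stats struct and _countItems() are hypothetical stand-ins for the cached payload and the expensive computation):

    #include "CachedValue.h"
    #include <cstdint>

    // Hypothetical cached payload; DataTree uses a similar SizeCache struct.
    struct Stats final {
      uint32_t numItems;
    };

    class Container final {
    public:
      uint32_t numItems() const {
        // Compute lazily on first access, then serve from the cache.
        return _stats.getOrCompute([this] () {
          return Stats{_countItems()};  // hypothetical expensive computation
        }).numItems;
      }

      void add() {
        // Invalidate so the next numItems() call recomputes.
        _stats.clear();
      }

    private:
      uint32_t _countItems() const { return 0; }  // placeholder

      mutable blobstore::onblocks::CachedValue<Stats> _stats;
    };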
@@ -20,8 +20,8 @@ namespace blobstore {
 namespace onblocks {
 namespace datatreestore {
 
-  LeafTraverser::LeafTraverser(DataNodeStore *nodeStore)
-    : _nodeStore(nodeStore) {
+  LeafTraverser::LeafTraverser(DataNodeStore *nodeStore, bool readOnlyTraversal)
+    : _nodeStore(nodeStore), _readOnlyTraversal(readOnlyTraversal) {
   }
 
   void LeafTraverser::traverseAndUpdateRoot(unique_ref<DataNode>* root, uint32_t beginIndex, uint32_t endIndex, function<void (uint32_t index, bool isRightBorderLeaf, LeafHandle leaf)> onExistingLeaf, function<Data (uint32_t index)> onCreateLeaf, function<void (DataInnerNode *node)> onBacktrackFromSubtree) {
@@ -38,6 +38,7 @@ namespace blobstore {
 
     uint32_t maxLeavesForDepth = _maxLeavesForTreeDepth((*root)->depth());
     bool increaseTreeDepth = endIndex > maxLeavesForDepth;
+    ASSERT(!_readOnlyTraversal || !increaseTreeDepth, "Tried to grow a tree on a read only traversal");
 
     if ((*root)->depth() == 0) {
       DataLeafNode *leaf = dynamic_cast<DataLeafNode*>(root->get());
@@ -63,6 +64,8 @@ namespace blobstore {
     // We don't increase to the full needed tree depth in one step, because we want the traversal to go as far as possible
     // and only then increase the depth - this causes the tree to be in consistent shape (balanced) for longer.
     if (increaseTreeDepth) {
+      ASSERT(!_readOnlyTraversal, "Can't increase tree depth in a read-only traversal");
+
       // TODO Test cases that increase tree depth by 0, 1, 2, ... levels
       *root = _increaseTreeDepth(std::move(*root));
       _traverseAndUpdateRoot(root, std::max(beginIndex, maxLeavesForDepth), endIndex, false, onExistingLeaf, onCreateLeaf, onBacktrackFromSubtree);
@@ -74,6 +77,8 @@ namespace blobstore {
   }
 
   unique_ref<DataInnerNode> LeafTraverser::_increaseTreeDepth(unique_ref<DataNode> root) {
+    ASSERT(!_readOnlyTraversal, "Can't increase tree depth in a read-only traversal");
+
     auto copyOfOldRoot = _nodeStore->createNewNodeAsCopyFrom(*root);
     return DataNode::convertToNewInnerNode(std::move(root), _nodeStore->layout(), *copyOfOldRoot);
   }
@@ -85,6 +90,7 @@ namespace blobstore {
     LeafHandle leafHandle(_nodeStore, blockId);
     if (growLastLeaf) {
       if (leafHandle.node()->numBytes() != _nodeStore->layout().maxBytesPerLeaf()) {
+        ASSERT(!_readOnlyTraversal, "Can't grow the last leaf in a read-only traversal");
         leafHandle.node()->resize(_nodeStore->layout().maxBytesPerLeaf());
       }
     }
@@ -116,6 +122,7 @@ namespace blobstore {
     ASSERT(endChild <= _nodeStore->layout().maxChildrenPerInnerNode(), "Traversal region would need increasing the tree depth. This should have happened before calling this function.");
     uint32_t numChildren = root->numChildren();
     ASSERT(!growLastLeaf || endChild >= numChildren, "Can only grow last leaf if it exists");
+    ASSERT(!_readOnlyTraversal || endChild <= numChildren, "Can only traverse out of bounds in a read-only traversal");
     bool shouldGrowLastExistingLeaf = growLastLeaf || endChild > numChildren;
 
     // If we traverse outside of the valid region (i.e. usually would only traverse to new leaves and not to the last leaf),
@@ -146,6 +153,8 @@ namespace blobstore {
 
     // Traverse new children (including gap children, i.e. children that are created but not traversed because they're to the right of the current size, but to the left of the traversal region)
     for (uint32_t childIndex = numChildren; childIndex < endChild; ++childIndex) {
+      ASSERT(!_readOnlyTraversal, "Can't create new children in a read-only traversal");
+
       uint32_t childOffset = childIndex * leavesPerChild;
       uint32_t localBeginIndex = std::min(leavesPerChild, utils::maxZeroSubtraction(beginIndex, childOffset));
       uint32_t localEndIndex = std::min(leavesPerChild, endIndex - childOffset);
@@ -161,6 +170,8 @@ namespace blobstore {
   }
 
   unique_ref<DataNode> LeafTraverser::_createNewSubtree(uint32_t beginIndex, uint32_t endIndex, uint32_t leafOffset, uint8_t depth, function<Data (uint32_t index)> onCreateLeaf, function<void (DataInnerNode *node)> onBacktrackFromSubtree) {
+    ASSERT(!_readOnlyTraversal, "Can't create a new subtree in a read-only traversal");
+
     ASSERT(beginIndex <= endIndex, "Invalid parameters");
     if (0 == depth) {
       ASSERT(beginIndex <= 1 && endIndex == 1, "With depth 0, we can only traverse one or zero leaves (i.e. traverse one leaf or traverse a gap leaf).");
@@ -212,6 +223,8 @@ namespace blobstore {
   }
 
   function<Data (uint32_t index)> LeafTraverser::_createMaxSizeLeaf() const {
+    ASSERT(!_readOnlyTraversal, "Can't create a new leaf in a read-only traversal");
+
     uint64_t maxBytesPerLeaf = _nodeStore->layout().maxBytesPerLeaf();
     return [maxBytesPerLeaf] (uint32_t /*index*/) -> Data {
       return Data(maxBytesPerLeaf).FillWithZeroes();
@@ -221,6 +234,8 @@ namespace blobstore {
   void LeafTraverser::_whileRootHasOnlyOneChildReplaceRootWithItsChild(unique_ref<DataNode>* root) {
     DataInnerNode *inner = dynamic_cast<DataInnerNode*>(root->get());
     if (inner != nullptr && inner->numChildren() == 1) {
+      ASSERT(!_readOnlyTraversal, "Can't decrease tree depth in a read-only traversal");
+
       auto newRoot = _whileRootHasOnlyOneChildRemoveRootReturnChild(inner->readChild(0).blockId());
       *root = _nodeStore->overwriteNodeWith(std::move(*root), *newRoot);
       _nodeStore->remove(std::move(newRoot));
@@ -228,6 +243,8 @@ namespace blobstore {
   }
 
   unique_ref<DataNode> LeafTraverser::_whileRootHasOnlyOneChildRemoveRootReturnChild(const blockstore::BlockId &blockId) {
+    ASSERT(!_readOnlyTraversal, "Can't decrease tree depth in a read-only traversal");
+
     auto current = _nodeStore->load(blockId);
     ASSERT(current != none, "Node not found");
     auto inner = dynamic_pointer_move<DataInnerNode>(*current);
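LeafTraverser now carries a _readOnlyTraversal flag and, as the hunks above show, asserts at every point where a traversal could grow the tree (creating leaves, children or subtrees, growing the last leaf, or changing the depth). DataTree passes true for read traversals and false for writes and resizes. A small sketch of the calling convention (not from the commit; written as a template so it compiles without the real headers, with TraverserT standing in for datatreestore::LeafTraverser and NodeStoreT for datanodestore::DataNodeStore):

    #include <cstdint>

    // Run a traversal that must not modify the tree: readOnlyTraversal == true
    // makes the traverser ASSERT if it would have to create or resize anything.
    template<class TraverserT, class NodeStoreT, class RootPtrT,
             class OnExistingLeaf, class OnCreateLeaf, class OnBacktrack>
    void traverseForReading(NodeStoreT *nodeStore, RootPtrT *root,
                            uint32_t beginIndex, uint32_t endIndex,
                            OnExistingLeaf onExistingLeaf, OnCreateLeaf onCreateLeaf,
                            OnBacktrack onBacktrackFromSubtree) {
      TraverserT(nodeStore, /*readOnlyTraversal=*/true)
          .traverseAndUpdateRoot(root, beginIndex, endIndex,
                                 onExistingLeaf, onCreateLeaf, onBacktrackFromSubtree);
    }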
@@ -25,7 +25,7 @@ namespace blobstore {
    */
   class LeafTraverser final {
   public:
-    LeafTraverser(datanodestore::DataNodeStore *nodeStore);
+    LeafTraverser(datanodestore::DataNodeStore *nodeStore, bool readOnlyTraversal);
 
     void traverseAndUpdateRoot(
       cpputils::unique_ref<datanodestore::DataNode>* root, uint32_t beginIndex, uint32_t endIndex,
@@ -35,6 +35,7 @@ namespace blobstore {
 
   private:
     datanodestore::DataNodeStore *_nodeStore;
+    const bool _readOnlyTraversal;
 
     void _traverseAndUpdateRoot(
       cpputils::unique_ref<datanodestore::DataNode>* root, uint32_t beginIndex, uint32_t endIndex, bool isLeftBorderOfTraversal,
@@ -22,10 +22,6 @@ public:
     return _baseTree->maxBytesPerLeaf();
   }
 
-  void traverseLeaves(uint32_t beginIndex, uint32_t endIndex, std::function<void (uint32_t index, bool isRightBorderLeaf, datatreestore::LeafHandle leaf)> onExistingLeaf, std::function<cpputils::Data (uint32_t index)> onCreateLeaf) {
-    return _baseTree->traverseLeaves(beginIndex, endIndex, onExistingLeaf, onCreateLeaf);
-  }
-
   uint32_t numLeaves() const {
     return _baseTree->numLeaves();
   }
@@ -34,8 +30,24 @@ public:
     return _baseTree->resizeNumBytes(newNumBytes);
   }
 
-  uint64_t numStoredBytes() const {
-    return _baseTree->numStoredBytes();
+  uint64_t numBytes() const {
+    return _baseTree->numBytes();
+  }
+
+  uint64_t tryReadBytes(void *target, uint64_t offset, uint64_t count) const {
+    return _baseTree->tryReadBytes(target, offset, count);
+  }
+
+  void readBytes(void *target, uint64_t offset, uint64_t count) const {
+    return _baseTree->readBytes(target, offset, count);
+  }
+
+  cpputils::Data readAllBytes() const {
+    return _baseTree->readAllBytes();
+  }
+
+  void writeBytes(const void *source, uint64_t offset, uint64_t count) {
+    return _baseTree->writeBytes(source, offset, count);
   }
 
   void flush() {
@@ -19,7 +19,7 @@ set(SOURCES
     implementations/onblocks/datatreestore/DataTreeTest_NumStoredBytes.cpp
     implementations/onblocks/datatreestore/DataTreeTest_ResizeNumBytes.cpp
     implementations/onblocks/datatreestore/DataTreeStoreTest.cpp
-    implementations/onblocks/datatreestore/DataTreeTest_TraverseLeaves.cpp
+    implementations/onblocks/datatreestore/LeafTraverserTest.cpp
     implementations/onblocks/BlobSizeTest.cpp
     implementations/onblocks/BlobReadWriteTest.cpp
     implementations/onblocks/BigBlobsTest.cpp
@@ -63,6 +63,92 @@ TEST_F(BlobReadWriteTest, WritingCloseTo16ByteLimitDoesntDestroySize) {
  EXPECT_EQ(32780u, blob->size());
}

+TEST_F(BlobReadWriteTest, givenEmptyBlob_whenTryReadInFirstLeaf_thenFails) {
+  Data data(5);
+  size_t read = blob->tryRead(data.data(), 3, 5);
+  EXPECT_EQ(0, read);
+}
+
+TEST_F(BlobReadWriteTest, givenEmptyBlob_whenTryReadInLaterLeaf_thenFails) {
+  Data data(5);
+  size_t read = blob->tryRead(data.data(), 2*LAYOUT.maxBytesPerLeaf(), 5);
+  EXPECT_EQ(0, read);
+}
+
+TEST_F(BlobReadWriteTest, givenEmptyBlob_whenReadInFirstLeaf_thenFails) {
+  Data data(5);
+  EXPECT_ANY_THROW(
+    blob->read(data.data(), 3, 5)
+  );
+}
+
+TEST_F(BlobReadWriteTest, givenEmptyBlob_whenReadInLaterLeaf_thenFails) {
+  Data data(5);
+  EXPECT_ANY_THROW(
+    blob->read(data.data(), 2*LAYOUT.maxBytesPerLeaf(), 5)
+  );
+}
+
+TEST_F(BlobReadWriteTest, givenEmptyBlob_whenReadAll_thenReturnsZeroSizedData) {
+  Data data = blob->readAll();
+  EXPECT_EQ(0, data.size());
+}
+
+TEST_F(BlobReadWriteTest, givenEmptyBlob_whenWrite_thenGrows) {
+  Data data(5);
+  blob->write(data.data(), 4, 5);
+  EXPECT_EQ(9, blob->size());
+}
+
+TEST_F(BlobReadWriteTest, givenEmptyBlob_whenWriteZeroBytes_thenDoesntGrow) {
+  Data data(5);
+  blob->write(data.data(), 4, 0);
+  EXPECT_EQ(0, blob->size());
+}
+
+TEST_F(BlobReadWriteTest, givenBlobResizedToZero_whenTryReadInFirstLeaf_thenFails) {
+  Data data(5);
+  size_t read = blob->tryRead(data.data(), 3, 5);
+  EXPECT_EQ(0, read);
+}
+
+TEST_F(BlobReadWriteTest, givenBlobResizedToZero_whenTryReadInLaterLeaf_thenFails) {
+  Data data(5);
+  size_t read = blob->tryRead(data.data(), 2*LAYOUT.maxBytesPerLeaf(), 5);
+  EXPECT_EQ(0, read);
+}
+
+TEST_F(BlobReadWriteTest, givenBlobResizedToZero_whenReadInFirstLeaf_thenFails) {
+  Data data(5);
+  EXPECT_ANY_THROW(
+    blob->read(data.data(), 3, 5)
+  );
+}
+
+TEST_F(BlobReadWriteTest, givenBlobResizedToZero_whenReadInLaterLeaf_thenFails) {
+  Data data(5);
+  EXPECT_ANY_THROW(
+    blob->read(data.data(), 2*LAYOUT.maxBytesPerLeaf(), 5)
+  );
+}
+
+TEST_F(BlobReadWriteTest, givenBlobResizedToZero_whenReadAll_thenReturnsZeroSizedData) {
+  Data data = blob->readAll();
+  EXPECT_EQ(0, data.size());
+}
+
+TEST_F(BlobReadWriteTest, givenBlobResizedToZero_whenWrite_thenGrows) {
+  Data data(5);
+  blob->write(data.data(), 4, 5);
+  EXPECT_EQ(9, blob->size());
+}
+
+TEST_F(BlobReadWriteTest, givenBlobResizedToZero_whenWriteZeroBytes_thenDoesntGrow) {
+  Data data(5);
+  blob->write(data.data(), 4, 0);
+  EXPECT_EQ(0, blob->size());
+}
+
struct DataRange {
  uint64_t blobsize;
  uint64_t offset;
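The new BlobReadWriteTest cases above pin down an out-of-bounds contract: tryRead reports how many bytes it actually copied (0 for an empty blob), read throws when the requested range is not fully inside the blob, and write past the end grows the blob to offset + count while a zero-length write leaves the size untouched. The following is a minimal self-contained sketch of that contract; SimpleBlob is an illustration only, not blobstore::BlobOnBlocks.

// Illustrative sketch of the read/tryRead/write contract the new tests assert.
// "SimpleBlob" is a stand-in, not the CryFS implementation.
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <stdexcept>
#include <vector>

class SimpleBlob {
public:
  uint64_t size() const { return _data.size(); }

  // Copies as much as is available; returns the number of bytes copied (0 past the end).
  uint64_t tryRead(void *target, uint64_t offset, uint64_t count) const {
    if (offset >= _data.size()) {
      return 0;
    }
    uint64_t available = std::min(count, _data.size() - offset);
    std::memcpy(target, _data.data() + offset, available);
    return available;
  }

  // Throws if the requested range is not fully inside the blob.
  void read(void *target, uint64_t offset, uint64_t count) const {
    if (tryRead(target, offset, count) != count) {
      throw std::runtime_error("read out of bounds");
    }
  }

  // Writing past the end grows the blob to offset + count; a zero-length write is a no-op.
  void write(const void *source, uint64_t offset, uint64_t count) {
    if (count == 0) {
      return;
    }
    if (offset + count > _data.size()) {
      _data.resize(offset + count);
    }
    std::memcpy(_data.data() + offset, source, count);
  }

private:
  std::vector<unsigned char> _data;
};

Under this contract, writing 5 bytes at offset 4 into an empty blob yields size 9 (the whenWrite_thenGrows expectation), while writing zero bytes at offset 4 leaves the size at 0 (the whenWriteZeroBytes_thenDoesntGrow expectation).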
@@ -13,7 +13,7 @@ public:

TEST_F(DataTreeTest_NumStoredBytes, CreatedTreeIsEmpty) {
  auto tree = treeStore.createNewTree();
-  EXPECT_EQ(0u, tree->numStoredBytes());
+  EXPECT_EQ(0u, tree->numBytes());
}

class DataTreeTest_NumStoredBytes_P: public DataTreeTest_NumStoredBytes, public WithParamInterface<uint32_t> {};
@@ -24,47 +24,47 @@ INSTANTIATE_TEST_CASE_P(FullLastLeaf, DataTreeTest_NumStoredBytes_P, Values(stat
TEST_P(DataTreeTest_NumStoredBytes_P, SingleLeaf) {
  BlockId blockId = CreateLeafWithSize(GetParam())->blockId();
  auto tree = treeStore.load(blockId).value();
-  EXPECT_EQ(GetParam(), tree->numStoredBytes());
+  EXPECT_EQ(GetParam(), tree->numBytes());
}

TEST_P(DataTreeTest_NumStoredBytes_P, TwoLeafTree) {
  BlockId blockId = CreateTwoLeafWithSecondLeafSize(GetParam())->blockId();
  auto tree = treeStore.load(blockId).value();
-  EXPECT_EQ(nodeStore->layout().maxBytesPerLeaf() + GetParam(), tree->numStoredBytes());
+  EXPECT_EQ(nodeStore->layout().maxBytesPerLeaf() + GetParam(), tree->numBytes());
}

TEST_P(DataTreeTest_NumStoredBytes_P, FullTwolevelTree) {
  BlockId blockId = CreateFullTwoLevelWithLastLeafSize(GetParam())->blockId();
  auto tree = treeStore.load(blockId).value();
-  EXPECT_EQ(nodeStore->layout().maxBytesPerLeaf()*(nodeStore->layout().maxChildrenPerInnerNode()-1) + GetParam(), tree->numStoredBytes());
+  EXPECT_EQ(nodeStore->layout().maxBytesPerLeaf()*(nodeStore->layout().maxChildrenPerInnerNode()-1) + GetParam(), tree->numBytes());
}

TEST_P(DataTreeTest_NumStoredBytes_P, ThreeLevelTreeWithOneChild) {
  BlockId blockId = CreateThreeLevelWithOneChildAndLastLeafSize(GetParam())->blockId();
  auto tree = treeStore.load(blockId).value();
-  EXPECT_EQ(nodeStore->layout().maxBytesPerLeaf() + GetParam(), tree->numStoredBytes());
+  EXPECT_EQ(nodeStore->layout().maxBytesPerLeaf() + GetParam(), tree->numBytes());
}

TEST_P(DataTreeTest_NumStoredBytes_P, ThreeLevelTreeWithTwoChildren) {
  BlockId blockId = CreateThreeLevelWithTwoChildrenAndLastLeafSize(GetParam())->blockId();
  auto tree = treeStore.load(blockId).value();
-  EXPECT_EQ(nodeStore->layout().maxBytesPerLeaf()*nodeStore->layout().maxChildrenPerInnerNode() + nodeStore->layout().maxBytesPerLeaf() + GetParam(), tree->numStoredBytes());
+  EXPECT_EQ(nodeStore->layout().maxBytesPerLeaf()*nodeStore->layout().maxChildrenPerInnerNode() + nodeStore->layout().maxBytesPerLeaf() + GetParam(), tree->numBytes());
}

TEST_P(DataTreeTest_NumStoredBytes_P, ThreeLevelTreeWithThreeChildren) {
  BlockId blockId = CreateThreeLevelWithThreeChildrenAndLastLeafSize(GetParam())->blockId();
  auto tree = treeStore.load(blockId).value();
-  EXPECT_EQ(2*nodeStore->layout().maxBytesPerLeaf()*nodeStore->layout().maxChildrenPerInnerNode() + nodeStore->layout().maxBytesPerLeaf() + GetParam(), tree->numStoredBytes());
+  EXPECT_EQ(2*nodeStore->layout().maxBytesPerLeaf()*nodeStore->layout().maxChildrenPerInnerNode() + nodeStore->layout().maxBytesPerLeaf() + GetParam(), tree->numBytes());
}

TEST_P(DataTreeTest_NumStoredBytes_P, FullThreeLevelTree) {
  BlockId blockId = CreateFullThreeLevelWithLastLeafSize(GetParam())->blockId();
  auto tree = treeStore.load(blockId).value();
-  EXPECT_EQ(nodeStore->layout().maxBytesPerLeaf()*nodeStore->layout().maxChildrenPerInnerNode()*(nodeStore->layout().maxChildrenPerInnerNode()-1) + nodeStore->layout().maxBytesPerLeaf()*(nodeStore->layout().maxChildrenPerInnerNode()-1) + GetParam(), tree->numStoredBytes());
+  EXPECT_EQ(nodeStore->layout().maxBytesPerLeaf()*nodeStore->layout().maxChildrenPerInnerNode()*(nodeStore->layout().maxChildrenPerInnerNode()-1) + nodeStore->layout().maxBytesPerLeaf()*(nodeStore->layout().maxChildrenPerInnerNode()-1) + GetParam(), tree->numBytes());
}

TEST_P(DataTreeTest_NumStoredBytes_P, FourLevelMinDataTree) {
  BlockId blockId = CreateFourLevelMinDataWithLastLeafSize(GetParam())->blockId();
  auto tree = treeStore.load(blockId).value();
-  EXPECT_EQ(nodeStore->layout().maxBytesPerLeaf()*nodeStore->layout().maxChildrenPerInnerNode()*nodeStore->layout().maxChildrenPerInnerNode() + GetParam(), tree->numStoredBytes());
+  EXPECT_EQ(nodeStore->layout().maxBytesPerLeaf()*nodeStore->layout().maxChildrenPerInnerNode()*nodeStore->layout().maxChildrenPerInnerNode() + GetParam(), tree->numBytes());
}
@@ -3,14 +3,24 @@
#include <gmock/gmock.h>

using blobstore::onblocks::datatreestore::DataTree;
-using blobstore::onblocks::datatreestore::LeafHandle;
using blockstore::BlockId;
using cpputils::Data;

class DataTreeTest_Performance: public DataTreeTest {
public:
-  void Traverse(DataTree *tree, uint64_t beginIndex, uint64_t endIndex) {
-    tree->traverseLeaves(beginIndex, endIndex, [] (uint32_t /*index*/, bool /*isRightBorderNode*/, LeafHandle /*leaf*/) {}, [this] (uint32_t /*index*/) -> Data {return Data(maxChildrenPerInnerNode).FillWithZeroes();});
+  void TraverseByWriting(DataTree *tree, uint64_t beginIndex, uint64_t endIndex) {
+    uint64_t offset = beginIndex * maxBytesPerLeaf;
+    uint64_t count = endIndex * maxBytesPerLeaf - offset;
+    Data data(count);
+    data.FillWithZeroes();
+    tree->writeBytes(data.data(), offset, count);
+  }
+
+  void TraverseByReading(DataTree *tree, uint64_t beginIndex, uint64_t endIndex) {
+    uint64_t offset = beginIndex * maxBytesPerLeaf;
+    uint64_t count = endIndex * maxBytesPerLeaf - offset;
+    Data data(count);
+    tree->readBytes(data.data(), offset, count);
  }

  uint64_t maxChildrenPerInnerNode = nodeStore->layout().maxChildrenPerInnerNode();
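The new helpers convert a leaf-index range into a byte range with offset = beginIndex * maxBytesPerLeaf and count = endIndex * maxBytesPerLeaf - offset, then exercise the tree through writeBytes or readBytes instead of the removed traverseLeaves callback. A small worked example of that arithmetic follows; the 16 KiB leaf size is only an assumption for illustration, the real value comes from nodeStore->layout().

// Worked example of the leaf-index -> byte-range mapping used by
// TraverseByWriting/TraverseByReading. The leaf size is assumed for illustration.
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t maxBytesPerLeaf = 16 * 1024;  // assumption, not the real layout value
  const uint64_t beginIndex = 3;
  const uint64_t endIndex = 5;

  const uint64_t offset = beginIndex * maxBytesPerLeaf;        // 49152
  const uint64_t count = endIndex * maxBytesPerLeaf - offset;  // 32768, i.e. two full leaves

  std::printf("bytes [%llu, %llu) cover leaves [%llu, %llu)\n",
              (unsigned long long)offset, (unsigned long long)(offset + count),
              (unsigned long long)beginIndex, (unsigned long long)endIndex);
  return 0;
}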
@@ -71,84 +81,168 @@ TEST_F(DataTreeTest_Performance, DeletingDoesntLoadLeaves_Threelevel_DeleteByKey
  EXPECT_EQ(0u, blockStore->resizedBlocks().size());
}

-TEST_F(DataTreeTest_Performance, TraverseLeaves_Twolevel_All) {
+TEST_F(DataTreeTest_Performance, TraverseLeaves_Twolevel_All_ByWriting) {
  auto blockId = CreateFullTwoLevel()->blockId();
  auto tree = treeStore.load(blockId).value();
  blockStore->resetCounters();

-  Traverse(tree.get(), 0, maxChildrenPerInnerNode);
+  TraverseByWriting(tree.get(), 0, maxChildrenPerInnerNode);

-  EXPECT_EQ(0u, blockStore->loadedBlocks().size()); // Doesn't actually load the leaves, but returns the keys of the leaves to the callback
+  EXPECT_EQ(1u, blockStore->loadedBlocks().size()); // Has to load the rightmost leaf once to adapt its size, rest of the leaves aren't loaded but just overwritten
  EXPECT_EQ(0u, blockStore->createdBlocks());
  EXPECT_EQ(0u, blockStore->removedBlocks().size());
-  EXPECT_EQ(0u, blockStore->distinctWrittenBlocks().size());
+  EXPECT_EQ(maxChildrenPerInnerNode, blockStore->distinctWrittenBlocks().size());
  EXPECT_EQ(0u, blockStore->resizedBlocks().size());
}

-TEST_F(DataTreeTest_Performance, TraverseLeaves_Twolevel_Some) {
+TEST_F(DataTreeTest_Performance, TraverseLeaves_Twolevel_All_ByReading) {
  auto blockId = CreateFullTwoLevel()->blockId();
  auto tree = treeStore.load(blockId).value();
  blockStore->resetCounters();

-  Traverse(tree.get(), 3, 5);
+  TraverseByReading(tree.get(), 0, maxChildrenPerInnerNode);

-  EXPECT_EQ(0u, blockStore->loadedBlocks().size()); // Doesn't actually load the leaves, but returns the keys of the leaves to the callback
+  EXPECT_EQ(1u + maxChildrenPerInnerNode, blockStore->loadedBlocks().size()); // Has to read the rightmost leaf an additional time in the beginning to determine size.
  EXPECT_EQ(0u, blockStore->createdBlocks());
  EXPECT_EQ(0u, blockStore->removedBlocks().size());
  EXPECT_EQ(0u, blockStore->distinctWrittenBlocks().size());
  EXPECT_EQ(0u, blockStore->resizedBlocks().size());
}

-TEST_F(DataTreeTest_Performance, TraverseLeaves_Threelevel_All) {
-  auto blockId = CreateFullThreeLevel()->blockId();
+TEST_F(DataTreeTest_Performance, TraverseLeaves_Twolevel_Some_ByWriting) {
+  auto blockId = CreateFullTwoLevel()->blockId();
  auto tree = treeStore.load(blockId).value();
  blockStore->resetCounters();

-  Traverse(tree.get(), 0, maxChildrenPerInnerNode * maxChildrenPerInnerNode);
+  TraverseByWriting(tree.get(), 3, 5);

-  EXPECT_EQ(maxChildrenPerInnerNode, blockStore->loadedBlocks().size()); // Loads inner nodes. Doesn't load the leaves, but returns the keys of the leaves to the callback
+  EXPECT_EQ(0u, blockStore->loadedBlocks().size());
+  EXPECT_EQ(0u, blockStore->createdBlocks());
+  EXPECT_EQ(0u, blockStore->removedBlocks().size());
+  EXPECT_EQ(2u, blockStore->distinctWrittenBlocks().size());
+  EXPECT_EQ(0u, blockStore->resizedBlocks().size());
+}
+
+TEST_F(DataTreeTest_Performance, TraverseLeaves_Twolevel_Some_ByReading) {
+  auto blockId = CreateFullTwoLevel()->blockId();
+  auto tree = treeStore.load(blockId).value();
+  blockStore->resetCounters();
+
+  TraverseByReading(tree.get(), 3, 5);
+
+  EXPECT_EQ(3u, blockStore->loadedBlocks().size()); // reads 2 leaves and the rightmost leaf to determine size
  EXPECT_EQ(0u, blockStore->createdBlocks());
  EXPECT_EQ(0u, blockStore->removedBlocks().size());
  EXPECT_EQ(0u, blockStore->distinctWrittenBlocks().size());
  EXPECT_EQ(0u, blockStore->resizedBlocks().size());
}

-TEST_F(DataTreeTest_Performance, TraverseLeaves_Threelevel_InOneInner) {
+TEST_F(DataTreeTest_Performance, TraverseLeaves_Threelevel_All_ByWriting) {
  auto blockId = CreateFullThreeLevel()->blockId();
  auto tree = treeStore.load(blockId).value();
  blockStore->resetCounters();

-  Traverse(tree.get(), 3, 5);
+  TraverseByWriting(tree.get(), 0, maxChildrenPerInnerNode * maxChildrenPerInnerNode);

-  EXPECT_EQ(1u, blockStore->loadedBlocks().size()); // Loads inner node. Doesn't load the leaves, but returns the keys of the leaves to the callback
+  EXPECT_EQ(maxChildrenPerInnerNode + 1, blockStore->loadedBlocks().size()); // Loads inner nodes and has to load the rightmost leaf once to adapt its size, rest of the leaves aren't loaded but just overwritten.
+  EXPECT_EQ(0u, blockStore->createdBlocks());
+  EXPECT_EQ(0u, blockStore->removedBlocks().size());
+  EXPECT_EQ(maxChildrenPerInnerNode*maxChildrenPerInnerNode, blockStore->distinctWrittenBlocks().size());
+  EXPECT_EQ(0u, blockStore->resizedBlocks().size());
+}
+
+TEST_F(DataTreeTest_Performance, TraverseLeaves_Threelevel_All_ByReading) {
+  auto blockId = CreateFullThreeLevel()->blockId();
+  auto tree = treeStore.load(blockId).value();
+  blockStore->resetCounters();
+
+  TraverseByReading(tree.get(), 0, maxChildrenPerInnerNode * maxChildrenPerInnerNode);
+
+  EXPECT_EQ(maxChildrenPerInnerNode*maxChildrenPerInnerNode + maxChildrenPerInnerNode + 2, blockStore->loadedBlocks().size()); // Loads inner nodes and leaves. Has to load the rightmost inner node and leaf an additional time at the beginning to compute size
  EXPECT_EQ(0u, blockStore->createdBlocks());
  EXPECT_EQ(0u, blockStore->removedBlocks().size());
  EXPECT_EQ(0u, blockStore->distinctWrittenBlocks().size());
  EXPECT_EQ(0u, blockStore->resizedBlocks().size());
}

-TEST_F(DataTreeTest_Performance, TraverseLeaves_Threelevel_InTwoInner) {
+TEST_F(DataTreeTest_Performance, TraverseLeaves_Threelevel_InOneInner_ByWriting) {
  auto blockId = CreateFullThreeLevel()->blockId();
  auto tree = treeStore.load(blockId).value();
  blockStore->resetCounters();

-  Traverse(tree.get(), 3, 3 + maxChildrenPerInnerNode);
+  TraverseByWriting(tree.get(), 3, 5);

+  EXPECT_EQ(1u, blockStore->loadedBlocks().size()); // Loads inner node. Doesn't load the leaves, they're just overwritten.
+  EXPECT_EQ(0u, blockStore->createdBlocks());
+  EXPECT_EQ(0u, blockStore->removedBlocks().size());
+  EXPECT_EQ(2u, blockStore->distinctWrittenBlocks().size());
+  EXPECT_EQ(0u, blockStore->resizedBlocks().size());
+}
+
+TEST_F(DataTreeTest_Performance, TraverseLeaves_Threelevel_InOneInner_ByReading) {
+  auto blockId = CreateFullThreeLevel()->blockId();
+  auto tree = treeStore.load(blockId).value();
+  blockStore->resetCounters();
+
+  TraverseByReading(tree.get(), 3, 5);
+
+  EXPECT_EQ(5u, blockStore->loadedBlocks().size()); // reads 2 leaves and the inner node, also has to read the rightmost inner node and leaf additionally at the beginning to determine size
+  EXPECT_EQ(0u, blockStore->createdBlocks());
+  EXPECT_EQ(0u, blockStore->removedBlocks().size());
+  EXPECT_EQ(0u, blockStore->distinctWrittenBlocks().size());
+  EXPECT_EQ(0u, blockStore->resizedBlocks().size());
+}
+
+TEST_F(DataTreeTest_Performance, TraverseLeaves_Threelevel_InTwoInner_ByWriting) {
+  auto blockId = CreateFullThreeLevel()->blockId();
+  auto tree = treeStore.load(blockId).value();
+  blockStore->resetCounters();
+
+  TraverseByWriting(tree.get(), 3, 3 + maxChildrenPerInnerNode);
+
  EXPECT_EQ(2u, blockStore->loadedBlocks().size()); // Loads both inner node
  EXPECT_EQ(0u, blockStore->createdBlocks());
  EXPECT_EQ(0u, blockStore->removedBlocks().size());
-  EXPECT_EQ(0u, blockStore->distinctWrittenBlocks().size());
+  EXPECT_EQ(maxChildrenPerInnerNode, blockStore->distinctWrittenBlocks().size());
  EXPECT_EQ(0u, blockStore->resizedBlocks().size());
}

-TEST_F(DataTreeTest_Performance, TraverseLeaves_Threelevel_WholeInner) {
+TEST_F(DataTreeTest_Performance, TraverseLeaves_Threelevel_InTwoInner_ByReading) {
  auto blockId = CreateFullThreeLevel()->blockId();
  auto tree = treeStore.load(blockId).value();
  blockStore->resetCounters();

-  Traverse(tree.get(), maxChildrenPerInnerNode, 2*maxChildrenPerInnerNode);
+  TraverseByReading(tree.get(), 3, 3 + maxChildrenPerInnerNode);

-  EXPECT_EQ(1u, blockStore->loadedBlocks().size()); // Loads inner node. Doesn't load the leaves, but returns the keys of the leaves to the callback
+  EXPECT_EQ(4u + maxChildrenPerInnerNode, blockStore->loadedBlocks().size()); // Loads both inner nodes and the requested leaves. Also has to load rightmost inner node and leaf additionally in the beginning to determine size.
+  EXPECT_EQ(0u, blockStore->createdBlocks());
+  EXPECT_EQ(0u, blockStore->removedBlocks().size());
+  EXPECT_EQ(0u, blockStore->distinctWrittenBlocks().size());
+  EXPECT_EQ(0u, blockStore->resizedBlocks().size());
+}
+
+TEST_F(DataTreeTest_Performance, TraverseLeaves_Threelevel_WholeInner_ByWriting) {
+  auto blockId = CreateFullThreeLevel()->blockId();
+  auto tree = treeStore.load(blockId).value();
+  blockStore->resetCounters();
+
+  TraverseByWriting(tree.get(), maxChildrenPerInnerNode, 2*maxChildrenPerInnerNode);
+
+  EXPECT_EQ(1u, blockStore->loadedBlocks().size()); // Loads inner node. Doesn't load the leaves, they're just overwritten.
+  EXPECT_EQ(0u, blockStore->createdBlocks());
+  EXPECT_EQ(0u, blockStore->removedBlocks().size());
+  EXPECT_EQ(maxChildrenPerInnerNode, blockStore->distinctWrittenBlocks().size());
+  EXPECT_EQ(0u, blockStore->resizedBlocks().size());
+}
+
+TEST_F(DataTreeTest_Performance, TraverseLeaves_Threelevel_WholeInner_ByReading) {
+  auto blockId = CreateFullThreeLevel()->blockId();
+  auto tree = treeStore.load(blockId).value();
+  blockStore->resetCounters();
+
+  TraverseByReading(tree.get(), maxChildrenPerInnerNode, 2*maxChildrenPerInnerNode);
+
+  EXPECT_EQ(3u + maxChildrenPerInnerNode, blockStore->loadedBlocks().size()); // Loads inner node and all requested leaves. Also has to load rightmost inner node and leaf additionally in the beginning to determine size.
  EXPECT_EQ(0u, blockStore->createdBlocks());
  EXPECT_EQ(0u, blockStore->removedBlocks().size());
  EXPECT_EQ(0u, blockStore->distinctWrittenBlocks().size());
@@ -160,12 +254,12 @@ TEST_F(DataTreeTest_Performance, TraverseLeaves_GrowingTree_StartingInside) {
  auto tree = treeStore.load(blockId).value();
  blockStore->resetCounters();

-  Traverse(tree.get(), 1, 4);
+  TraverseByWriting(tree.get(), 1, 4);

  EXPECT_EQ(1u, blockStore->loadedBlocks().size()); // Loads last old child (for growing it)
  EXPECT_EQ(2u, blockStore->createdBlocks());
  EXPECT_EQ(0u, blockStore->removedBlocks().size());
-  EXPECT_EQ(1u, blockStore->distinctWrittenBlocks().size()); // add children to inner node
+  EXPECT_EQ(2u, blockStore->distinctWrittenBlocks().size()); // write the data and add children to inner node
  EXPECT_EQ(0u, blockStore->resizedBlocks().size());
}

@@ -174,7 +268,7 @@ TEST_F(DataTreeTest_Performance, TraverseLeaves_GrowingTree_StartingOutside_TwoL
  auto tree = treeStore.load(blockId).value();
  blockStore->resetCounters();

-  Traverse(tree.get(), 4, 5);
+  TraverseByWriting(tree.get(), 4, 5);

  EXPECT_EQ(1u, blockStore->loadedBlocks().size()); // Loads last old leaf for growing it
  EXPECT_EQ(3u, blockStore->createdBlocks());
@@ -188,7 +282,7 @@ TEST_F(DataTreeTest_Performance, TraverseLeaves_GrowingTree_StartingOutside_Thre
  auto tree = treeStore.load(blockId).value();
  blockStore->resetCounters();

-  Traverse(tree.get(), 2*maxChildrenPerInnerNode+1, 2*maxChildrenPerInnerNode+2);
+  TraverseByWriting(tree.get(), 2*maxChildrenPerInnerNode+1, 2*maxChildrenPerInnerNode+2);

  EXPECT_EQ(2u, blockStore->loadedBlocks().size()); // Loads last old leaf (and its inner node) for growing it
  EXPECT_EQ(3u, blockStore->createdBlocks()); // inner node and two leaves
@@ -202,12 +296,12 @@ TEST_F(DataTreeTest_Performance, TraverseLeaves_GrowingTree_StartingAtBeginOfChi
  auto tree = treeStore.load(blockId).value();
  blockStore->resetCounters();

-  Traverse(tree.get(), maxChildrenPerInnerNode, 3*maxChildrenPerInnerNode);
+  TraverseByWriting(tree.get(), maxChildrenPerInnerNode, 3*maxChildrenPerInnerNode);

  EXPECT_EQ(2u, blockStore->loadedBlocks().size()); // Loads inner node and one leaf to check whether we have to grow it. Doesn't load the leaves, but returns the keys of the leaves to the callback.
  EXPECT_EQ(1u + maxChildrenPerInnerNode, blockStore->createdBlocks()); // Creates an inner node and its leaves
  EXPECT_EQ(0u, blockStore->removedBlocks().size());
-  EXPECT_EQ(1u, blockStore->distinctWrittenBlocks().size()); // add children to existing inner node
+  EXPECT_EQ(maxChildrenPerInnerNode + 1u, blockStore->distinctWrittenBlocks().size()); // write data and add children to existing inner node
  EXPECT_EQ(0u, blockStore->resizedBlocks().size());
}

@@ -216,7 +310,7 @@ TEST_F(DataTreeTest_Performance, TraverseLeaves_GrowingTreeDepth_StartingInOldDe
  auto tree = treeStore.load(blockId).value();
  blockStore->resetCounters();

-  Traverse(tree.get(), 4, maxChildrenPerInnerNode+2);
+  TraverseByWriting(tree.get(), 4, maxChildrenPerInnerNode+2);

  EXPECT_EQ(1u, blockStore->loadedBlocks().size()); // Loads last old leaf for growing it
  EXPECT_EQ(2u + maxChildrenPerInnerNode, blockStore->createdBlocks()); // 2x new inner node + leaves
@@ -230,7 +324,7 @@ TEST_F(DataTreeTest_Performance, TraverseLeaves_GrowingTreeDepth_StartingInOldDe
  auto tree = treeStore.load(blockId).value();
  blockStore->resetCounters();

-  Traverse(tree.get(), 4, maxChildrenPerInnerNode+2);
+  TraverseByWriting(tree.get(), 4, maxChildrenPerInnerNode+2);

  EXPECT_EQ(1u, blockStore->loadedBlocks().size()); // Loads last old leaf for growing it
  EXPECT_EQ(2u + maxChildrenPerInnerNode, blockStore->createdBlocks()); // 2x new inner node + leaves
@@ -244,7 +338,7 @@ TEST_F(DataTreeTest_Performance, TraverseLeaves_GrowingTreeDepth_StartingInNewDe
  auto tree = treeStore.load(blockId).value();
  blockStore->resetCounters();

-  Traverse(tree.get(), maxChildrenPerInnerNode, maxChildrenPerInnerNode+2);
+  TraverseByWriting(tree.get(), maxChildrenPerInnerNode, maxChildrenPerInnerNode+2);

  EXPECT_EQ(1u, blockStore->loadedBlocks().size()); // Loads last old leaf for growing it
  EXPECT_EQ(2u + maxChildrenPerInnerNode, blockStore->createdBlocks()); // 2x new inner node + leaves
@@ -258,7 +352,7 @@ TEST_F(DataTreeTest_Performance, TraverseLeaves_GrowingTreeDepth_StartingInNewDe
  auto tree = treeStore.load(blockId).value();
  blockStore->resetCounters();

-  Traverse(tree.get(), maxChildrenPerInnerNode, maxChildrenPerInnerNode+2);
+  TraverseByWriting(tree.get(), maxChildrenPerInnerNode, maxChildrenPerInnerNode+2);

  EXPECT_EQ(1u, blockStore->loadedBlocks().size()); // Loads last old leaf for growing it
  EXPECT_EQ(2u + maxChildrenPerInnerNode, blockStore->createdBlocks()); // 2x new inner node + leaves
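The counter expectations above encode the intended I/O difference between the two traversal styles: writing whole, already-existing leaves does not load them (only the rightmost leaf is loaded so its size can be adapted), while reading must load every leaf it touches plus the rightmost border nodes once to determine the tree size. The small sketch below only restates the two-level "All" expectations from the tests above, assuming a full two-level tree with n = maxChildrenPerInnerNode leaves; it is not part of the test suite.

// Restating the loadedBlocks() expectations of the two-level "All" tests above,
// assuming a full two-level tree with n = maxChildrenPerInnerNode leaves.
#include <cstdint>

uint64_t expectedLoadsForWritingAllLeaves(uint64_t /*n*/) {
  // Only the rightmost leaf is loaded (to adapt its size); the others are overwritten blindly.
  return 1;
}

uint64_t expectedLoadsForReadingAllLeaves(uint64_t n) {
  // Every leaf is read once, plus one extra read of the rightmost leaf to determine the size.
  return 1 + n;
}

The three-level variants add the inner-node loads on top of these counts, which is where the maxChildrenPerInnerNode + 1 and maxChildrenPerInnerNode*maxChildrenPerInnerNode + maxChildrenPerInnerNode + 2 expectations above come from.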
@@ -19,7 +19,6 @@ using blobstore::onblocks::datanodestore::DataInnerNode;
using blobstore::onblocks::datanodestore::DataNode;
using blobstore::onblocks::datanodestore::DataNodeLayout;
using blobstore::onblocks::datatreestore::DataTree;
-using blobstore::onblocks::datatreestore::LeafHandle;
using blobstore::onblocks::utils::ceilDivision;
using blockstore::BlockId;
using cpputils::Data;
@@ -109,9 +108,13 @@ public:
    GrowTree(tree.get().get());
  }

-  void GrowTree(DataTree *tree, std::function<void (int32_t)> traverse = [] (uint32_t){}) {
+  void GrowTree(DataTree *tree) {
    uint64_t maxBytesPerLeaf = tree->maxBytesPerLeaf();
-    tree->traverseLeaves(traversalBeginIndex, newNumberOfLeaves, [&traverse] (uint32_t index, bool, LeafHandle){traverse(index);}, [maxBytesPerLeaf, &traverse] (uint32_t index) -> Data { traverse(index); return Data(maxBytesPerLeaf).FillWithZeroes();});
+    uint64_t offset = traversalBeginIndex * maxBytesPerLeaf;
+    uint64_t count = newNumberOfLeaves * maxBytesPerLeaf - offset;
+    Data data(count);
+    data.FillWithZeroes();
+    tree->writeBytes(data.data(), offset, count);
    tree->flush();
  }

@@ -163,7 +166,6 @@ INSTANTIATE_TEST_CASE_P(DataTreeTest_ResizeByTraversing_P, DataTreeTest_ResizeBy
  ),
  //Decide the traversal begin index
  Values(
-    [] (uint32_t /*oldNumberOfLeaves*/, uint32_t newNumberOfLeaves) {return newNumberOfLeaves;}, // Don't traverse any leaves, just resize (begin==end)
    [] (uint32_t /*oldNumberOfLeaves*/, uint32_t newNumberOfLeaves) {return newNumberOfLeaves-1;}, // Traverse last leaf (begin==end-1)
    [] (uint32_t oldNumberOfLeaves, uint32_t newNumberOfLeaves) {return (oldNumberOfLeaves+newNumberOfLeaves)/2;}, // Start traversal in middle of new leaves
    [] (uint32_t oldNumberOfLeaves, uint32_t /*newNumberOfLeaves*/) {return oldNumberOfLeaves-1;}, // Start traversal with last old leaf
@@ -189,9 +191,9 @@ TEST_P(DataTreeTest_ResizeByTraversing_P, NumLeavesIsCorrect_FromCache) {

TEST_P(DataTreeTest_ResizeByTraversing_P, NumLeavesIsCorrect) {
  GrowTree(tree.get());
-  // tree->_forceComputeNumLeaves() only goes down the right border nodes and expects the tree to be a left max data tree.
+  // tree->forceComputeNumLeaves() only goes down the right border nodes and expects the tree to be a left max data tree.
  // This is what the StructureIsValid test case is for.
-  EXPECT_EQ(newNumberOfLeaves, tree->_forceComputeNumLeaves());
+  EXPECT_EQ(newNumberOfLeaves, tree->forceComputeNumLeaves());
}

TEST_P(DataTreeTest_ResizeByTraversing_P, DepthFlagsAreCorrect) {
@@ -208,7 +210,8 @@ TEST_P(DataTreeTest_ResizeByTraversing_P, KeyDoesntChange) {
}

TEST_P(DataTreeTest_ResizeByTraversing_P, DataStaysIntact) {
-  uint32_t oldNumberOfLeaves = std::max(UINT64_C(1), ceilDivision(tree->numStoredBytes(), static_cast<uint64_t>(nodeStore->layout().maxBytesPerLeaf())));
+  uint32_t oldNumberOfLeaves = std::max(UINT64_C(1), ceilDivision(tree->numBytes(), static_cast<uint64_t>(nodeStore->layout().maxBytesPerLeaf())));
  TwoLevelDataFixture data(nodeStore, TwoLevelDataFixture::SizePolicy::Unchanged);
  BlockId blockId = tree->blockId();
  cpputils::destruct(std::move(tree));
@@ -216,15 +219,13 @@ TEST_P(DataTreeTest_ResizeByTraversing_P, DataStaysIntact) {

  GrowTree(blockId);

-  data.EXPECT_DATA_CORRECT(nodeStore->load(blockId).get().get(), oldNumberOfLeaves, oldLastLeafSize);
-}
-
-TEST_P(DataTreeTest_ResizeByTraversing_P, AllLeavesAreTraversed) {
-  std::vector<uint32_t> traversedLeaves;
-  GrowTree(tree.get(), [&traversedLeaves] (uint32_t index) {traversedLeaves.push_back(index);});
-
-  EXPECT_EQ(newNumberOfLeaves-traversalBeginIndex, traversedLeaves.size());
-  for (uint32_t i = traversalBeginIndex; i < newNumberOfLeaves; ++i) {
-    EXPECT_NE(traversedLeaves.end(), std::find(traversedLeaves.begin(), traversedLeaves.end(), i));
-  }
-}
+  if (traversalBeginIndex < oldNumberOfLeaves) {
+    // Traversal wrote over part of the pre-existing data, we can only check the data before it.
+    if (traversalBeginIndex != 0) {
+      data.EXPECT_DATA_CORRECT(nodeStore->load(blockId).get().get(), traversalBeginIndex - 1);
+    }
+  } else {
+    // Here, traversal was entirely outside the preexisting data, we can check all preexisting data.
+    data.EXPECT_DATA_CORRECT(nodeStore->load(blockId).get().get(), oldNumberOfLeaves, oldLastLeafSize);
+  }
+}
@@ -171,9 +171,9 @@ TEST_P(DataTreeTest_ResizeNumBytes_P, StructureIsValid) {
TEST_P(DataTreeTest_ResizeNumBytes_P, NumBytesIsCorrect) {
  tree->resizeNumBytes(newSize);
  tree->flush();
-  // tree->numStoredBytes() only goes down the right border nodes and expects the tree to be a left max data tree.
+  // tree->numBytes() only goes down the right border nodes and expects the tree to be a left max data tree.
  // This is what the StructureIsValid test case is for.
-  EXPECT_EQ(newSize, tree->numStoredBytes());
+  EXPECT_EQ(newSize, tree->numBytes());
}

TEST_P(DataTreeTest_ResizeNumBytes_P, NumLeavesIsCorrect) {
@@ -181,7 +181,7 @@ TEST_P(DataTreeTest_ResizeNumBytes_P, NumLeavesIsCorrect) {
  tree->flush();
  // tree->numLeaves() only goes down the right border nodes and expects the tree to be a left max data tree.
  // This is what the StructureIsValid test case is for.
-  EXPECT_EQ(newNumberOfLeaves, tree->_forceComputeNumLeaves());
+  EXPECT_EQ(newNumberOfLeaves, tree->forceComputeNumLeaves());
}

TEST_P(DataTreeTest_ResizeNumBytes_P, NumLeavesIsCorrect_FromCache) {
@@ -208,7 +208,7 @@ TEST_P(DataTreeTest_ResizeNumBytes_P, KeyDoesntChange) {
}

TEST_P(DataTreeTest_ResizeNumBytes_P, DataStaysIntact) {
-  uint32_t oldNumberOfLeaves = std::max(UINT64_C(1), ceilDivision(tree->numStoredBytes(), static_cast<uint64_t>(nodeStore->layout().maxBytesPerLeaf())));
+  uint32_t oldNumberOfLeaves = std::max(UINT64_C(1), ceilDivision(tree->numBytes(), static_cast<uint64_t>(nodeStore->layout().maxBytesPerLeaf())));
  TwoLevelDataFixture data(nodeStore, TwoLevelDataFixture::SizePolicy::Unchanged);
  BlockId blockId = tree->blockId();
  cpputils::destruct(std::move(tree));
@@ -1,4 +1,5 @@
#include "testutils/DataTreeTest.h"
+#include <blobstore/implementations/onblocks/datatreestore/impl/LeafTraverser.h>
#include <gmock/gmock.h>

using ::testing::_;
@@ -9,6 +10,7 @@ using blobstore::onblocks::datanodestore::DataLeafNode;
using blobstore::onblocks::datanodestore::DataInnerNode;
using blobstore::onblocks::datanodestore::DataNode;
using blobstore::onblocks::datatreestore::LeafHandle;
+using blobstore::onblocks::datatreestore::LeafTraverser;
using blockstore::BlockId;

using cpputils::unique_ref;
@@ -26,9 +28,9 @@ MATCHER_P(KeyEq, expected, "node blockId equals") {
  return arg->blockId() == expected;
}

-class DataTreeTest_TraverseLeaves: public DataTreeTest {
+class LeafTraverserTest: public DataTreeTest {
public:
-  DataTreeTest_TraverseLeaves() :traversor() {}
+  LeafTraverserTest() :traversor() {}

  unique_ref<DataInnerNode> CreateThreeLevel() {
    return CreateInner({
@@ -70,166 +72,172 @@ public:
    EXPECT_CALL(traversor, calledCreateLeaf(_)).Times(0);
  }

-  void TraverseLeaves(DataNode *root, uint32_t beginIndex, uint32_t endIndex) {
+  void TraverseLeaves(unique_ref<DataNode> root, uint32_t beginIndex, uint32_t endIndex, bool expectReadOnly) {
    root->flush();
    auto tree = treeStore.load(root->blockId()).value();
-    tree->traverseLeaves(beginIndex, endIndex, [this] (uint32_t nodeIndex, bool isRightBorderNode,LeafHandle leaf) {
+    auto* old_root = root.get();
+    LeafTraverser(nodeStore, expectReadOnly).traverseAndUpdateRoot(&root, beginIndex, endIndex, [this] (uint32_t nodeIndex, bool isRightBorderNode,LeafHandle leaf) {
      traversor.calledExistingLeaf(leaf.node(), isRightBorderNode, nodeIndex);
    }, [this] (uint32_t nodeIndex) -> Data {
      return traversor.calledCreateLeaf(nodeIndex)->copy();
-    });
+    }, [] (auto) {});
+    if (expectReadOnly) {
+      EXPECT_EQ(old_root, root.get());
+    } else {
+      EXPECT_NE(old_root, root.get());
+    }
  }

  TraversorMock traversor;
};

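The rewritten helper drives LeafTraverser::traverseAndUpdateRoot directly and hands over the root as an owned reference by pointer, so a traversal that grows the tree can replace the root node; the expectReadOnly flag then asserts whether the root pointer survived. The sketch below only illustrates that "callee may swap the owned root" pattern; std::unique_ptr stands in for cpputils::unique_ref, and Node/traverseAndMaybeGrow are illustrative names, not CryFS API.

// Sketch of the "pass the owned root by pointer so the traverser may replace it" pattern
// exercised by the updated TraverseLeaves() helper above. Illustrative only.
#include <cassert>
#include <memory>
#include <utility>

struct Node {
  int depth = 1;
};

// A traversal that needs more depth replaces *root with a new, deeper root.
void traverseAndMaybeGrow(std::unique_ptr<Node> *root, bool needsToGrow) {
  if (needsToGrow) {
    auto newRoot = std::make_unique<Node>();
    newRoot->depth = (*root)->depth + 1;
    *root = std::move(newRoot);  // old root is released, caller's pointer now differs
  }
}

int main() {
  auto root = std::make_unique<Node>();
  Node *oldRoot = root.get();

  traverseAndMaybeGrow(&root, /*needsToGrow=*/false);
  assert(oldRoot == root.get());  // read-only traversal keeps the same root

  traverseAndMaybeGrow(&root, /*needsToGrow=*/true);
  assert(oldRoot != root.get());  // growing traversal swapped the root

  return 0;
}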
TEST_F(DataTreeTest_TraverseLeaves, TraverseSingleLeafTree) {
|
TEST_F(LeafTraverserTest, TraverseSingleLeafTree) {
|
||||||
auto root = CreateLeaf();
|
unique_ref<DataNode> root = CreateLeaf();
|
||||||
EXPECT_TRAVERSE_LEAF(root->blockId(), true, 0);
|
EXPECT_TRAVERSE_LEAF(root->blockId(), true, 0);
|
||||||
|
|
||||||
TraverseLeaves(root.get(), 0, 1);
|
TraverseLeaves(std::move(root), 0, 1, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(DataTreeTest_TraverseLeaves, TraverseNothingInSingleLeafTree1) {
|
TEST_F(LeafTraverserTest, TraverseNothingInSingleLeafTree1) {
|
||||||
auto root = CreateLeaf();
|
unique_ref<DataNode> root = CreateLeaf();
|
||||||
EXPECT_DONT_TRAVERSE_ANY_LEAVES();
|
EXPECT_DONT_TRAVERSE_ANY_LEAVES();
|
||||||
|
|
||||||
TraverseLeaves(root.get(), 0, 0);
|
TraverseLeaves(std::move(root), 0, 0, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(DataTreeTest_TraverseLeaves, TraverseNothingInSingleLeafTree2) {
|
TEST_F(LeafTraverserTest, TraverseNothingInSingleLeafTree2) {
|
||||||
auto root = CreateLeaf();
|
unique_ref<DataNode> root = CreateLeaf();
|
||||||
EXPECT_DONT_TRAVERSE_ANY_LEAVES();
|
EXPECT_DONT_TRAVERSE_ANY_LEAVES();
|
||||||
|
|
||||||
TraverseLeaves(root.get(), 1, 1);
|
TraverseLeaves(std::move(root), 1, 1, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(DataTreeTest_TraverseLeaves, TraverseFirstLeafOfFullTwolevelTree) {
|
TEST_F(LeafTraverserTest, TraverseFirstLeafOfFullTwolevelTree) {
|
||||||
auto root = CreateFullTwoLevel();
|
auto root = CreateFullTwoLevel();
|
||||||
EXPECT_TRAVERSE_LEAF(root->readChild(0).blockId(), false, 0);
|
EXPECT_TRAVERSE_LEAF(root->readChild(0).blockId(), false, 0);
|
||||||
|
|
||||||
TraverseLeaves(root.get(), 0, 1);
|
TraverseLeaves(std::move(root), 0, 1, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(DataTreeTest_TraverseLeaves, TraverseMiddleLeafOfFullTwolevelTree) {
|
TEST_F(LeafTraverserTest, TraverseMiddleLeafOfFullTwolevelTree) {
|
||||||
auto root = CreateFullTwoLevel();
|
auto root = CreateFullTwoLevel();
|
||||||
EXPECT_TRAVERSE_LEAF(root->readChild(5).blockId(), false, 5);
|
EXPECT_TRAVERSE_LEAF(root->readChild(5).blockId(), false, 5);
|
||||||
|
|
||||||
TraverseLeaves(root.get(), 5, 6);
|
TraverseLeaves(std::move(root), 5, 6, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(DataTreeTest_TraverseLeaves, TraverseLastLeafOfFullTwolevelTree) {
|
TEST_F(LeafTraverserTest, TraverseLastLeafOfFullTwolevelTree) {
|
||||||
auto root = CreateFullTwoLevel();
|
auto root = CreateFullTwoLevel();
|
||||||
EXPECT_TRAVERSE_LEAF(root->readChild(nodeStore->layout().maxChildrenPerInnerNode()-1).blockId(), true, nodeStore->layout().maxChildrenPerInnerNode()-1);
|
EXPECT_TRAVERSE_LEAF(root->readChild(nodeStore->layout().maxChildrenPerInnerNode()-1).blockId(), true, nodeStore->layout().maxChildrenPerInnerNode()-1);
|
||||||
|
|
||||||
TraverseLeaves(root.get(), nodeStore->layout().maxChildrenPerInnerNode()-1, nodeStore->layout().maxChildrenPerInnerNode());
|
TraverseLeaves(std::move(root), nodeStore->layout().maxChildrenPerInnerNode()-1, nodeStore->layout().maxChildrenPerInnerNode(), true);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(DataTreeTest_TraverseLeaves, TraverseNothingInFullTwolevelTree1) {
|
TEST_F(LeafTraverserTest, TraverseNothingInFullTwolevelTree1) {
|
||||||
auto root = CreateFullTwoLevel();
|
auto root = CreateFullTwoLevel();
|
||||||
EXPECT_DONT_TRAVERSE_ANY_LEAVES();
|
EXPECT_DONT_TRAVERSE_ANY_LEAVES();
|
||||||
|
|
||||||
TraverseLeaves(root.get(), 0, 0);
|
TraverseLeaves(std::move(root), 0, 0, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(DataTreeTest_TraverseLeaves, TraverseNothingInFullTwolevelTree2) {
|
TEST_F(LeafTraverserTest, TraverseNothingInFullTwolevelTree2) {
|
||||||
auto root = CreateFullTwoLevel();
|
auto root = CreateFullTwoLevel();
|
||||||
EXPECT_DONT_TRAVERSE_ANY_LEAVES();
|
EXPECT_DONT_TRAVERSE_ANY_LEAVES();
|
||||||
|
|
||||||
TraverseLeaves(root.get(), nodeStore->layout().maxChildrenPerInnerNode(), nodeStore->layout().maxChildrenPerInnerNode());
|
TraverseLeaves(std::move(root), nodeStore->layout().maxChildrenPerInnerNode(), nodeStore->layout().maxChildrenPerInnerNode(), true);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(DataTreeTest_TraverseLeaves, TraverseFirstLeafOfThreeLevelMinDataTree) {
|
TEST_F(LeafTraverserTest, TraverseFirstLeafOfThreeLevelMinDataTree) {
|
||||||
auto root = CreateThreeLevelMinData();
|
auto root = CreateThreeLevelMinData();
|
||||||
EXPECT_TRAVERSE_LEAF(LoadInnerNode(root->readChild(0).blockId())->readChild(0).blockId(), false, 0);
|
EXPECT_TRAVERSE_LEAF(LoadInnerNode(root->readChild(0).blockId())->readChild(0).blockId(), false, 0);
|
||||||
|
|
||||||
TraverseLeaves(root.get(), 0, 1);
|
TraverseLeaves(std::move(root), 0, 1, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(DataTreeTest_TraverseLeaves, TraverseMiddleLeafOfThreeLevelMinDataTree) {
|
TEST_F(LeafTraverserTest, TraverseMiddleLeafOfThreeLevelMinDataTree) {
|
||||||
auto root = CreateThreeLevelMinData();
|
auto root = CreateThreeLevelMinData();
|
||||||
EXPECT_TRAVERSE_LEAF(LoadInnerNode(root->readChild(0).blockId())->readChild(5).blockId(), false, 5);
|
EXPECT_TRAVERSE_LEAF(LoadInnerNode(root->readChild(0).blockId())->readChild(5).blockId(), false, 5);
|
||||||
|
|
||||||
TraverseLeaves(root.get(), 5, 6);
|
TraverseLeaves(std::move(root), 5, 6, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(DataTreeTest_TraverseLeaves, TraverseLastLeafOfThreeLevelMinDataTree) {
|
TEST_F(LeafTraverserTest, TraverseLastLeafOfThreeLevelMinDataTree) {
|
||||||
auto root = CreateThreeLevelMinData();
|
auto root = CreateThreeLevelMinData();
|
||||||
EXPECT_TRAVERSE_LEAF(LoadInnerNode(root->readChild(1).blockId())->readChild(0).blockId(), true, nodeStore->layout().maxChildrenPerInnerNode());
|
EXPECT_TRAVERSE_LEAF(LoadInnerNode(root->readChild(1).blockId())->readChild(0).blockId(), true, nodeStore->layout().maxChildrenPerInnerNode());
|
||||||
|
|
||||||
TraverseLeaves(root.get(), nodeStore->layout().maxChildrenPerInnerNode(), nodeStore->layout().maxChildrenPerInnerNode()+1);
|
TraverseLeaves(std::move(root), nodeStore->layout().maxChildrenPerInnerNode(), nodeStore->layout().maxChildrenPerInnerNode()+1, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(DataTreeTest_TraverseLeaves, TraverseAllLeavesOfFullTwolevelTree) {
|
TEST_F(LeafTraverserTest, TraverseAllLeavesOfFullTwolevelTree) {
|
||||||
auto root = CreateFullTwoLevel();
|
auto root = CreateFullTwoLevel();
|
||||||
EXPECT_TRAVERSE_ALL_CHILDREN_OF(*root, true, 0);
|
EXPECT_TRAVERSE_ALL_CHILDREN_OF(*root, true, 0);
|
||||||
|
|
||||||
TraverseLeaves(root.get(), 0, nodeStore->layout().maxChildrenPerInnerNode());
|
TraverseLeaves(std::move(root), 0, nodeStore->layout().maxChildrenPerInnerNode(), true);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(DataTreeTest_TraverseLeaves, TraverseAllLeavesOfThreelevelMinDataTree) {
|
TEST_F(LeafTraverserTest, TraverseAllLeavesOfThreelevelMinDataTree) {
|
||||||
auto root = CreateThreeLevelMinData();
|
auto root = CreateThreeLevelMinData();
|
||||||
EXPECT_TRAVERSE_ALL_CHILDREN_OF(*LoadInnerNode(root->readChild(0).blockId()), false, 0);
|
EXPECT_TRAVERSE_ALL_CHILDREN_OF(*LoadInnerNode(root->readChild(0).blockId()), false, 0);
|
||||||
EXPECT_TRAVERSE_LEAF(LoadInnerNode(root->readChild(1).blockId())->readChild(0).blockId(), true, nodeStore->layout().maxChildrenPerInnerNode());
|
EXPECT_TRAVERSE_LEAF(LoadInnerNode(root->readChild(1).blockId())->readChild(0).blockId(), true, nodeStore->layout().maxChildrenPerInnerNode());
|
||||||
|
|
||||||
TraverseLeaves(root.get(), 0, nodeStore->layout().maxChildrenPerInnerNode()+1);
|
TraverseLeaves(std::move(root), 0, nodeStore->layout().maxChildrenPerInnerNode()+1, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(DataTreeTest_TraverseLeaves, TraverseFirstChildOfThreelevelMinDataTree) {
|
TEST_F(LeafTraverserTest, TraverseFirstChildOfThreelevelMinDataTree) {
|
||||||
auto root = CreateThreeLevelMinData();
|
auto root = CreateThreeLevelMinData();
|
||||||
EXPECT_TRAVERSE_ALL_CHILDREN_OF(*LoadInnerNode(root->readChild(0).blockId()), false, 0);
|
EXPECT_TRAVERSE_ALL_CHILDREN_OF(*LoadInnerNode(root->readChild(0).blockId()), false, 0);
|
||||||
|
|
||||||
TraverseLeaves(root.get(), 0, nodeStore->layout().maxChildrenPerInnerNode());
|
TraverseLeaves(std::move(root), 0, nodeStore->layout().maxChildrenPerInnerNode(), true);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(DataTreeTest_TraverseLeaves, TraverseFirstPartOfFullTwolevelTree) {
|
TEST_F(LeafTraverserTest, TraverseFirstPartOfFullTwolevelTree) {
|
||||||
auto root = CreateFullTwoLevel();
|
auto root = CreateFullTwoLevel();
|
||||||
for (unsigned int i = 0; i < 5; ++i) {
|
for (unsigned int i = 0; i < 5; ++i) {
|
||||||
EXPECT_TRAVERSE_LEAF(root->readChild(i).blockId(), false, i);
|
EXPECT_TRAVERSE_LEAF(root->readChild(i).blockId(), false, i);
|
||||||
}
|
}
|
||||||
|
|
||||||
TraverseLeaves(root.get(), 0, 5);
|
TraverseLeaves(std::move(root), 0, 5, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(DataTreeTest_TraverseLeaves, TraverseInnerPartOfFullTwolevelTree) {
|
TEST_F(LeafTraverserTest, TraverseInnerPartOfFullTwolevelTree) {
|
||||||
auto root = CreateFullTwoLevel();
|
auto root = CreateFullTwoLevel();
|
||||||
for (unsigned int i = 5; i < 10; ++i) {
|
for (unsigned int i = 5; i < 10; ++i) {
|
||||||
EXPECT_TRAVERSE_LEAF(root->readChild(i).blockId(), false, i);
|
EXPECT_TRAVERSE_LEAF(root->readChild(i).blockId(), false, i);
|
||||||
}
|
}
|
||||||
|
|
||||||
TraverseLeaves(root.get(), 5, 10);
|
TraverseLeaves(std::move(root), 5, 10, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(DataTreeTest_TraverseLeaves, TraverseLastPartOfFullTwolevelTree) {
|
TEST_F(LeafTraverserTest, TraverseLastPartOfFullTwolevelTree) {
|
||||||
auto root = CreateFullTwoLevel();
|
auto root = CreateFullTwoLevel();
|
||||||
for (unsigned int i = 5; i < nodeStore->layout().maxChildrenPerInnerNode(); ++i) {
|
for (unsigned int i = 5; i < nodeStore->layout().maxChildrenPerInnerNode(); ++i) {
|
||||||
EXPECT_TRAVERSE_LEAF(root->readChild(i).blockId(), i==nodeStore->layout().maxChildrenPerInnerNode()-1, i);
|
EXPECT_TRAVERSE_LEAF(root->readChild(i).blockId(), i==nodeStore->layout().maxChildrenPerInnerNode()-1, i);
|
||||||
}
|
}
|
||||||
|
|
||||||
TraverseLeaves(root.get(), 5, nodeStore->layout().maxChildrenPerInnerNode());
|
TraverseLeaves(std::move(root), 5, nodeStore->layout().maxChildrenPerInnerNode(), true);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(DataTreeTest_TraverseLeaves, TraverseFirstPartOfThreelevelMinDataTree) {
|
TEST_F(LeafTraverserTest, TraverseFirstPartOfThreelevelMinDataTree) {
|
||||||
auto root = CreateThreeLevelMinData();
|
auto root = CreateThreeLevelMinData();
|
||||||
auto node = LoadInnerNode(root->readChild(0).blockId());
|
auto node = LoadInnerNode(root->readChild(0).blockId());
|
||||||
for (unsigned int i = 0; i < 5; ++i) {
|
for (unsigned int i = 0; i < 5; ++i) {
|
||||||
EXPECT_TRAVERSE_LEAF(node->readChild(i).blockId(), false, i);
|
EXPECT_TRAVERSE_LEAF(node->readChild(i).blockId(), false, i);
|
||||||
}
|
}
|
||||||
|
|
||||||
TraverseLeaves(root.get(), 0, 5);
|
TraverseLeaves(std::move(root), 0, 5, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(DataTreeTest_TraverseLeaves, TraverseInnerPartOfThreelevelMinDataTree) {
|
TEST_F(LeafTraverserTest, TraverseInnerPartOfThreelevelMinDataTree) {
|
||||||
auto root = CreateThreeLevelMinData();
|
auto root = CreateThreeLevelMinData();
|
||||||
auto node = LoadInnerNode(root->readChild(0).blockId());
|
auto node = LoadInnerNode(root->readChild(0).blockId());
|
||||||
for (unsigned int i = 5; i < 10; ++i) {
|
for (unsigned int i = 5; i < 10; ++i) {
|
||||||
EXPECT_TRAVERSE_LEAF(node->readChild(i).blockId(), false, i);
|
EXPECT_TRAVERSE_LEAF(node->readChild(i).blockId(), false, i);
|
||||||
}
|
}
|
||||||
|
|
||||||
TraverseLeaves(root.get(), 5, 10);
|
TraverseLeaves(std::move(root), 5, 10, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
-TEST_F(DataTreeTest_TraverseLeaves, TraverseLastPartOfThreelevelMinDataTree) {
+TEST_F(LeafTraverserTest, TraverseLastPartOfThreelevelMinDataTree) {
   auto root = CreateThreeLevelMinData();
   auto node = LoadInnerNode(root->readChild(0).blockId());
   for (unsigned int i = 5; i < nodeStore->layout().maxChildrenPerInnerNode(); ++i) {
@@ -237,33 +245,33 @@ TEST_F(DataTreeTest_TraverseLeaves, TraverseLastPartOfThreelevelMinDataTree) {
   }
   EXPECT_TRAVERSE_LEAF(LoadInnerNode(root->readChild(1).blockId())->readChild(0).blockId(), true, nodeStore->layout().maxChildrenPerInnerNode());

-  TraverseLeaves(root.get(), 5, nodeStore->layout().maxChildrenPerInnerNode()+1);
+  TraverseLeaves(std::move(root), 5, nodeStore->layout().maxChildrenPerInnerNode()+1, true);
 }

-TEST_F(DataTreeTest_TraverseLeaves, TraverseFirstLeafOfThreelevelTree) {
+TEST_F(LeafTraverserTest, TraverseFirstLeafOfThreelevelTree) {
   auto root = CreateThreeLevel();
   EXPECT_TRAVERSE_LEAF(LoadInnerNode(root->readChild(0).blockId())->readChild(0).blockId(), false, 0);

-  TraverseLeaves(root.get(), 0, 1);
+  TraverseLeaves(std::move(root), 0, 1, true);
 }

-TEST_F(DataTreeTest_TraverseLeaves, TraverseLastLeafOfThreelevelTree) {
+TEST_F(LeafTraverserTest, TraverseLastLeafOfThreelevelTree) {
   auto root = CreateThreeLevel();
   uint32_t numLeaves = nodeStore->layout().maxChildrenPerInnerNode() * 5 + 3;
   EXPECT_TRAVERSE_LEAF(LoadInnerNode(root->readLastChild().blockId())->readLastChild().blockId(), true, numLeaves-1);

-  TraverseLeaves(root.get(), numLeaves-1, numLeaves);
+  TraverseLeaves(std::move(root), numLeaves-1, numLeaves, true);
 }

-TEST_F(DataTreeTest_TraverseLeaves, TraverseMiddleLeafOfThreelevelTree) {
+TEST_F(LeafTraverserTest, TraverseMiddleLeafOfThreelevelTree) {
   auto root = CreateThreeLevel();
   uint32_t wantedLeafIndex = nodeStore->layout().maxChildrenPerInnerNode() * 2 + 5;
   EXPECT_TRAVERSE_LEAF(LoadInnerNode(root->readChild(2).blockId())->readChild(5).blockId(), false, wantedLeafIndex);

-  TraverseLeaves(root.get(), wantedLeafIndex, wantedLeafIndex+1);
+  TraverseLeaves(std::move(root), wantedLeafIndex, wantedLeafIndex+1, true);
 }

-TEST_F(DataTreeTest_TraverseLeaves, TraverseFirstPartOfThreelevelTree) {
+TEST_F(LeafTraverserTest, TraverseFirstPartOfThreelevelTree) {
   auto root = CreateThreeLevel();
   //Traverse all leaves in the first two children of the root
   for(unsigned int i = 0; i < 2; ++i) {
@@ -275,10 +283,10 @@ TEST_F(DataTreeTest_TraverseLeaves, TraverseFirstPartOfThreelevelTree) {
     EXPECT_TRAVERSE_LEAF(child->readChild(i).blockId(), false, 2 * nodeStore->layout().maxChildrenPerInnerNode() + i);
   }

-  TraverseLeaves(root.get(), 0, 2 * nodeStore->layout().maxChildrenPerInnerNode() + 5);
+  TraverseLeaves(std::move(root), 0, 2 * nodeStore->layout().maxChildrenPerInnerNode() + 5, true);
 }

-TEST_F(DataTreeTest_TraverseLeaves, TraverseMiddlePartOfThreelevelTree_OnlyFullChildren) {
+TEST_F(LeafTraverserTest, TraverseMiddlePartOfThreelevelTree_OnlyFullChildren) {
   auto root = CreateThreeLevel();
   //Traverse some of the leaves in the second child of the root
   auto child = LoadInnerNode(root->readChild(1).blockId());
@@ -295,10 +303,10 @@ TEST_F(DataTreeTest_TraverseLeaves, TraverseMiddlePartOfThreelevelTree_OnlyFullChildren) {
     EXPECT_TRAVERSE_LEAF(child->readChild(i).blockId(), false, 4 * nodeStore->layout().maxChildrenPerInnerNode() + i);
   }

-  TraverseLeaves(root.get(), nodeStore->layout().maxChildrenPerInnerNode() + 5, 4 * nodeStore->layout().maxChildrenPerInnerNode() + 5);
+  TraverseLeaves(std::move(root), nodeStore->layout().maxChildrenPerInnerNode() + 5, 4 * nodeStore->layout().maxChildrenPerInnerNode() + 5, true);
 }

-TEST_F(DataTreeTest_TraverseLeaves, TraverseMiddlePartOfThreelevelTree_AlsoLastNonfullChild) {
+TEST_F(LeafTraverserTest, TraverseMiddlePartOfThreelevelTree_AlsoLastNonfullChild) {
   auto root = CreateThreeLevel();
   //Traverse some of the leaves in the second child of the root
   auto child = LoadInnerNode(root->readChild(1).blockId());
@@ -315,10 +323,10 @@ TEST_F(DataTreeTest_TraverseLeaves, TraverseMiddlePartOfThreelevelTree_AlsoLastNonfullChild) {
     EXPECT_TRAVERSE_LEAF(child->readChild(i).blockId(), false, 5 * nodeStore->layout().maxChildrenPerInnerNode() + i);
   }

-  TraverseLeaves(root.get(), nodeStore->layout().maxChildrenPerInnerNode() + 5, 5 * nodeStore->layout().maxChildrenPerInnerNode() + 2);
+  TraverseLeaves(std::move(root), nodeStore->layout().maxChildrenPerInnerNode() + 5, 5 * nodeStore->layout().maxChildrenPerInnerNode() + 2, true);
 }

-TEST_F(DataTreeTest_TraverseLeaves, TraverseLastPartOfThreelevelTree) {
+TEST_F(LeafTraverserTest, TraverseLastPartOfThreelevelTree) {
   auto root = CreateThreeLevel();
   //Traverse some of the leaves in the second child of the root
   auto child = LoadInnerNode(root->readChild(1).blockId());
@@ -335,10 +343,10 @@ TEST_F(DataTreeTest_TraverseLeaves, TraverseLastPartOfThreelevelTree) {
     EXPECT_TRAVERSE_LEAF(child->readChild(i).blockId(), i == child->numChildren()-1, 5 * nodeStore->layout().maxChildrenPerInnerNode() + i);
   }

-  TraverseLeaves(root.get(), nodeStore->layout().maxChildrenPerInnerNode() + 5, 5 * nodeStore->layout().maxChildrenPerInnerNode() + child->numChildren());
+  TraverseLeaves(std::move(root), nodeStore->layout().maxChildrenPerInnerNode() + 5, 5 * nodeStore->layout().maxChildrenPerInnerNode() + child->numChildren(), true);
 }

-TEST_F(DataTreeTest_TraverseLeaves, TraverseAllLeavesOfThreelevelTree) {
+TEST_F(LeafTraverserTest, TraverseAllLeavesOfThreelevelTree) {
   auto root = CreateThreeLevel();
   //Traverse all leaves in the third, fourth and fifth child of the root
   for(unsigned int i = 0; i < 5; ++i) {
@@ -350,10 +358,10 @@ TEST_F(DataTreeTest_TraverseLeaves, TraverseAllLeavesOfThreelevelTree) {
     EXPECT_TRAVERSE_LEAF(child->readChild(i).blockId(), i==child->numChildren()-1, 5 * nodeStore->layout().maxChildrenPerInnerNode() + i);
   }

-  TraverseLeaves(root.get(), 0, 5 * nodeStore->layout().maxChildrenPerInnerNode() + child->numChildren());
+  TraverseLeaves(std::move(root), 0, 5 * nodeStore->layout().maxChildrenPerInnerNode() + child->numChildren(), true);
 }

-TEST_F(DataTreeTest_TraverseLeaves, TraverseAllLeavesOfFourLevelTree) {
+TEST_F(LeafTraverserTest, TraverseAllLeavesOfFourLevelTree) {
   auto root = CreateFourLevel();
   //Traverse all leaves of the full threelevel tree in the first child
   auto firstChild = LoadInnerNode(root->readChild(0).blockId());
@@ -370,10 +378,10 @@ TEST_F(DataTreeTest_TraverseLeaves, TraverseAllLeavesOfFourLevelTree) {
   EXPECT_TRAVERSE_ALL_CHILDREN_OF(*LoadInnerNode(thirdChild->readChild(0).blockId()), false, 2 * nodeStore->layout().maxChildrenPerInnerNode() * nodeStore->layout().maxChildrenPerInnerNode());
   EXPECT_TRAVERSE_LEAF(LoadInnerNode(thirdChild->readChild(1).blockId())->readChild(0).blockId(), true, 2 * nodeStore->layout().maxChildrenPerInnerNode() * nodeStore->layout().maxChildrenPerInnerNode() + nodeStore->layout().maxChildrenPerInnerNode());

-  TraverseLeaves(root.get(), 0, 2*nodeStore->layout().maxChildrenPerInnerNode()*nodeStore->layout().maxChildrenPerInnerNode() + nodeStore->layout().maxChildrenPerInnerNode() + 1);
+  TraverseLeaves(std::move(root), 0, 2*nodeStore->layout().maxChildrenPerInnerNode()*nodeStore->layout().maxChildrenPerInnerNode() + nodeStore->layout().maxChildrenPerInnerNode() + 1, true);
 }

-TEST_F(DataTreeTest_TraverseLeaves, TraverseMiddlePartOfFourLevelTree) {
+TEST_F(LeafTraverserTest, TraverseMiddlePartOfFourLevelTree) {
   auto root = CreateFourLevel();
   //Traverse some leaves of the full threelevel tree in the first child
   auto firstChild = LoadInnerNode(root->readChild(0).blockId());
@@ -396,14 +404,15 @@ TEST_F(DataTreeTest_TraverseLeaves, TraverseMiddlePartOfFourLevelTree) {
     EXPECT_TRAVERSE_LEAF(firstChildOfThirdChild->readChild(i).blockId(), false, 2 * nodeStore->layout().maxChildrenPerInnerNode()*nodeStore->layout().maxChildrenPerInnerNode()+i);
   }

-  TraverseLeaves(root.get(), nodeStore->layout().maxChildrenPerInnerNode()+5, 2*nodeStore->layout().maxChildrenPerInnerNode()*nodeStore->layout().maxChildrenPerInnerNode() + nodeStore->layout().maxChildrenPerInnerNode() -1);
+  TraverseLeaves(std::move(root), nodeStore->layout().maxChildrenPerInnerNode()+5, 2*nodeStore->layout().maxChildrenPerInnerNode()*nodeStore->layout().maxChildrenPerInnerNode() + nodeStore->layout().maxChildrenPerInnerNode() -1, true);
 }

-TEST_F(DataTreeTest_TraverseLeaves, LastLeafIsAlreadyResizedInCallback) {
+TEST_F(LeafTraverserTest, LastLeafIsAlreadyResizedInCallback) {
-  auto root = CreateLeaf();
+  unique_ref<DataNode> root = CreateLeaf();
   root->flush();
+  auto* old_root = root.get();
   auto tree = treeStore.load(root->blockId()).value();
-  tree->traverseLeaves(0, 2, [this] (uint32_t leafIndex, bool /*isRightBorderNode*/, LeafHandle leaf) {
+  LeafTraverser(nodeStore, false).traverseAndUpdateRoot(&root, 0, 2, [this] (uint32_t leafIndex, bool /*isRightBorderNode*/, LeafHandle leaf) {
     if (leafIndex == 0) {
       EXPECT_EQ(nodeStore->layout().maxBytesPerLeaf(), leaf.node()->numBytes());
     } else {
@@ -411,28 +420,31 @@ TEST_F(DataTreeTest_TraverseLeaves, LastLeafIsAlreadyResizedInCallback) {
     }
   }, [] (uint32_t /*nodeIndex*/) -> Data {
     return Data(1);
-  });
+  }, [] (auto) {});
+  EXPECT_NE(old_root, root.get()); // expect that we grew the tree
 }

-TEST_F(DataTreeTest_TraverseLeaves, LastLeafIsAlreadyResizedInCallback_TwoLevel) {
+TEST_F(LeafTraverserTest, LastLeafIsAlreadyResizedInCallback_TwoLevel) {
-  auto root = CreateFullTwoLevelWithLastLeafSize(5);
+  unique_ref<DataNode> root = CreateFullTwoLevelWithLastLeafSize(5);
   root->flush();
+  auto* old_root = root.get();
   auto tree = treeStore.load(root->blockId()).value();
-  tree->traverseLeaves(0, nodeStore->layout().maxChildrenPerInnerNode()+1, [this] (uint32_t /*leafIndex*/, bool /*isRightBorderNode*/, LeafHandle leaf) {
+  LeafTraverser(nodeStore, false).traverseAndUpdateRoot(&root, 0, nodeStore->layout().maxChildrenPerInnerNode()+1, [this] (uint32_t /*leafIndex*/, bool /*isRightBorderNode*/, LeafHandle leaf) {
     EXPECT_EQ(nodeStore->layout().maxBytesPerLeaf(), leaf.node()->numBytes());
   }, [] (uint32_t /*nodeIndex*/) -> Data {
     return Data(1);
-  });
+  }, [] (auto) {});
+  EXPECT_NE(old_root, root.get()); // expect that we grew the tree
 }

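The two tests above also show the new traversal entry point directly: instead of calling traverseLeaves on the loaded tree, the code now constructs a LeafTraverser and calls traverseAndUpdateRoot with a pointer to the root node and three callbacks. Below is a condensed, illustrative restatement of that call pattern, not part of this commit, assuming the surrounding LeafTraverserTest fixture (CreateLeaf, nodeStore, LeafHandle, Data) and gtest assertions:

// Illustrative only; mirrors the call pattern of the two tests above.
unique_ref<DataNode> root = CreateLeaf();
root->flush();
auto* old_root = root.get();

LeafTraverser(nodeStore, false).traverseAndUpdateRoot(
    &root, 0, 2,
    // invoked for each leaf that already exists in the traversed range
    [this] (uint32_t /*leafIndex*/, bool /*isRightBorderLeaf*/, LeafHandle leaf) {
      EXPECT_LE(leaf.node()->numBytes(), nodeStore->layout().maxBytesPerLeaf());
    },
    // invoked to supply the data for each leaf that has to be created
    [] (uint32_t /*leafIndex*/) -> Data {
      return Data(1);
    },
    // the third callback is ignored by these tests; its exact semantics are not shown in this diff
    [] (auto) {});

// traversing past the single existing leaf grows the tree, so the root node is replaced
EXPECT_NE(old_root, root.get());
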
-TEST_F(DataTreeTest_TraverseLeaves, ResizeFromOneLeafToMultipleLeaves) {
+TEST_F(LeafTraverserTest, ResizeFromOneLeafToMultipleLeaves) {
   auto root = CreateLeaf();
   EXPECT_TRAVERSE_LEAF(root->blockId(), false, 0);
   //EXPECT_CALL(traversor, calledExistingLeaf(_, false, 0)).Times(1);
   for (uint32_t i = 1; i < 10; ++i) {
     EXPECT_CREATE_LEAF(i);
   }
-  TraverseLeaves(root.get(), 0, 10);
+  TraverseLeaves(std::move(root), 0, 10, false);
 }

-//TODO Refactor the test cases that are too long
+////TODO Refactor the test cases that are too long