2015-01-23 04:39:36 +01:00
# include "DataTree.h"
2015-02-17 00:40:34 +01:00
# include "../datanodestore/DataNodeStore.h"
# include "../datanodestore/DataInnerNode.h"
# include "../datanodestore/DataLeafNode.h"
2015-02-26 20:19:12 +01:00
# include "../utils/Math.h"
2015-01-23 04:39:36 +01:00
2015-02-21 01:58:23 +01:00
# include "impl/algorithms.h"
2015-01-26 00:38:06 +01:00
2015-06-21 17:43:45 +02:00
# include "messmer/cpp-utils/pointer/cast.h"
# include "messmer/cpp-utils/pointer/optional_ownership_ptr.h"
2015-02-25 01:31:16 +01:00
# include <cmath>
2015-07-22 13:42:44 +02:00
# include <messmer/cpp-utils/assert/assert.h>
2015-01-23 04:39:36 +01:00
using blockstore : : Key ;
using blobstore : : onblocks : : datanodestore : : DataNodeStore ;
using blobstore : : onblocks : : datanodestore : : DataNode ;
using blobstore : : onblocks : : datanodestore : : DataInnerNode ;
using blobstore : : onblocks : : datanodestore : : DataLeafNode ;
2015-02-26 20:19:12 +01:00
using blobstore : : onblocks : : datanodestore : : DataNodeLayout ;
2015-01-23 04:39:36 +01:00
2015-01-23 18:32:26 +01:00
using std : : dynamic_pointer_cast ;
using std : : function ;
2015-04-09 16:10:57 +02:00
using boost : : shared_mutex ;
using boost : : shared_lock ;
using boost : : unique_lock ;
2015-06-26 15:59:18 +02:00
using boost : : none ;
2015-04-10 21:52:30 +02:00
using std : : vector ;
2015-01-23 04:39:36 +01:00
2015-02-17 00:40:34 +01:00
using cpputils : : dynamic_pointer_move ;
using cpputils : : optional_ownership_ptr ;
2015-02-26 17:33:47 +01:00
using cpputils : : WithOwnership ;
using cpputils : : WithoutOwnership ;
2015-06-26 15:59:18 +02:00
using cpputils : : unique_ref ;
2015-01-23 04:39:36 +01:00
namespace blobstore {
namespace onblocks {
namespace datatreestore {
2015-06-26 15:59:18 +02:00
// Construct a tree over the given root node. The node store is borrowed
// (not owned); ownership of the root node is transferred into the tree.
DataTree::DataTree(DataNodeStore *nodeStore, unique_ref<DataNode> rootNode)
  : _mutex(), _nodeStore(nodeStore), _rootNode(std::move(rootNode)) {
}

DataTree::~DataTree() {
}
2015-02-21 01:58:23 +01:00
void DataTree : : removeLastDataLeaf ( ) {
2015-02-22 19:30:42 +01:00
auto deletePosOrNull = algorithms : : GetLowestRightBorderNodeWithMoreThanOneChildOrNull ( _nodeStore , _rootNode . get ( ) ) ;
2015-07-22 13:42:44 +02:00
ASSERT ( deletePosOrNull . get ( ) ! = nullptr , " Tree has only one leaf, can't shrink it. " ) ;
2015-02-22 19:30:42 +01:00
deleteLastChildSubtree ( deletePosOrNull . get ( ) ) ;
ifRootHasOnlyOneChildReplaceRootWithItsChild ( ) ;
}
void DataTree : : ifRootHasOnlyOneChildReplaceRootWithItsChild ( ) {
2015-02-21 01:58:23 +01:00
DataInnerNode * rootNode = dynamic_cast < DataInnerNode * > ( _rootNode . get ( ) ) ;
2015-07-22 13:42:44 +02:00
ASSERT ( rootNode ! = nullptr , " RootNode is not an inner node " ) ;
2015-02-22 19:30:42 +01:00
if ( rootNode - > numChildren ( ) = = 1 ) {
auto child = _nodeStore - > load ( rootNode - > getChild ( 0 ) - > key ( ) ) ;
2015-07-22 13:42:44 +02:00
ASSERT ( child ! = none , " Couldn't load first child of root node " ) ;
2015-06-26 15:59:18 +02:00
_rootNode = _nodeStore - > overwriteNodeWith ( std : : move ( _rootNode ) , * * child ) ;
_nodeStore - > remove ( std : : move ( * child ) ) ;
2015-02-22 19:30:42 +01:00
}
}
2015-02-21 01:58:23 +01:00
2015-02-22 19:30:42 +01:00
void DataTree : : deleteLastChildSubtree ( DataInnerNode * node ) {
2015-02-24 23:11:20 +01:00
auto lastChild = _nodeStore - > load ( node - > LastChild ( ) - > key ( ) ) ;
2015-07-22 13:42:44 +02:00
ASSERT ( lastChild ! = none , " Couldn't load last child " ) ;
2015-06-26 15:59:18 +02:00
_nodeStore - > removeSubtree ( std : : move ( * lastChild ) ) ;
2015-02-22 19:30:42 +01:00
node - > removeLastChild ( ) ;
}
2015-06-26 15:59:18 +02:00
// Grow the tree by one (empty) leaf and return it.
unique_ref<DataLeafNode> DataTree::addDataLeaf() {
  auto insertPosOrNull = algorithms::GetLowestInnerRightBorderNodeWithLessThanKChildrenOrNull(_nodeStore, _rootNode.get());
  if (!insertPosOrNull) {
    // Every right-border node is full - the tree has to grow in depth first.
    return addDataLeafToFullTree();
  }
  return addDataLeafAt(insertPosOrNull.get());
}
2015-06-26 15:59:18 +02:00
// Add a new leaf below 'insertPos', building a chain of inner nodes when
// 'insertPos' is more than one level above leaf level.
unique_ref<DataLeafNode> DataTree::addDataLeafAt(DataInnerNode *insertPos) {
  auto newLeaf = _nodeStore->createNewLeafNode();
  auto chain = createChainOfInnerNodes(insertPos->depth() - 1, newLeaf.get());
  insertPos->addChild(*chain);
  return newLeaf;
}
2015-04-10 21:52:30 +02:00
// Build 'num' inner nodes stacked on top of 'child' and return the topmost
// one. For num == 0 this returns 'child' itself (without taking ownership).
//TODO This function is implemented twice, once with optional_ownership_ptr, once with unique_ref. Redundancy!
optional_ownership_ptr<DataNode> DataTree::createChainOfInnerNodes(unsigned int num, DataNode *child) {
  optional_ownership_ptr<DataNode> top = cpputils::WithoutOwnership<DataNode>(child);
  for (unsigned int level = 0; level < num; ++level) {
    auto parent = _nodeStore->createNewInnerNode(*top);
    top = cpputils::WithOwnership<DataNode>(std::move(parent));
  }
  return top;
}
2015-06-26 15:59:18 +02:00
// Same as the pointer-based overload, but transfers ownership of 'child'
// into the chain and returns an owning pointer to the topmost node.
unique_ref<DataNode> DataTree::createChainOfInnerNodes(unsigned int num, unique_ref<DataNode> child) {
  unique_ref<DataNode> top = std::move(child);
  for (unsigned int level = 0; level < num; ++level) {
    top = _nodeStore->createNewInnerNode(*top);
  }
  return top;
}
// Add 'levels' new levels on top of the tree. The root block keeps its key:
// the old root content is copied into a fresh node, and the root block itself
// is converted into an inner node pointing (via a chain) at that copy.
// Returns a non-owning pointer to the new root; the tree keeps ownership.
DataInnerNode *DataTree::increaseTreeDepth(unsigned int levels) {
  ASSERT(levels >= 1, "Parameter out of bounds: tried to increase tree depth by zero.");
  auto copyOfOldRoot = _nodeStore->createNewNodeAsCopyFrom(*_rootNode);
  auto chain = createChainOfInnerNodes(levels - 1, copyOfOldRoot.get());
  auto newRoot = DataNode::convertToNewInnerNode(std::move(_rootNode), *chain);
  DataInnerNode *result = newRoot.get();
  _rootNode = std::move(newRoot);
  return result;
}
2015-06-26 15:59:18 +02:00
// Grow a completely full tree: increase depth by one level, then insert the
// new leaf below the new root.
unique_ref<DataLeafNode> DataTree::addDataLeafToFullTree() {
  DataInnerNode *newRoot = increaseTreeDepth(1);
  return addDataLeafAt(newRoot);
}
2015-01-27 00:54:25 +01:00
// The tree is identified by the key of its root node.
const Key &DataTree::key() const {
  return _rootNode->key();
}
2015-01-23 04:39:36 +01:00
2015-01-28 01:02:32 +01:00
void DataTree : : flush ( ) const {
2015-09-30 13:17:35 +02:00
// By grabbing a lock, we ensure that all modifying functions don't run currently and are therefore flushed
unique_lock < shared_mutex > lock ( _mutex ) ;
// We also have to flush the root node
2015-01-28 01:02:32 +01:00
_rootNode - > flush ( ) ;
}
2015-06-26 15:59:18 +02:00
// Hand ownership of the root node to the caller. The tree must not be used
// afterwards (its root pointer is left in a moved-from state).
unique_ref<DataNode> DataTree::releaseRootNode() {
  return std::move(_rootNode);
}
2015-04-10 22:57:29 +02:00
//TODO Test numLeaves(), for example also two configurations with same number of bytes but different number of leaves (last leaf has 0 bytes)
2015-04-10 21:52:30 +02:00
uint32_t DataTree : : numLeaves ( ) const {
2015-09-30 10:02:06 +02:00
shared_lock < shared_mutex > lock ( _mutex ) ;
2015-04-10 22:57:29 +02:00
return _numLeaves ( * _rootNode ) ;
}
uint32_t DataTree : : _numLeaves ( const DataNode & node ) const {
const DataLeafNode * leaf = dynamic_cast < const DataLeafNode * > ( & node ) ;
if ( leaf ! = nullptr ) {
return 1 ;
2015-04-10 21:52:30 +02:00
}
2015-04-10 22:57:29 +02:00
const DataInnerNode & inner = dynamic_cast < const DataInnerNode & > ( node ) ;
uint64_t numLeavesInLeftChildren = ( inner . numChildren ( ) - 1 ) * leavesPerFullChild ( inner ) ;
auto lastChild = _nodeStore - > load ( inner . LastChild ( ) - > key ( ) ) ;
2015-07-22 13:42:44 +02:00
ASSERT ( lastChild ! = none , " Couldn't load last child " ) ;
2015-06-26 15:59:18 +02:00
uint64_t numLeavesInRightChild = _numLeaves ( * * lastChild ) ;
2015-04-10 22:57:29 +02:00
return numLeavesInLeftChildren + numLeavesInRightChild ;
2015-03-04 03:17:59 +01:00
}
2015-04-10 21:52:30 +02:00
void DataTree : : traverseLeaves ( uint32_t beginIndex , uint32_t endIndex , function < void ( DataLeafNode * , uint32_t ) > func ) {
2015-12-14 17:18:12 +01:00
//TODO Can we traverse in parallel?
2015-09-30 10:06:53 +02:00
unique_lock < shared_mutex > lock ( _mutex ) ; //TODO Only lock when resizing. Otherwise parallel read/write to a blob is not possible!
2015-07-22 13:42:44 +02:00
ASSERT ( beginIndex < = endIndex , " Invalid parameters " ) ;
2015-12-11 12:48:24 +01:00
if ( 0 = = endIndex ) {
// In this case the utils::ceilLog(_, endIndex) below would fail
return ;
}
2015-04-10 21:52:30 +02:00
uint8_t neededTreeDepth = utils : : ceilLog ( _nodeStore - > layout ( ) . maxChildrenPerInnerNode ( ) , endIndex ) ;
2015-10-05 03:54:18 +02:00
uint32_t numLeaves = this - > _numLeaves ( * _rootNode ) ; // TODO Querying the size causes a tree traversal down to the leaves. Possible without querying the size?
2015-04-10 21:52:30 +02:00
if ( _rootNode - > depth ( ) < neededTreeDepth ) {
//TODO Test cases that actually increase it here by 0 level / 1 level / more than 1 level
increaseTreeDepth ( neededTreeDepth - _rootNode - > depth ( ) ) ;
}
2015-04-10 22:57:29 +02:00
if ( numLeaves < = beginIndex ) {
2015-04-10 21:52:30 +02:00
//TODO Test cases with numLeaves < / >= beginIndex
2015-04-10 22:57:29 +02:00
// There is a gap between the current size and the begin of the traversal
return _traverseLeaves ( _rootNode . get ( ) , 0 , numLeaves - 1 , endIndex , [ beginIndex , numLeaves , & func , this ] ( DataLeafNode * node , uint32_t index ) {
2015-04-10 21:52:30 +02:00
if ( index > = beginIndex ) {
func ( node , index ) ;
2015-04-10 22:57:29 +02:00
} else if ( index = = numLeaves - 1 ) {
// It is the old last leaf - resize it to maximum
node - > resize ( _nodeStore - > layout ( ) . maxBytesPerLeaf ( ) ) ;
}
} ) ;
} else if ( numLeaves < endIndex ) {
// We are starting traversal in the valid region, but traverse until after it (we grow new leaves)
return _traverseLeaves ( _rootNode . get ( ) , 0 , beginIndex , endIndex , [ numLeaves , & func , this ] ( DataLeafNode * node , uint32_t index ) {
if ( index = = numLeaves - 1 ) {
// It is the old last leaf - resize it to maximum
node - > resize ( _nodeStore - > layout ( ) . maxBytesPerLeaf ( ) ) ;
2015-04-10 21:52:30 +02:00
}
2015-04-10 22:57:29 +02:00
func ( node , index ) ;
2015-04-10 21:52:30 +02:00
} ) ;
} else {
2015-09-30 10:02:06 +02:00
//We are traversing entirely inside the valid region
2015-04-10 22:57:29 +02:00
_traverseLeaves ( _rootNode . get ( ) , 0 , beginIndex , endIndex , func ) ;
2015-04-10 21:52:30 +02:00
}
2015-02-25 01:31:16 +01:00
}
2015-04-10 21:52:30 +02:00
void DataTree : : _traverseLeaves ( DataNode * root , uint32_t leafOffset , uint32_t beginIndex , uint32_t endIndex , function < void ( DataLeafNode * , uint32_t ) > func ) {
DataLeafNode * leaf = dynamic_cast < DataLeafNode * > ( root ) ;
2015-02-25 01:31:16 +01:00
if ( leaf ! = nullptr ) {
2015-07-22 13:42:44 +02:00
ASSERT ( beginIndex < = 1 & & endIndex < = 1 , " If root node is a leaf, the (sub)tree has only one leaf - access indices must be 0 or 1. " ) ;
2015-02-25 01:31:16 +01:00
if ( beginIndex = = 0 & & endIndex = = 1 ) {
func ( leaf , leafOffset ) ;
}
return ;
}
2015-04-10 21:52:30 +02:00
DataInnerNode * inner = dynamic_cast < DataInnerNode * > ( root ) ;
2015-02-25 23:08:16 +01:00
uint32_t leavesPerChild = leavesPerFullChild ( * inner ) ;
2015-02-25 01:31:16 +01:00
uint32_t beginChild = beginIndex / leavesPerChild ;
2015-02-26 20:19:12 +01:00
uint32_t endChild = utils : : ceilDivision ( endIndex , leavesPerChild ) ;
2015-06-26 15:59:18 +02:00
vector < unique_ref < DataNode > > children = getOrCreateChildren ( inner , beginChild , endChild ) ;
2015-02-25 01:31:16 +01:00
for ( uint32_t childIndex = beginChild ; childIndex < endChild ; + + childIndex ) {
uint32_t childOffset = childIndex * leavesPerChild ;
2015-02-26 20:19:12 +01:00
uint32_t localBeginIndex = utils : : maxZeroSubtraction ( beginIndex , childOffset ) ;
2015-02-25 01:31:16 +01:00
uint32_t localEndIndex = std : : min ( leavesPerChild , endIndex - childOffset ) ;
2015-04-10 21:52:30 +02:00
auto child = std : : move ( children [ childIndex - beginChild ] ) ;
_traverseLeaves ( child . get ( ) , leafOffset + childOffset , localBeginIndex , localEndIndex , func ) ;
}
}
2015-06-26 15:59:18 +02:00
// Return owning pointers to the children [begin, end) of 'node'. Children
// that don't exist yet are created (each as a chain down to a single
// full-size leaf). Precondition (implied by the final ASSERT): begin does
// not exceed the current child count.
vector<unique_ref<DataNode>> DataTree::getOrCreateChildren(DataInnerNode *node, uint32_t begin, uint32_t end) {
  vector<unique_ref<DataNode>> children;
  children.reserve(end - begin);
  // Hoisted loop-invariant bound; numChildren() doesn't change in this loop.
  const uint32_t numExisting = std::min(node->numChildren(), end);
  for (uint32_t childIndex = begin; childIndex < numExisting; ++childIndex) {
    auto child = _nodeStore->load(node->getChild(childIndex)->key());
    ASSERT(child != none, "Couldn't load child node");
    children.emplace_back(std::move(*child));
  }
  for (uint32_t childIndex = node->numChildren(); childIndex < end; ++childIndex) {
    //TODO This creates each child with one chain to one leaf only, and then on the next lower level it
    //     has to create the children for the child. Would be faster to directly create full trees if necessary.
    children.emplace_back(addChildTo(node));
  }
  ASSERT(children.size() == end - begin, "Number of children in the result is wrong");
  return children;
}
2015-06-26 15:59:18 +02:00
// Append one new child subtree (a chain of inner nodes ending in a single
// full-size leaf) to 'node' and return ownership of the subtree's root.
unique_ref<DataNode> DataTree::addChildTo(DataInnerNode *node) {
  auto newLeaf = _nodeStore->createNewLeafNode();
  newLeaf->resize(_nodeStore->layout().maxBytesPerLeaf());
  auto chain = createChainOfInnerNodes(node->depth() - 1, std::move(newLeaf));
  node->addChild(*chain);
  return std::move(chain);
}
2015-01-23 04:39:36 +01:00
2015-02-25 23:08:16 +01:00
uint32_t DataTree : : leavesPerFullChild ( const DataInnerNode & root ) const {
2015-12-11 00:18:17 +01:00
return utils : : intPow ( _nodeStore - > layout ( ) . maxChildrenPerInnerNode ( ) , ( uint32_t ) root . depth ( ) - 1 ) ;
2015-02-25 23:08:16 +01:00
}
uint64_t DataTree : : numStoredBytes ( ) const {
2015-04-09 16:30:36 +02:00
shared_lock < shared_mutex > lock ( _mutex ) ;
return _numStoredBytes ( ) ;
2015-02-25 23:08:16 +01:00
}
2015-04-09 16:30:36 +02:00
uint64_t DataTree : : _numStoredBytes ( ) const {
return _numStoredBytes ( * _rootNode ) ;
}
uint64_t DataTree : : _numStoredBytes ( const DataNode & root ) const {
2015-02-25 23:08:16 +01:00
const DataLeafNode * leaf = dynamic_cast < const DataLeafNode * > ( & root ) ;
if ( leaf ! = nullptr ) {
return leaf - > numBytes ( ) ;
}
const DataInnerNode & inner = dynamic_cast < const DataInnerNode & > ( root ) ;
2015-02-26 17:04:02 +01:00
uint64_t numBytesInLeftChildren = ( inner . numChildren ( ) - 1 ) * leavesPerFullChild ( inner ) * _nodeStore - > layout ( ) . maxBytesPerLeaf ( ) ;
2015-02-25 23:08:16 +01:00
auto lastChild = _nodeStore - > load ( inner . LastChild ( ) - > key ( ) ) ;
2015-07-22 13:42:44 +02:00
ASSERT ( lastChild ! = none , " Couldn't load last child " ) ;
2015-06-26 15:59:18 +02:00
uint64_t numBytesInRightChild = _numStoredBytes ( * * lastChild ) ;
2015-02-25 23:08:16 +01:00
return numBytesInLeftChildren + numBytesInRightChild ;
}
2015-02-26 17:33:47 +01:00
void DataTree : : resizeNumBytes ( uint64_t newNumBytes ) {
2015-12-14 17:18:12 +01:00
//TODO Can we resize in parallel? Especially creating new blocks (i.e. encrypting them) is expensive and should be done in parallel.
2015-04-09 16:30:36 +02:00
boost : : upgrade_lock < shared_mutex > lock ( _mutex ) ;
{
boost : : upgrade_to_unique_lock < shared_mutex > exclusiveLock ( lock ) ;
//TODO Faster implementation possible (no addDataLeaf()/removeLastDataLeaf() in a loop, but directly resizing)
LastLeaf ( _rootNode . get ( ) ) - > resize ( _nodeStore - > layout ( ) . maxBytesPerLeaf ( ) ) ;
uint64_t currentNumBytes = _numStoredBytes ( ) ;
2015-07-22 13:42:44 +02:00
ASSERT ( currentNumBytes % _nodeStore - > layout ( ) . maxBytesPerLeaf ( ) = = 0 , " The last leaf is not a max data leaf, although we just resized it to be one. " ) ;
2015-04-09 16:30:36 +02:00
uint32_t currentNumLeaves = currentNumBytes / _nodeStore - > layout ( ) . maxBytesPerLeaf ( ) ;
2015-12-11 00:18:17 +01:00
uint32_t newNumLeaves = std : : max ( UINT64_C ( 1 ) , utils : : ceilDivision ( newNumBytes , _nodeStore - > layout ( ) . maxBytesPerLeaf ( ) ) ) ;
2015-04-09 16:30:36 +02:00
for ( uint32_t i = currentNumLeaves ; i < newNumLeaves ; + + i ) {
addDataLeaf ( ) - > resize ( _nodeStore - > layout ( ) . maxBytesPerLeaf ( ) ) ;
}
for ( uint32_t i = currentNumLeaves ; i > newNumLeaves ; - - i ) {
removeLastDataLeaf ( ) ;
}
uint32_t newLastLeafSize = newNumBytes - ( newNumLeaves - 1 ) * _nodeStore - > layout ( ) . maxBytesPerLeaf ( ) ;
LastLeaf ( _rootNode . get ( ) ) - > resize ( newLastLeafSize ) ;
2015-02-26 17:33:47 +01:00
}
2015-12-11 00:18:17 +01:00
ASSERT ( newNumBytes = = numStoredBytes ( ) , " We resized to the wrong number of bytes ( " + std : : to_string ( numStoredBytes ( ) ) + " instead of " + std : : to_string ( newNumBytes ) + " ) " ) ;
2015-02-26 17:33:47 +01:00
}
// Return the rightmost leaf of the subtree below 'root'. The result is
// non-owning if 'root' itself is the leaf, owning otherwise.
optional_ownership_ptr<DataLeafNode> DataTree::LastLeaf(DataNode *root) {
  DataLeafNode *leaf = dynamic_cast<DataLeafNode*>(root);
  if (leaf != nullptr) {
    return WithoutOwnership(leaf);
  }
  DataInnerNode *inner = dynamic_cast<DataInnerNode*>(root);
  // Fix: a node that is neither a leaf nor an inner node previously caused a
  // null dereference below; fail with a diagnostic instead, consistent with
  // the unique_ref overload of LastLeaf().
  ASSERT(inner != nullptr, "Node is neither a leaf nor an inner node");
  auto lastChild = _nodeStore->load(inner->LastChild()->key());
  ASSERT(lastChild != none, "Couldn't load last child");
  return WithOwnership(LastLeaf(std::move(*lastChild)));
}
2015-06-26 15:59:18 +02:00
// Return (ownership of) the rightmost leaf of the subtree below 'root',
// consuming 'root' in the process.
unique_ref<DataLeafNode> DataTree::LastLeaf(unique_ref<DataNode> root) {
  auto leaf = dynamic_pointer_move<DataLeafNode>(root);
  if (leaf != none) {
    return std::move(*leaf);
  }
  auto inner = dynamic_pointer_move<DataInnerNode>(root);
  ASSERT(inner != none, "Root node is neither a leaf nor an inner node");
  auto child = _nodeStore->load((*inner)->LastChild()->key());
  ASSERT(child != none, "Couldn't load last child");
  return LastLeaf(std::move(*child));
}
2015-12-11 00:18:17 +01:00
uint64_t DataTree : : maxBytesPerLeaf ( ) const {
2015-02-26 20:19:12 +01:00
return _nodeStore - > layout ( ) . maxBytesPerLeaf ( ) ;
}
2015-01-23 04:39:36 +01:00
}
}
}