libcryfs: switch to cryfs develop
commit 0398d48b09
@@ -16,6 +16,9 @@ Checks: |
 -cert-err60-cpp,
 -bugprone-macro-parentheses,
 -bugprone-exception-escape,
+-bugprone-easily-swappable-parameters,
+-bugprone-implicit-widening-of-multiplication-result,
+-bugprone-narrowing-conversions,
 -cppcoreguidelines-owning-memory,
 -cppcoreguidelines-no-malloc,
 -cppcoreguidelines-pro-type-const-cast,
@@ -30,6 +33,7 @@ Checks: |
 -cppcoreguidelines-macro-usage,
 -cppcoreguidelines-non-private-member-variables-in-classes,
 -cppcoreguidelines-avoid-non-const-global-variables,
+-cppcoreguidelines-narrowing-conversions,
 -clang-analyzer-optin.cplusplus.VirtualCall,
 -clang-analyzer-cplusplus.NewDeleteLeaks,
 -misc-macro-parentheses,
@@ -1,8 +1,5 @@
 cmake_minimum_required(VERSION 3.10 FATAL_ERROR)
 
-# TODO Remove this deprecated policy switch once we're on cmake 3.4 or later
-cmake_policy(SET CMP0065 OLD)
-
 # TODO Perf test:
 # - try if setting CRYPTOPP_NATIVE_ARCH=ON and adding -march=native to the compile commands for cryfs source files makes a difference
 # -> if yes, offer a cmake option to enable both of these
@@ -27,7 +24,7 @@ option(USE_IWYU "build with iwyu checks enabled" OFF)
 option(CLANG_TIDY_WARNINGS_AS_ERRORS "treat clang-tidy warnings as errors" OFF)
 
 if (MSVC)
-option(DOKAN_PATH "Location of the Dokan library, e.g. C:\\Program Files\\Dokan\\DokanLibrary-1.1.0" "")
+option(DOKAN_PATH "Location of the Dokan library, e.g. C:\\Program Files\\Dokan\\DokanLibrary-2.0.6" "")
 endif()
 
 # Default value is to build in release mode but with debug symbols
@@ -7,7 +7,7 @@
 "inheritEnvironments": [ "msvc_x86" ],
 "buildRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\build\\${name}",
 "installRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\install\\${name}",
-"cmakeCommandArgs": "-DBUILD_TESTING=on -DDOKAN_PATH=\"C:\\Program Files\\Dokan\\Dokan Library-1.3.0\"",
+"cmakeCommandArgs": "-DBUILD_TESTING=on -DDOKAN_PATH=\"C:\\Program Files\\Dokan\\Dokan Library-2.0.6\"",
 "buildCommandArgs": "-v",
 "ctestCommandArgs": ""
 },
@@ -18,7 +18,7 @@
 "inheritEnvironments": [ "msvc_x86" ],
 "buildRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\build\\${name}",
 "installRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\install\\${name}",
-"cmakeCommandArgs": "-DBUILD_TESTING=on -DDOKAN_PATH=\"C:\\Program Files\\Dokan\\Dokan Library-1.3.0\"",
+"cmakeCommandArgs": "-DBUILD_TESTING=on -DDOKAN_PATH=\"C:\\Program Files\\Dokan\\Dokan Library-2.0.6\"",
 "buildCommandArgs": "-v",
 "ctestCommandArgs": ""
 },
@@ -29,7 +29,7 @@
 "inheritEnvironments": [ "msvc_x64_x64" ],
 "buildRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\build\\${name}",
 "installRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\install\\${name}",
-"cmakeCommandArgs": "-DBUILD_TESTING=on -DDOKAN_PATH=\"C:\\Program Files\\Dokan\\Dokan Library-1.3.0\"",
+"cmakeCommandArgs": "-DBUILD_TESTING=on -DDOKAN_PATH=\"C:\\Program Files\\Dokan\\Dokan Library-2.0.6\"",
 "buildCommandArgs": "-v",
 "ctestCommandArgs": ""
 },
@@ -40,7 +40,7 @@
 "inheritEnvironments": [ "msvc_x64_x64" ],
 "buildRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\build\\${name}",
 "installRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\install\\${name}",
-"cmakeCommandArgs": "-DBUILD_TESTING=on -DDOKAN_PATH=\"C:\\Program Files\\Dokan\\Dokan Library-1.3.0\"",
+"cmakeCommandArgs": "-DBUILD_TESTING=on -DDOKAN_PATH=\"C:\\Program Files\\Dokan\\Dokan Library-2.0.6\"",
 "buildCommandArgs": "-v",
 "ctestCommandArgs": ""
 }
@@ -1,3 +1,19 @@
+Version 0.12.0 (unreleased)
+---------------
+* Added a man page for `cryfs-unmount`
+* Fixed small inaccuracy in calculation of free space in statvfs
+* Use libcurl dependency from conan instead of requiring it to be preinstalled
+* Updated dependencies to
+  * Fuse 2.9
+  * DokanY 2.0.6.1000
+  * range-v3/0.12.0
+  * boost 1.79
+  * spdlog/1.11.0
+
+Version 0.11.5 (unreleased)
+---------------
+* Fix an issue when using `-o` atime mount options
+
 Version 0.11.4
 ---------------
 * Fixed build issue with GCC 13 (see https://github.com/cryfs/cryfs/pull/448 )
@@ -20,6 +20,9 @@ function(target_activate_cpp14 TARGET)
 if(CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND APPLE)
 target_compile_options(${TARGET} PUBLIC -stdlib=libc++)
 endif(CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND APPLE)
+
+# We need ENABLE_EXPORTS so that boost::stacktrace works correctly
+set_property(TARGET ${TARGET} PROPERTY ENABLE_EXPORTS 1)
 endfunction(target_activate_cpp14)
 
 # Find clang-tidy executable (for use in target_enable_style_warnings)
@@ -10,7 +10,6 @@ set(SOURCES
 implementations/onblocks/datanodestore/DataLeafNode.cpp
 implementations/onblocks/datanodestore/DataInnerNode.cpp
 implementations/onblocks/datanodestore/DataNodeStore.cpp
-implementations/onblocks/datatreestore/impl/algorithms.cpp
 implementations/onblocks/datatreestore/impl/CachedValue.cpp
 implementations/onblocks/datatreestore/impl/LeafTraverser.cpp
 implementations/onblocks/datatreestore/LeafHandle.cpp
@@ -21,7 +21,7 @@ class DataTreeRef;
 class BlobOnBlocks final: public Blob {
 public:
 BlobOnBlocks(cpputils::unique_ref<parallelaccessdatatreestore::DataTreeRef> datatree);
-~BlobOnBlocks();
+~BlobOnBlocks() override;
 
 const blockstore::BlockId &blockId() const override;
 
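Note on the destructor hunks (this one and the similar ones below): because these classes derive from interfaces with virtual destructors, the destructor can carry `override` so the compiler verifies that it really overrides a virtual base destructor. A minimal, self-contained sketch of the pattern, with illustrative names not taken from the codebase:

    #include <memory>

    class Interface {                    // stand-in for an interface such as Blob
    public:
        virtual ~Interface() = default;
        virtual int size() const = 0;
    };

    class Impl final : public Interface {
    public:
        ~Impl() override = default;      // 'override' documents and checks the relationship
        int size() const override { return 0; }
    };

    int main() {
        const std::unique_ptr<Interface> p = std::make_unique<Impl>();
        return p->size();
    }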
@@ -17,7 +17,7 @@ class ParallelAccessDataTreeStore;
 class BlobStoreOnBlocks final: public BlobStore {
 public:
 BlobStoreOnBlocks(cpputils::unique_ref<blockstore::BlockStore> blockStore, uint64_t physicalBlocksizeBytes);
-~BlobStoreOnBlocks();
+~BlobStoreOnBlocks() override;
 
 cpputils::unique_ref<Blob> create() override;
 boost::optional<cpputils::unique_ref<Blob>> load(const blockstore::BlockId &blockId) override;
@@ -17,7 +17,7 @@ public:
 using ChildEntry = DataInnerNode_ChildEntry;
 
 DataInnerNode(DataNodeView block);
-~DataInnerNode();
+~DataInnerNode() override;
 
 uint32_t maxStoreableChildren() const;
 
@@ -26,13 +26,13 @@ DataLeafNode::~DataLeafNode() {
 
 unique_ref<DataLeafNode> DataLeafNode::CreateNewNode(BlockStore *blockStore, const DataNodeLayout &layout, Data data) {
 ASSERT(data.size() <= layout.maxBytesPerLeaf(), "Data passed in is too large for one leaf.");
-uint32_t size = data.size();
+const uint32_t size = data.size();
 return make_unique_ref<DataLeafNode>(DataNodeView::create(blockStore, layout, DataNode::FORMAT_VERSION_HEADER, 0, size, std::move(data)));
 }
 
 unique_ref<DataLeafNode> DataLeafNode::OverwriteNode(BlockStore *blockStore, const DataNodeLayout &layout, const BlockId &blockId, Data data) {
 ASSERT(data.size() == layout.maxBytesPerLeaf(), "Data passed in is too large for one leaf.");
-uint32_t size = data.size();
+const uint32_t size = data.size();
 return make_unique_ref<DataLeafNode>(DataNodeView::overwrite(blockStore, layout, DataNode::FORMAT_VERSION_HEADER, 0, size, blockId, std::move(data)));
 }
 
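Note: this hunk and many of the ones that follow make the same mechanical change, declaring single-assignment locals `const` (the kind of change clang-tidy's const-correctness checks suggest). A hedged, standalone illustration of the idea:

    #include <cstdint>
    #include <vector>

    // A value that is computed once and never reassigned is declared const.
    uint32_t lastElement(const std::vector<uint32_t> &values) {
        const uint32_t size = static_cast<uint32_t>(values.size()); // was: uint32_t size = ...
        if (size == 0) {
            return 0;
        }
        return values[size - 1];
    }

    int main() {
        const std::vector<uint32_t> v{1, 2, 3};
        return static_cast<int>(lastElement(v));
    }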
@@ -52,7 +52,7 @@ uint32_t DataLeafNode::numBytes() const {
 
 void DataLeafNode::resize(uint32_t new_size) {
 ASSERT(new_size <= maxStoreableBytes(), "Trying to resize to a size larger than the maximal size");
-uint32_t old_size = node().Size();
+const uint32_t old_size = node().Size();
 if (new_size < old_size) {
 fillDataWithZeroesFromTo(new_size, old_size);
 }
@@ -15,7 +15,7 @@ public:
 static cpputils::unique_ref<DataLeafNode> OverwriteNode(blockstore::BlockStore *blockStore, const DataNodeLayout &layout, const blockstore::BlockId &blockId, cpputils::Data data);
 
 DataLeafNode(DataNodeView block);
-~DataLeafNode();
+~DataLeafNode() override;
 
 //Returning uint64_t, because calculations handling this probably need to be done in 64bit to support >4GB blobs.
 uint64_t maxStoreableBytes() const;
@@ -23,7 +23,7 @@ namespace onblocks {
 namespace datanodestore {
 
 DataNodeStore::DataNodeStore(unique_ref<BlockStore> blockstore, uint64_t physicalBlocksizeBytes)
-: _blockstore(std::move(blockstore)), _layout(_blockstore->blockSizeFromPhysicalBlockSize(physicalBlocksizeBytes)) {
+: _blockstore(std::move(blockstore)), _layout(_blockstore->blockSizeFromPhysicalBlockSize(physicalBlocksizeBytes)), _physicalBlockSizeBytes(physicalBlocksizeBytes) {
 }
 
 DataNodeStore::~DataNodeStore() {
@@ -80,7 +80,7 @@ unique_ref<DataNode> DataNodeStore::overwriteNodeWith(unique_ref<DataNode> targe
 }
 
 void DataNodeStore::remove(unique_ref<DataNode> node) {
-BlockId blockId = node->blockId();
+const BlockId blockId = node->blockId();
 cpputils::destruct(std::move(node));
 remove(blockId);
 }
@@ -127,11 +127,11 @@ uint64_t DataNodeStore::numNodes() const {
 }
 
 uint64_t DataNodeStore::estimateSpaceForNumNodesLeft() const {
-return _blockstore->estimateNumFreeBytes() / _layout.blocksizeBytes();
+return _blockstore->estimateNumFreeBytes() / _physicalBlockSizeBytes;
 }
 
 uint64_t DataNodeStore::virtualBlocksizeBytes() const {
-return _layout.blocksizeBytes();
+return _layout.maxBytesPerLeaf();
 }
 
 DataNodeLayout DataNodeStore::layout() const {
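Note: the hunk above changes what DataNodeStore reports. The free-space estimate now divides the free byte count by the physical block size handed to the constructor (kept in the `_physicalBlockSizeBytes` member added in the next hunk) instead of by the smaller usable block size, and `virtualBlocksizeBytes()` now returns the leaf payload size. A rough sketch of why the divisor matters; all numbers are invented for illustration:

    #include <cstdint>
    #include <iostream>

    int main() {
        const uint64_t freeBytes = 100ull * 1024 * 1024;          // free space reported by the base block store
        const uint64_t physicalBlockSize = 16 * 1024;             // bytes one node really occupies on disk
        const uint64_t usableBlockSize = physicalBlockSize - 64;  // payload left after per-block overhead

        // Each additional node consumes a full physical block, so dividing by the
        // smaller usable size overestimates how many nodes still fit.
        std::cout << "overestimate: " << freeBytes / usableBlockSize << " nodes\n";
        std::cout << "estimate:     " << freeBytes / physicalBlockSize << " nodes\n";
        return 0;
    }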
@@ -57,6 +57,7 @@ private:
 
 cpputils::unique_ref<blockstore::BlockStore> _blockstore;
 const DataNodeLayout _layout;
+uint64_t _physicalBlockSizeBytes;
 
 DISALLOW_COPY_AND_ASSIGN(DataNodeStore);
 };
@@ -67,7 +67,7 @@ public:
 
 static DataNodeView create(blockstore::BlockStore *blockStore, const DataNodeLayout &layout, uint16_t formatVersion, uint8_t depth, uint32_t size, cpputils::Data data) {
 ASSERT(data.size() <= layout.datasizeBytes(), "Data is too large for node");
-cpputils::Data serialized = serialize_(layout, formatVersion, depth, size, std::move(data));
+const cpputils::Data serialized = serialize_(layout, formatVersion, depth, size, std::move(data));
 ASSERT(serialized.size() == layout.blocksizeBytes(), "Wrong block size");
 auto block = blockStore->create(serialized);
 return DataNodeView(std::move(block));
@@ -5,8 +5,6 @@
 #include "../datanodestore/DataLeafNode.h"
 #include "../utils/Math.h"
 
-#include "impl/algorithms.h"
-
 #include <cpp-utils/pointer/cast.h>
 #include <cpp-utils/pointer/optional_ownership_ptr.h>
 #include <cmath>
@@ -53,7 +51,7 @@ const BlockId &DataTree::blockId() const {
 void DataTree::flush() const {
 // By grabbing a lock, we ensure that all modifying functions don't run currently and are therefore flushed.
 // It's only a shared lock, because this doesn't modify the tree structure.
-shared_lock<shared_mutex> lock(_treeStructureMutex);
+const shared_lock<shared_mutex> lock(_treeStructureMutex);
 // We also have to flush the root node
 _rootNode->flush();
 }
@@ -61,7 +59,7 @@ void DataTree::flush() const {
 unique_ref<DataNode> DataTree::releaseRootNode() {
 // Lock also ensures that the root node is currently set (traversing unsets it temporarily)
 // It's a unique lock because this "modifies" tree structure by changing _rootNode.
-unique_lock<shared_mutex> lock(_treeStructureMutex);
+const unique_lock<shared_mutex> lock(_treeStructureMutex);
 return std::move(_rootNode);
 }
 
@@ -76,13 +74,13 @@ uint32_t DataTree::numNodes() const {
 }
 
 uint32_t DataTree::numLeaves() const {
-shared_lock<shared_mutex> lock(_treeStructureMutex);
+const shared_lock<shared_mutex> lock(_treeStructureMutex);
 
 return _getOrComputeSizeCache().numLeaves;
 }
 
 uint64_t DataTree::numBytes() const {
-shared_lock<shared_mutex> lock(_treeStructureMutex);
+const shared_lock<shared_mutex> lock(_treeStructureMutex);
 return _numBytes();
 }
 
@@ -109,11 +107,11 @@ DataTree::SizeCache DataTree::_computeSizeCache(const DataNode &node) const {
 }
 
 const DataInnerNode &inner = dynamic_cast<const DataInnerNode&>(node);
-uint32_t numLeavesInLeftChildren = static_cast<uint32_t>(inner.numChildren()-1) * _leavesPerFullChild(inner);
-uint64_t numBytesInLeftChildren = numLeavesInLeftChildren * _nodeStore->layout().maxBytesPerLeaf();
+const uint32_t numLeavesInLeftChildren = static_cast<uint32_t>(inner.numChildren()-1) * _leavesPerFullChild(inner);
+const uint64_t numBytesInLeftChildren = numLeavesInLeftChildren * _nodeStore->layout().maxBytesPerLeaf();
 auto lastChild = _nodeStore->load(inner.readLastChild().blockId());
 ASSERT(lastChild != none, "Couldn't load last child");
-SizeCache sizeInRightChild = _computeSizeCache(**lastChild);
+const SizeCache sizeInRightChild = _computeSizeCache(**lastChild);
 
 return SizeCache {
 numLeavesInLeftChildren + sizeInRightChild.numLeaves,
@@ -138,16 +136,16 @@ void DataTree::_traverseLeavesByByteIndices(uint64_t beginByte, uint64_t sizeByt
 return;
 }
 
-uint64_t endByte = beginByte + sizeBytes;
-uint64_t _maxBytesPerLeaf = maxBytesPerLeaf();
-uint32_t firstLeaf = beginByte / _maxBytesPerLeaf;
-uint32_t endLeaf = utils::ceilDivision(endByte, _maxBytesPerLeaf);
+const uint64_t endByte = beginByte + sizeBytes;
+const uint64_t _maxBytesPerLeaf = maxBytesPerLeaf();
+const uint32_t firstLeaf = beginByte / _maxBytesPerLeaf;
+const uint32_t endLeaf = utils::ceilDivision(endByte, _maxBytesPerLeaf);
 bool blobIsGrowingFromThisTraversal = false;
 auto _onExistingLeaf = [&onExistingLeaf, beginByte, endByte, endLeaf, _maxBytesPerLeaf, &blobIsGrowingFromThisTraversal] (uint32_t leafIndex, bool isRightBorderLeaf, LeafHandle leafHandle) {
-uint64_t indexOfFirstLeafByte = leafIndex * _maxBytesPerLeaf;
+const uint64_t indexOfFirstLeafByte = leafIndex * _maxBytesPerLeaf;
 ASSERT(endByte > indexOfFirstLeafByte, "Traversal went too far right");
-uint32_t dataBegin = utils::maxZeroSubtraction(beginByte, indexOfFirstLeafByte);
-uint32_t dataEnd = std::min(_maxBytesPerLeaf, endByte - indexOfFirstLeafByte);
+const uint32_t dataBegin = utils::maxZeroSubtraction(beginByte, indexOfFirstLeafByte);
+const uint32_t dataEnd = std::min(_maxBytesPerLeaf, endByte - indexOfFirstLeafByte);
 // If we are traversing exactly until the last leaf, then the last leaf wasn't resized by the traversal and might have a wrong size. We have to fix it.
 if (isRightBorderLeaf) {
 ASSERT(leafIndex == endLeaf-1, "If we traversed further right, this wouldn't be the right border leaf.");
|
|||||||
auto _onCreateLeaf = [&onCreateLeaf, _maxBytesPerLeaf, beginByte, firstLeaf, endByte, endLeaf, &blobIsGrowingFromThisTraversal, readOnlyTraversal] (uint32_t leafIndex) -> Data {
|
auto _onCreateLeaf = [&onCreateLeaf, _maxBytesPerLeaf, beginByte, firstLeaf, endByte, endLeaf, &blobIsGrowingFromThisTraversal, readOnlyTraversal] (uint32_t leafIndex) -> Data {
|
||||||
ASSERT(!readOnlyTraversal, "Cannot create leaves in a read-only traversal");
|
ASSERT(!readOnlyTraversal, "Cannot create leaves in a read-only traversal");
|
||||||
blobIsGrowingFromThisTraversal = true;
|
blobIsGrowingFromThisTraversal = true;
|
||||||
uint64_t indexOfFirstLeafByte = leafIndex * _maxBytesPerLeaf;
|
const uint64_t indexOfFirstLeafByte = leafIndex * _maxBytesPerLeaf;
|
||||||
ASSERT(endByte > indexOfFirstLeafByte, "Traversal went too far right");
|
ASSERT(endByte > indexOfFirstLeafByte, "Traversal went too far right");
|
||||||
uint32_t dataBegin = utils::maxZeroSubtraction(beginByte, indexOfFirstLeafByte);
|
const uint32_t dataBegin = utils::maxZeroSubtraction(beginByte, indexOfFirstLeafByte);
|
||||||
uint32_t dataEnd = std::min(_maxBytesPerLeaf, endByte - indexOfFirstLeafByte);
|
const uint32_t dataEnd = std::min(_maxBytesPerLeaf, endByte - indexOfFirstLeafByte);
|
||||||
ASSERT(leafIndex == firstLeaf || dataBegin == 0, "Only the leftmost leaf can have a gap on the left.");
|
ASSERT(leafIndex == firstLeaf || dataBegin == 0, "Only the leftmost leaf can have a gap on the left.");
|
||||||
ASSERT(leafIndex == endLeaf-1 || dataEnd == _maxBytesPerLeaf, "Only the rightmost leaf can have a gap on the right");
|
ASSERT(leafIndex == endLeaf-1 || dataEnd == _maxBytesPerLeaf, "Only the rightmost leaf can have a gap on the right");
|
||||||
Data data = onCreateLeaf(indexOfFirstLeafByte + dataBegin, dataEnd-dataBegin);
|
Data data = onCreateLeaf(indexOfFirstLeafByte + dataBegin, dataEnd-dataBegin);
|
||||||
@@ -197,11 +195,11 @@ uint32_t DataTree::_leavesPerFullChild(const DataInnerNode &root) const {
 }
 
 void DataTree::resizeNumBytes(uint64_t newNumBytes) {
-std::unique_lock<shared_mutex> lock(_treeStructureMutex);
+const std::unique_lock<shared_mutex> lock(_treeStructureMutex);
 
-uint32_t newNumLeaves = std::max(UINT64_C(1), utils::ceilDivision(newNumBytes, _nodeStore->layout().maxBytesPerLeaf()));
-uint32_t newLastLeafSize = newNumBytes - (newNumLeaves-1) * _nodeStore->layout().maxBytesPerLeaf();
-uint32_t maxChildrenPerInnerNode = _nodeStore->layout().maxChildrenPerInnerNode();
+const uint32_t newNumLeaves = std::max(UINT64_C(1), utils::ceilDivision(newNumBytes, _nodeStore->layout().maxBytesPerLeaf()));
+const uint32_t newLastLeafSize = newNumBytes - (newNumLeaves-1) * _nodeStore->layout().maxBytesPerLeaf();
+const uint32_t maxChildrenPerInnerNode = _nodeStore->layout().maxChildrenPerInnerNode();
 auto onExistingLeaf = [newLastLeafSize] (uint32_t /*index*/, bool /*isRightBorderLeaf*/, LeafHandle leafHandle) {
 auto leaf = leafHandle.node();
 // This is only called, if the new last leaf was already existing
@@ -216,10 +214,10 @@ void DataTree::resizeNumBytes(uint64_t newNumBytes) {
 auto onBacktrackFromSubtree = [this, newNumLeaves, maxChildrenPerInnerNode] (DataInnerNode* node) {
 // This is only called for the right border nodes of the new tree.
 // When growing size, the following is a no-op. When shrinking, we're deleting the children that aren't needed anymore.
-uint32_t maxLeavesPerChild = utils::intPow(static_cast<uint64_t>(maxChildrenPerInnerNode), (static_cast<uint64_t>(node->depth())-1));
-uint32_t neededNodesOnChildLevel = utils::ceilDivision(newNumLeaves, maxLeavesPerChild);
-uint32_t neededSiblings = utils::ceilDivision(neededNodesOnChildLevel, maxChildrenPerInnerNode);
-uint32_t neededChildrenForRightBorderNode = neededNodesOnChildLevel - (neededSiblings-1) * maxChildrenPerInnerNode;
+const uint32_t maxLeavesPerChild = utils::intPow(static_cast<uint64_t>(maxChildrenPerInnerNode), (static_cast<uint64_t>(node->depth())-1));
+const uint32_t neededNodesOnChildLevel = utils::ceilDivision(newNumLeaves, maxLeavesPerChild);
+const uint32_t neededSiblings = utils::ceilDivision(neededNodesOnChildLevel, maxChildrenPerInnerNode);
+const uint32_t neededChildrenForRightBorderNode = neededNodesOnChildLevel - (neededSiblings-1) * maxChildrenPerInnerNode;
 ASSERT(neededChildrenForRightBorderNode <= node->numChildren(), "Node has too few children");
 // All children to the right of the new right-border-node are removed including their subtree.
 while(node->numChildren() > neededChildrenForRightBorderNode) {
@@ -240,12 +238,12 @@ uint64_t DataTree::maxBytesPerLeaf() const {
 }
 
 uint8_t DataTree::depth() const {
-shared_lock<shared_mutex> lock(_treeStructureMutex);
+const shared_lock<shared_mutex> lock(_treeStructureMutex);
 return _rootNode->depth();
 }
 
 void DataTree::readBytes(void *target, uint64_t offset, uint64_t count) const {
-shared_lock<shared_mutex> lock(_treeStructureMutex);
+const shared_lock<shared_mutex> lock(_treeStructureMutex);
 
 const uint64_t _size = _numBytes();
 if(offset > _size || offset + count > _size) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
Data DataTree::readAllBytes() const {
|
Data DataTree::readAllBytes() const {
|
||||||
shared_lock<shared_mutex> lock(_treeStructureMutex);
|
const shared_lock<shared_mutex> lock(_treeStructureMutex);
|
||||||
|
|
||||||
//TODO Querying numBytes can be inefficient. Is this possible without a call to size()?
|
//TODO Querying numBytes can be inefficient. Is this possible without a call to size()?
|
||||||
uint64_t count = _numBytes();
|
const uint64_t count = _numBytes();
|
||||||
Data result(count);
|
Data result(count);
|
||||||
_doReadBytes(result.data(), 0, count);
|
_doReadBytes(result.data(), 0, count);
|
||||||
|
|
||||||
@ -269,7 +267,7 @@ Data DataTree::readAllBytes() const {
|
|||||||
}
|
}
|
||||||
|
|
||||||
uint64_t DataTree::tryReadBytes(void *target, uint64_t offset, uint64_t count) const {
|
uint64_t DataTree::tryReadBytes(void *target, uint64_t offset, uint64_t count) const {
|
||||||
shared_lock<shared_mutex> lock(_treeStructureMutex);
|
const shared_lock<shared_mutex> lock(_treeStructureMutex);
|
||||||
auto result = _tryReadBytes(target, offset, count);
|
auto result = _tryReadBytes(target, offset, count);
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
@ -296,7 +294,7 @@ void DataTree::_doReadBytes(void *target, uint64_t offset, uint64_t count) const
|
|||||||
}
|
}
|
||||||
|
|
||||||
void DataTree::writeBytes(const void *source, uint64_t offset, uint64_t count) {
|
void DataTree::writeBytes(const void *source, uint64_t offset, uint64_t count) {
|
||||||
unique_lock<shared_mutex> lock(_treeStructureMutex);
|
const unique_lock<shared_mutex> lock(_treeStructureMutex);
|
||||||
|
|
||||||
auto onExistingLeaf = [source, offset, count] (uint64_t indexOfFirstLeafByte, LeafHandle leaf, uint32_t leafDataOffset, uint32_t leafDataSize) {
|
auto onExistingLeaf = [source, offset, count] (uint64_t indexOfFirstLeafByte, LeafHandle leaf, uint32_t leafDataOffset, uint32_t leafDataSize) {
|
||||||
ASSERT(indexOfFirstLeafByte+leafDataOffset>=offset && indexOfFirstLeafByte-offset+leafDataOffset <= count && indexOfFirstLeafByte-offset+leafDataOffset+leafDataSize <= count, "Reading from source out of bounds");
|
ASSERT(indexOfFirstLeafByte+leafDataOffset>=offset && indexOfFirstLeafByte-offset+leafDataOffset <= count && indexOfFirstLeafByte-offset+leafDataOffset+leafDataSize <= count, "Reading from source out of bounds");
|
||||||
|
@@ -18,14 +18,14 @@ public:
 T getOrCompute(std::function<T ()> compute) {
 boost::upgrade_lock<boost::shared_mutex> readLock(_mutex);
 if (_cache == boost::none) {
-boost::upgrade_to_unique_lock<boost::shared_mutex> writeLock(readLock);
+const boost::upgrade_to_unique_lock<boost::shared_mutex> writeLock(readLock);
 _cache = compute();
 }
 return *_cache;
 }
 
 void update(std::function<void (boost::optional<T>*)> func) {
-boost::unique_lock<boost::shared_mutex> writeLock(_mutex);
+const boost::unique_lock<boost::shared_mutex> writeLock(_mutex);
 func(&_cache);
 }
 
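Note: CachedValue::getOrCompute above relies on Boost's upgradable locks: the read path takes an upgrade lock that coexists with shared readers, and it is promoted to exclusive access only when the cached value still has to be computed. A stripped-down sketch of that pattern (simplified, not the actual class; requires linking boost_thread):

    #include <boost/optional.hpp>
    #include <boost/thread/locks.hpp>
    #include <boost/thread/shared_mutex.hpp>
    #include <functional>

    template <class T>
    class CachedValueSketch {
    public:
        T getOrCompute(std::function<T()> compute) {
            boost::upgrade_lock<boost::shared_mutex> readLock(_mutex);
            if (_cache == boost::none) {
                // Promote to exclusive access only for the one-time computation.
                const boost::upgrade_to_unique_lock<boost::shared_mutex> writeLock(readLock);
                _cache = compute();
            }
            return *_cache;
        }

    private:
        boost::shared_mutex _mutex;
        boost::optional<T> _cache;
    };

    int main() {
        CachedValueSketch<int> cache;
        return cache.getOrCompute([] { return 0; });
    }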
@@ -37,8 +37,8 @@ namespace blobstore {
 // beginIndex<numLeaves<endIndex, beginIndex=numLeaves<endIndex,
 // numLeaves<beginIndex<endIndex, numLeaves<beginIndex=endIndex
 
-uint32_t maxLeavesForDepth = _maxLeavesForTreeDepth((*root)->depth());
-bool increaseTreeDepth = endIndex > maxLeavesForDepth;
+const uint32_t maxLeavesForDepth = _maxLeavesForTreeDepth((*root)->depth());
+const bool increaseTreeDepth = endIndex > maxLeavesForDepth;
 ASSERT(!_readOnlyTraversal || !increaseTreeDepth, "Tried to grow a tree on a read only traversal");
 
 if ((*root)->depth() == 0) {
@@ -49,7 +49,7 @@ namespace blobstore {
 leaf->resize(_nodeStore->layout().maxBytesPerLeaf());
 }
 if (beginIndex == 0 && endIndex >= 1) {
-bool isRightBorderLeaf = (endIndex == 1);
+const bool isRightBorderLeaf = (endIndex == 1);
 onExistingLeaf(0, isRightBorderLeaf, LeafHandle(_nodeStore, leaf));
 }
 } else {
@@ -119,21 +119,21 @@ namespace blobstore {
 
 //TODO Call callbacks for different leaves in parallel.
 
-uint32_t leavesPerChild = _maxLeavesForTreeDepth(root->depth()-1);
-uint32_t beginChild = beginIndex/leavesPerChild;
-uint32_t endChild = utils::ceilDivision(endIndex, leavesPerChild);
+const uint32_t leavesPerChild = _maxLeavesForTreeDepth(root->depth()-1);
+const uint32_t beginChild = beginIndex/leavesPerChild;
+const uint32_t endChild = utils::ceilDivision(endIndex, leavesPerChild);
 ASSERT(endChild <= _nodeStore->layout().maxChildrenPerInnerNode(), "Traversal region would need increasing the tree depth. This should have happened before calling this function.");
-uint32_t numChildren = root->numChildren();
+const uint32_t numChildren = root->numChildren();
 ASSERT(!growLastLeaf || endChild >= numChildren, "Can only grow last leaf if it exists");
 ASSERT(!_readOnlyTraversal || endChild <= numChildren, "Can only traverse out of bounds in a read-only traversal");
-bool shouldGrowLastExistingLeaf = growLastLeaf || endChild > numChildren;
+const bool shouldGrowLastExistingLeaf = growLastLeaf || endChild > numChildren;
 
 // If we traverse outside of the valid region (i.e. usually would only traverse to new leaves and not to the last leaf),
 // we still have to descend to the last old child to fill it with leaves and grow the last old leaf.
 if (isLeftBorderOfTraversal && beginChild >= numChildren) {
 ASSERT(numChildren > 0, "Node doesn't have children.");
 auto childBlockId = root->readLastChild().blockId();
-uint32_t childOffset = (numChildren-1) * leavesPerChild;
+const uint32_t childOffset = (numChildren-1) * leavesPerChild;
 _traverseExistingSubtree(childBlockId, root->depth()-1, leavesPerChild, leavesPerChild, childOffset, true, false, true,
 [] (uint32_t /*index*/, bool /*isRightBorderNode*/, LeafHandle /*leaf*/) {ASSERT(false, "We don't actually traverse any leaves.");},
 [] (uint32_t /*index*/) -> Data {ASSERT(false, "We don't actually traverse any leaves.");},
@@ -143,12 +143,12 @@ namespace blobstore {
 // Traverse existing children
 for (uint32_t childIndex = beginChild; childIndex < std::min(endChild, numChildren); ++childIndex) {
 auto childBlockId = root->readChild(childIndex).blockId();
-uint32_t childOffset = childIndex * leavesPerChild;
-uint32_t localBeginIndex = utils::maxZeroSubtraction(beginIndex, childOffset);
-uint32_t localEndIndex = std::min(leavesPerChild, endIndex - childOffset);
-bool isFirstChild = (childIndex == beginChild);
-bool isLastExistingChild = (childIndex == numChildren - 1);
-bool isLastChild = isLastExistingChild && (numChildren == endChild);
+const uint32_t childOffset = childIndex * leavesPerChild;
+const uint32_t localBeginIndex = utils::maxZeroSubtraction(beginIndex, childOffset);
+const uint32_t localEndIndex = std::min(leavesPerChild, endIndex - childOffset);
+const bool isFirstChild = (childIndex == beginChild);
+const bool isLastExistingChild = (childIndex == numChildren - 1);
+const bool isLastChild = isLastExistingChild && (numChildren == endChild);
 ASSERT(localEndIndex <= leavesPerChild, "We don't want the child to add a tree level because it doesn't have enough space for the traversal.");
 _traverseExistingSubtree(childBlockId, root->depth()-1, localBeginIndex, localEndIndex, leafOffset + childOffset, isLeftBorderOfTraversal && isFirstChild,
 isRightBorderNode && isLastChild, shouldGrowLastExistingLeaf && isLastExistingChild, onExistingLeaf, onCreateLeaf, onBacktrackFromSubtree);
@@ -158,9 +158,9 @@ namespace blobstore {
 for (uint32_t childIndex = numChildren; childIndex < endChild; ++childIndex) {
 ASSERT(!_readOnlyTraversal, "Can't create new children in a read-only traversal");
 
-uint32_t childOffset = childIndex * leavesPerChild;
-uint32_t localBeginIndex = std::min(leavesPerChild, utils::maxZeroSubtraction(beginIndex, childOffset));
-uint32_t localEndIndex = std::min(leavesPerChild, endIndex - childOffset);
+const uint32_t childOffset = childIndex * leavesPerChild;
+const uint32_t localBeginIndex = std::min(leavesPerChild, utils::maxZeroSubtraction(beginIndex, childOffset));
+const uint32_t localEndIndex = std::min(leavesPerChild, endIndex - childOffset);
 auto leafCreator = (childIndex >= beginChild) ? onCreateLeaf : _createMaxSizeLeaf();
 auto child = _createNewSubtree(localBeginIndex, localEndIndex, leafOffset + childOffset, root->depth() - 1, leafCreator, onBacktrackFromSubtree);
 root->addChild(*child);
@@ -184,18 +184,18 @@ namespace blobstore {
 return _nodeStore->createNewLeafNode(leafCreator(leafOffset));
 }
 
-uint8_t minNeededDepth = utils::ceilLog(_nodeStore->layout().maxChildrenPerInnerNode(), static_cast<uint64_t>(endIndex));
+const uint8_t minNeededDepth = utils::ceilLog(_nodeStore->layout().maxChildrenPerInnerNode(), static_cast<uint64_t>(endIndex));
 ASSERT(depth >= minNeededDepth, "Given tree depth doesn't fit given number of leaves to create.");
-uint32_t leavesPerChild = _maxLeavesForTreeDepth(depth-1);
-uint32_t beginChild = beginIndex/leavesPerChild;
-uint32_t endChild = utils::ceilDivision(endIndex, leavesPerChild);
+const uint32_t leavesPerChild = _maxLeavesForTreeDepth(depth-1);
+const uint32_t beginChild = beginIndex/leavesPerChild;
+const uint32_t endChild = utils::ceilDivision(endIndex, leavesPerChild);
 
 vector<blockstore::BlockId> children;
 children.reserve(endChild);
 // TODO Remove redundancy of following two for loops by using min/max for calculating the parameters of the recursive call.
 // Create gap children (i.e. children before the traversal but after the current size)
 for (uint32_t childIndex = 0; childIndex < beginChild; ++childIndex) {
-uint32_t childOffset = childIndex * leavesPerChild;
+const uint32_t childOffset = childIndex * leavesPerChild;
 auto child = _createNewSubtree(leavesPerChild, leavesPerChild, leafOffset + childOffset, depth - 1,
 [] (uint32_t /*index*/)->Data {ASSERT(false, "We're only creating gap leaves here, not traversing any.");},
 [] (DataInnerNode* /*node*/) {});
@@ -204,9 +204,9 @@ namespace blobstore {
 }
 // Create new children that are traversed
 for(uint32_t childIndex = beginChild; childIndex < endChild; ++childIndex) {
-uint32_t childOffset = childIndex * leavesPerChild;
-uint32_t localBeginIndex = utils::maxZeroSubtraction(beginIndex, childOffset);
-uint32_t localEndIndex = std::min(leavesPerChild, endIndex - childOffset);
+const uint32_t childOffset = childIndex * leavesPerChild;
+const uint32_t localBeginIndex = utils::maxZeroSubtraction(beginIndex, childOffset);
+const uint32_t localEndIndex = std::min(leavesPerChild, endIndex - childOffset);
 auto child = _createNewSubtree(localBeginIndex, localEndIndex, leafOffset + childOffset, depth - 1, onCreateLeaf, onBacktrackFromSubtree);
 ASSERT(child->depth() == depth-1, "Created child node has wrong depth");
 children.push_back(child->blockId());
@@ -229,7 +229,7 @@ namespace blobstore {
 function<Data (uint32_t index)> LeafTraverser::_createMaxSizeLeaf() const {
 ASSERT(!_readOnlyTraversal, "Can't create a new leaf in a read-only traversal");
 
-uint64_t maxBytesPerLeaf = _nodeStore->layout().maxBytesPerLeaf();
+const uint64_t maxBytesPerLeaf = _nodeStore->layout().maxBytesPerLeaf();
 return [maxBytesPerLeaf] (uint32_t /*index*/) -> Data {
 return Data(maxBytesPerLeaf).FillWithZeroes();
 };
|
@ -1,69 +0,0 @@
|
|||||||
#include "algorithms.h"
|
|
||||||
#include <cpp-utils/pointer/cast.h>
|
|
||||||
#include <blockstore/utils/BlockId.h>
|
|
||||||
|
|
||||||
#include "../../datanodestore/DataInnerNode.h"
|
|
||||||
#include "../../datanodestore/DataNodeStore.h"
|
|
||||||
#include <cpp-utils/assert/assert.h>
|
|
||||||
|
|
||||||
using std::function;
|
|
||||||
using cpputils::optional_ownership_ptr;
|
|
||||||
using cpputils::dynamic_pointer_move;
|
|
||||||
using cpputils::unique_ref;
|
|
||||||
using blobstore::onblocks::datanodestore::DataInnerNode;
|
|
||||||
using blobstore::onblocks::datanodestore::DataNode;
|
|
||||||
using blobstore::onblocks::datanodestore::DataNodeStore;
|
|
||||||
using blockstore::BlockId;
|
|
||||||
using boost::optional;
|
|
||||||
using boost::none;
|
|
||||||
|
|
||||||
namespace blobstore {
|
|
||||||
namespace onblocks {
|
|
||||||
namespace datatreestore {
|
|
||||||
namespace algorithms {
|
|
||||||
|
|
||||||
optional<unique_ref<DataInnerNode>> getLastChildAsInnerNode(DataNodeStore *nodeStore, const DataInnerNode &node) {
|
|
||||||
BlockId blockId = node.readLastChild().blockId();
|
|
||||||
auto lastChild = nodeStore->load(blockId);
|
|
||||||
ASSERT(lastChild != none, "Couldn't load last child");
|
|
||||||
return dynamic_pointer_move<DataInnerNode>(*lastChild);
|
|
||||||
}
|
|
||||||
|
|
||||||
//Returns the lowest right border node meeting the condition specified (exclusive the leaf).
|
|
||||||
//Returns nullptr, if no inner right border node meets the condition.
|
|
||||||
optional_ownership_ptr<DataInnerNode> GetLowestInnerRightBorderNodeWithConditionOrNull(DataNodeStore *nodeStore, datanodestore::DataNode *rootNode, function<bool(const DataInnerNode &)> condition) {
|
|
||||||
optional_ownership_ptr<DataInnerNode> currentNode = cpputils::WithoutOwnership(dynamic_cast<DataInnerNode*>(rootNode));
|
|
||||||
optional_ownership_ptr<DataInnerNode> result = cpputils::null<DataInnerNode>();
|
|
||||||
for (unsigned int i=0; i < rootNode->depth(); ++i) {
|
|
||||||
//TODO This unnecessarily loads the leaf node in the last loop run
|
|
||||||
auto lastChild = getLastChildAsInnerNode(nodeStore, *currentNode);
|
|
||||||
if (condition(*currentNode)) {
|
|
||||||
result = std::move(currentNode);
|
|
||||||
}
|
|
||||||
if (lastChild == none) {
|
|
||||||
// lastChild is a leaf
|
|
||||||
ASSERT(static_cast<int>(i) == rootNode->depth()-1, "Couldn't get last child as inner node but we're not deep enough yet for the last child to be a leaf");
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
currentNode = cpputils::WithOwnership(std::move(*lastChild));
|
|
||||||
}
|
|
||||||
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
|
|
||||||
optional_ownership_ptr<DataInnerNode> GetLowestRightBorderNodeWithMoreThanOneChildOrNull(DataNodeStore *nodeStore, DataNode *rootNode) {
|
|
||||||
return GetLowestInnerRightBorderNodeWithConditionOrNull(nodeStore, rootNode, [] (const datanodestore::DataInnerNode &node) {
|
|
||||||
return node.numChildren() > 1;
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
optional_ownership_ptr<datanodestore::DataInnerNode> GetLowestInnerRightBorderNodeWithLessThanKChildrenOrNull(datanodestore::DataNodeStore *nodeStore, datanodestore::DataNode *rootNode) {
|
|
||||||
return GetLowestInnerRightBorderNodeWithConditionOrNull(nodeStore, rootNode, [] (const datanodestore::DataInnerNode &node) {
|
|
||||||
return node.numChildren() < node.maxStoreableChildren();
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@@ -1,30 +0,0 @@
-#pragma once
-#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATATREESTORE_IMPL_ALGORITHMS_H_
-#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATATREESTORE_IMPL_ALGORITHMS_H_
-
-#include <cpp-utils/pointer/optional_ownership_ptr.h>
-
-namespace blobstore {
-namespace onblocks {
-namespace datanodestore{
-class DataNode;
-class DataInnerNode;
-class DataNodeStore;
-}
-namespace datatreestore {
-namespace algorithms {
-
-//Returns the lowest right border node with at least two children.
-//Returns nullptr, if all right border nodes have only one child (since the root is a right border node, this means that the whole tree has exactly one leaf)
-cpputils::optional_ownership_ptr<datanodestore::DataInnerNode> GetLowestRightBorderNodeWithMoreThanOneChildOrNull(datanodestore::DataNodeStore *nodeStore, datanodestore::DataNode *rootNode);
-
-//Returns the lowest right border node with less than k children (not considering leaves).
-//Returns nullptr, if all right border nodes have k children (the tree is full)
-cpputils::optional_ownership_ptr<datanodestore::DataInnerNode> GetLowestInnerRightBorderNodeWithLessThanKChildrenOrNull(datanodestore::DataNodeStore *nodeStore, datanodestore::DataNode *rootNode);
-
-}
-}
-}
-}
-
-#endif
@@ -31,12 +31,12 @@ optional<unique_ref<DataTreeRef>> ParallelAccessDataTreeStore::load(const blocks
 
 unique_ref<DataTreeRef> ParallelAccessDataTreeStore::createNewTree() {
 auto dataTree = _dataTreeStore->createNewTree();
-BlockId blockId = dataTree->blockId();
+const BlockId blockId = dataTree->blockId();
 return _parallelAccessStore.add(blockId, std::move(dataTree)); // NOLINT (workaround https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82481 )
 }
 
 void ParallelAccessDataTreeStore::remove(unique_ref<DataTreeRef> tree) {
-BlockId blockId = tree->blockId();
+const BlockId blockId = tree->blockId();
 return _parallelAccessStore.remove(blockId, std::move(tree));
 }
 
@@ -9,7 +9,6 @@ using cpputils::Data;
 using cpputils::unique_ref;
 using cpputils::make_unique_ref;
 using boost::optional;
-using boost::none;
 using std::unique_lock;
 using std::mutex;
 
@@ -27,7 +26,7 @@ CachingBlockStore2::CachedBlock::~CachedBlock() {
 _blockStore->_baseBlockStore->store(_blockId, _data);
 }
 // remove it from the list of blocks not in the base store, if it's on it
-unique_lock<mutex> lock(_blockStore->_cachedBlocksNotInBaseStoreMutex);
+const unique_lock<mutex> lock(_blockStore->_cachedBlocksNotInBaseStoreMutex);
 _blockStore->_cachedBlocksNotInBaseStore.erase(_blockId);
 }
 
@@ -57,7 +56,7 @@ bool CachingBlockStore2::tryCreate(const BlockId &blockId, const Data &data) {
 return false;
 } else {
 _cache.push(blockId, make_unique_ref<CachingBlockStore2::CachedBlock>(this, blockId, data.copy(), true));
-unique_lock<mutex> lock(_cachedBlocksNotInBaseStoreMutex);
+const unique_lock<mutex> lock(_cachedBlocksNotInBaseStoreMutex);
 _cachedBlocksNotInBaseStore.insert(blockId);
 return true;
 }
@@ -69,7 +68,7 @@ bool CachingBlockStore2::remove(const BlockId &blockId) {
 if (popped != boost::none) {
 // Remove from base store if it exists in the base store
 {
-unique_lock<mutex> lock(_cachedBlocksNotInBaseStoreMutex);
+const unique_lock<mutex> lock(_cachedBlocksNotInBaseStoreMutex);
 if (_cachedBlocksNotInBaseStore.count(blockId) == 0) {
 const bool existedInBaseStore = _baseBlockStore->remove(blockId);
 if (!existedInBaseStore) {
@@ -125,7 +124,7 @@ void CachingBlockStore2::store(const BlockId &blockId, const Data &data) {
 uint64_t CachingBlockStore2::numBlocks() const {
 uint64_t numInCacheButNotInBaseStore = 0;
 {
-unique_lock<mutex> lock(_cachedBlocksNotInBaseStoreMutex);
+const unique_lock<mutex> lock(_cachedBlocksNotInBaseStoreMutex);
 numInCacheButNotInBaseStore = _cachedBlocksNotInBaseStore.size();
 }
 return _baseBlockStore->numBlocks() + numInCacheButNotInBaseStore;
@@ -141,7 +140,7 @@ uint64_t CachingBlockStore2::blockSizeFromPhysicalBlockSize(uint64_t blockSize)
 
 void CachingBlockStore2::forEachBlock(std::function<void (const BlockId &)> callback) const {
 {
-unique_lock<mutex> lock(_cachedBlocksNotInBaseStoreMutex);
+const unique_lock<mutex> lock(_cachedBlocksNotInBaseStoreMutex);
 for (const BlockId &blockId : _cachedBlocksNotInBaseStore) {
 callback(blockId);
 }
@@ -71,7 +71,7 @@ Cache<Key, Value, MAX_ENTRIES>::~Cache() {
 template<class Key, class Value, uint32_t MAX_ENTRIES>
 boost::optional<Value> Cache<Key, Value, MAX_ENTRIES>::pop(const Key &key) {
 std::unique_lock<std::mutex> lock(_mutex);
-cpputils::MutexPoolLock<Key> lockEntryFromBeingPopped(&_currentlyFlushingEntries, key, &lock);
+const cpputils::MutexPoolLock<Key> lockEntryFromBeingPopped(&_currentlyFlushingEntries, key, &lock);

 auto found = _cachedBlocks.pop(key);
 if (!found) {
@@ -132,7 +132,7 @@ void Cache<Key, Value, MAX_ENTRIES>::_deleteOldEntriesParallel() {
 template<class Key, class Value, uint32_t MAX_ENTRIES>
 void Cache<Key, Value, MAX_ENTRIES>::_deleteMatchingEntriesAtBeginningParallel(std::function<bool (const CacheEntry<Key, Value> &)> matches) {
 // Twice the number of cores, so we use full CPU even if half the threads are doing I/O
-unsigned int numThreads = 2 * (std::max)(1u, std::thread::hardware_concurrency());
+const unsigned int numThreads = 2 * (std::max)(1u, std::thread::hardware_concurrency());
 std::vector<std::future<void>> waitHandles;
 for (unsigned int i = 0; i < numThreads; ++i) {
 waitHandles.push_back(std::async(std::launch::async, [this, matches] {
@@ -21,7 +21,7 @@ public:
 static cpputils::unique_ref<CompressedBlock> Decompress(cpputils::unique_ref<Block> baseBlock);

 CompressedBlock(cpputils::unique_ref<Block> baseBlock, cpputils::Data decompressedData);
-~CompressedBlock();
+~CompressedBlock() override;

 const void *data() const override;
 void write(const void *source, uint64_t offset, uint64_t size) override;
@@ -80,7 +80,7 @@ CompressedBlock<Compressor>::CompressedBlock(cpputils::unique_ref<Block> baseBlo

 template<class Compressor>
 CompressedBlock<Compressor>::~CompressedBlock() {
-std::unique_lock<std::mutex> lock(_mutex);
+const std::unique_lock<std::mutex> lock(_mutex);
 _compressToBaseBlock();
 }

@@ -97,7 +97,7 @@ void CompressedBlock<Compressor>::write(const void *source, uint64_t offset, uin

 template<class Compressor>
 void CompressedBlock<Compressor>::flush() {
-std::unique_lock<std::mutex> lock(_mutex);
+const std::unique_lock<std::mutex> lock(_mutex);
 _compressToBaseBlock();
 return _baseBlock->flush();
 }
@@ -12,7 +12,7 @@ template<class Compressor>
 class CompressingBlockStore final: public BlockStore {
 public:
 CompressingBlockStore(cpputils::unique_ref<BlockStore> baseBlockStore);
-~CompressingBlockStore();
+~CompressingBlockStore() override;

 BlockId createBlockId() override;
 boost::optional<cpputils::unique_ref<Block>> tryCreate(const BlockId &blockId, cpputils::Data data) override;
@@ -84,7 +84,7 @@ namespace blockstore {
 }

 Data RunLengthEncoding::_extractData(ostringstream *stream) {
-string str = stream->str();
+const string str = stream->str();
 Data data(str.size());
 std::memcpy(data.data(), str.c_str(), str.size());
 return data;
@@ -71,7 +71,7 @@ inline EncryptedBlockStore2<Cipher>::EncryptedBlockStore2(cpputils::unique_ref<B

 template<class Cipher>
 inline bool EncryptedBlockStore2<Cipher>::tryCreate(const BlockId &blockId, const cpputils::Data &data) {
-cpputils::Data encrypted = _encrypt(data);
+const cpputils::Data encrypted = _encrypt(data);
 return _baseBlockStore->tryCreate(blockId, encrypted);
 }

@@ -92,7 +92,7 @@ inline boost::optional<cpputils::Data> EncryptedBlockStore2<Cipher>::load(const

 template<class Cipher>
 inline void EncryptedBlockStore2<Cipher>::store(const BlockId &blockId, const cpputils::Data &data) {
-cpputils::Data encrypted = _encrypt(data);
+const cpputils::Data encrypted = _encrypt(data);
 return _baseBlockStore->store(blockId, encrypted);
 }

@@ -108,7 +108,7 @@ inline uint64_t EncryptedBlockStore2<Cipher>::estimateNumFreeBytes() const {

 template<class Cipher>
 inline uint64_t EncryptedBlockStore2<Cipher>::blockSizeFromPhysicalBlockSize(uint64_t blockSize) const {
-uint64_t baseBlockSize = _baseBlockStore->blockSizeFromPhysicalBlockSize(blockSize);
+const uint64_t baseBlockSize = _baseBlockStore->blockSizeFromPhysicalBlockSize(blockSize);
 if (baseBlockSize <= Cipher::ciphertextSize(0) + sizeof(FORMAT_VERSION_HEADER)) {
 return 0;
 }
@@ -122,7 +122,7 @@ inline void EncryptedBlockStore2<Cipher>::forEachBlock(std::function<void (const

 template<class Cipher>
 inline cpputils::Data EncryptedBlockStore2<Cipher>::_encrypt(const cpputils::Data &data) const {
-cpputils::Data encrypted = Cipher::encrypt(static_cast<const CryptoPP::byte*>(data.data()), data.size(), _encKey);
+const cpputils::Data encrypted = Cipher::encrypt(static_cast<const CryptoPP::byte*>(data.data()), data.size(), _encKey);
 return _prependFormatHeaderToData(encrypted);
 }

@@ -9,7 +9,6 @@ using std::make_pair;
 using std::vector;
 using cpputils::Data;
 using boost::optional;
-using boost::none;

 namespace blockstore {
 namespace inmemory {
@@ -18,7 +17,7 @@ InMemoryBlockStore2::InMemoryBlockStore2()
 : _blocks() {}

 bool InMemoryBlockStore2::tryCreate(const BlockId &blockId, const Data &data) {
-std::unique_lock<std::mutex> lock(_mutex);
+const std::unique_lock<std::mutex> lock(_mutex);
 return _tryCreate(blockId, data);
 }

@@ -28,7 +27,7 @@ bool InMemoryBlockStore2::_tryCreate(const BlockId &blockId, const Data &data) {
 }

 bool InMemoryBlockStore2::remove(const BlockId &blockId) {
-std::unique_lock<std::mutex> lock(_mutex);
+const std::unique_lock<std::mutex> lock(_mutex);
 auto found = _blocks.find(blockId);
 if (found == _blocks.end()) {
 // BlockId not found
@@ -40,7 +39,7 @@ bool InMemoryBlockStore2::remove(const BlockId &blockId) {
 }

 optional<Data> InMemoryBlockStore2::load(const BlockId &blockId) const {
-std::unique_lock<std::mutex> lock(_mutex);
+const std::unique_lock<std::mutex> lock(_mutex);
 auto found = _blocks.find(blockId);
 if (found == _blocks.end()) {
 return boost::none;
@@ -49,10 +48,10 @@ optional<Data> InMemoryBlockStore2::load(const BlockId &blockId) const {
 }

 void InMemoryBlockStore2::store(const BlockId &blockId, const Data &data) {
-std::unique_lock<std::mutex> lock(_mutex);
+const std::unique_lock<std::mutex> lock(_mutex);
 auto found = _blocks.find(blockId);
 if (found == _blocks.end()) {
-bool success = _tryCreate(blockId, data);
+const bool success = _tryCreate(blockId, data);
 if (!success) {
 throw std::runtime_error("Could neither save nor create the block in InMemoryBlockStore::store()");
 }
@@ -63,7 +62,7 @@ void InMemoryBlockStore2::store(const BlockId &blockId, const Data &data) {
 }

 uint64_t InMemoryBlockStore2::numBlocks() const {
-std::unique_lock<std::mutex> lock(_mutex);
+const std::unique_lock<std::mutex> lock(_mutex);
 return _blocks.size();
 }

@@ -76,7 +75,7 @@ uint64_t InMemoryBlockStore2::blockSizeFromPhysicalBlockSize(uint64_t blockSize)
 }

 vector<BlockId> InMemoryBlockStore2::_allBlockIds() const {
-std::unique_lock<std::mutex> lock(_mutex);
+const std::unique_lock<std::mutex> lock(_mutex);
 vector<BlockId> result;
 result.reserve(_blocks.size());
 for (const auto &entry : _blocks) {
@@ -50,8 +50,8 @@ void IntegrityBlockStore2::_checkFormatHeader(const Data &data) const {
 }

 bool IntegrityBlockStore2::_checkVersionHeader(const BlockId &blockId, const Data &data) const {
-uint32_t clientId = _readClientId(data);
-uint64_t version = _readVersion(data);
+const uint32_t clientId = _readClientId(data);
+const uint64_t version = _readVersion(data);

 if(!_knownBlockVersions.checkAndUpdateVersion(clientId, blockId, version)) {
 integrityViolationDetected("The block version number is too low. Did an attacker try to roll back the block or to re-introduce a deleted block?");
@@ -64,7 +64,7 @@ bool IntegrityBlockStore2::_checkVersionHeader(const BlockId &blockId, const Dat
 bool IntegrityBlockStore2::_checkIdHeader(const BlockId &expectedBlockId, const Data &data) const {
 // The obvious reason for this is to prevent adversaries from renaming blocks, but storing the block id in this way also
 // makes the authenticated cipher more robust, see https://libsodium.gitbook.io/doc/secret-key_cryptography/aead#robustness
-BlockId actualBlockId = _readBlockId(data);
+const BlockId actualBlockId = _readBlockId(data);
 if (expectedBlockId != actualBlockId) {
 integrityViolationDetected("The block id is wrong. Did an attacker try to rename some blocks?");
 return false;
@@ -110,8 +110,8 @@ IntegrityBlockStore2::IntegrityBlockStore2(unique_ref<BlockStore2> baseBlockStor
 }

 bool IntegrityBlockStore2::tryCreate(const BlockId &blockId, const Data &data) {
-uint64_t version = _knownBlockVersions.incrementVersion(blockId);
-Data dataWithHeader = _prependHeaderToData(blockId, _knownBlockVersions.myClientId(), version, data);
+const uint64_t version = _knownBlockVersions.incrementVersion(blockId);
+const Data dataWithHeader = _prependHeaderToData(blockId, _knownBlockVersions.myClientId(), version, data);
 return _baseBlockStore->tryCreate(blockId, dataWithHeader);
 }

@@ -130,11 +130,11 @@ optional<Data> IntegrityBlockStore2::load(const BlockId &blockId) const {
 }
 #ifndef CRYFS_NO_COMPATIBILITY
 if (FORMAT_VERSION_HEADER_OLD == _readFormatHeader(*loaded)) {
-Data migrated = _migrateBlock(blockId, *loaded);
+const Data migrated = _migrateBlock(blockId, *loaded);
 if (!_checkHeader(blockId, migrated) && !_allowIntegrityViolations) {
 return optional<Data>(none);
 }
-Data content = _removeHeader(migrated);
+const Data content = _removeHeader(migrated);
 const_cast<IntegrityBlockStore2*>(this)->store(blockId, content);
 return optional<Data>(_removeHeader(migrated));
 }
@@ -157,8 +157,8 @@ Data IntegrityBlockStore2::_migrateBlock(const BlockId &blockId, const Data &dat
 #endif

 void IntegrityBlockStore2::store(const BlockId &blockId, const Data &data) {
-uint64_t version = _knownBlockVersions.incrementVersion(blockId);
-Data dataWithHeader = _prependHeaderToData(blockId, _knownBlockVersions.myClientId(), version, data);
+const uint64_t version = _knownBlockVersions.incrementVersion(blockId);
+const Data dataWithHeader = _prependHeaderToData(blockId, _knownBlockVersions.myClientId(), version, data);
 return _baseBlockStore->store(blockId, dataWithHeader);
 }

@@ -171,7 +171,7 @@ uint64_t IntegrityBlockStore2::estimateNumFreeBytes() const {
 }

 uint64_t IntegrityBlockStore2::blockSizeFromPhysicalBlockSize(uint64_t blockSize) const {
-uint64_t baseBlockSize = _baseBlockStore->blockSizeFromPhysicalBlockSize(blockSize);
+const uint64_t baseBlockSize = _baseBlockStore->blockSizeFromPhysicalBlockSize(blockSize);
 if (baseBlockSize <= HEADER_LENGTH) {
 return 0;
 }
@@ -221,9 +221,9 @@ void IntegrityBlockStore2::migrateBlockFromBlockstoreWithoutVersionNumbers(block
 return;
 }

-uint64_t version = knownBlockVersions->incrementVersion(blockId);
-cpputils::Data data = std::move(*data_);
-cpputils::Data dataWithHeader = _prependHeaderToData(blockId, knownBlockVersions->myClientId(), version, data);
+const uint64_t version = knownBlockVersions->incrementVersion(blockId);
+const cpputils::Data data = std::move(*data_);
+const cpputils::Data dataWithHeader = _prependHeaderToData(blockId, knownBlockVersions->myClientId(), version, data);
 baseBlockStore->store(blockId, dataWithHeader);
 }
 #endif
@@ -23,25 +23,27 @@ constexpr uint32_t KnownBlockVersions::CLIENT_ID_FOR_DELETED_BLOCK;

 KnownBlockVersions::KnownBlockVersions(const bf::path &stateFilePath, uint32_t myClientId)
 :_integrityViolationOnPreviousRun(false), _knownVersions(), _lastUpdateClientId(), _stateFilePath(stateFilePath), _myClientId(myClientId), _mutex(), _valid(true) {
-unique_lock<mutex> lock(_mutex);
+const unique_lock<mutex> lock(_mutex);
 ASSERT(_myClientId != CLIENT_ID_FOR_DELETED_BLOCK, "This is not a valid client id");
 _loadStateFile();
 }

 KnownBlockVersions::KnownBlockVersions(KnownBlockVersions &&rhs) // NOLINT (intentionally not noexcept)
 : _integrityViolationOnPreviousRun(false), _knownVersions(), _lastUpdateClientId(), _stateFilePath(), _myClientId(0), _mutex(), _valid(true) {
-unique_lock<mutex> rhsLock(rhs._mutex);
-unique_lock<mutex> lock(_mutex);
+const unique_lock<mutex> rhsLock(rhs._mutex);
+const unique_lock<mutex> lock(_mutex);
+// NOLINTBEGIN(cppcoreguidelines-prefer-member-initializer) -- we need to initialize those within the mutexes
 _integrityViolationOnPreviousRun = rhs._integrityViolationOnPreviousRun;
 _knownVersions = std::move(rhs._knownVersions);
 _lastUpdateClientId = std::move(rhs._lastUpdateClientId);
 _stateFilePath = std::move(rhs._stateFilePath);
 _myClientId = rhs._myClientId;
 rhs._valid = false;
+// NOLINTEND(cppcoreguidelines-prefer-member-initializer)
 }

 KnownBlockVersions::~KnownBlockVersions() {
-unique_lock<mutex> lock(_mutex);
+const unique_lock<mutex> lock(_mutex);
 if (_valid) {
 _saveStateFile();
 }
@@ -56,7 +58,7 @@ bool KnownBlockVersions::integrityViolationOnPreviousRun() const {
 }

 bool KnownBlockVersions::checkAndUpdateVersion(uint32_t clientId, const BlockId &blockId, uint64_t version) {
-unique_lock<mutex> lock(_mutex);
+const unique_lock<mutex> lock(_mutex);
 ASSERT(clientId != CLIENT_ID_FOR_DELETED_BLOCK, "This is not a valid client id");

 ASSERT(version > 0, "Version has to be >0"); // Otherwise we wouldn't handle notexisting entries correctly.
@@ -81,9 +83,9 @@ bool KnownBlockVersions::checkAndUpdateVersion(uint32_t clientId, const BlockId
 }

 uint64_t KnownBlockVersions::incrementVersion(const BlockId &blockId) {
-unique_lock<mutex> lock(_mutex);
+const unique_lock<mutex> lock(_mutex);
 uint64_t &found = _knownVersions[{_myClientId, blockId}]; // If the entry doesn't exist, this creates it with value 0.
-uint64_t newVersion = found + 1;
+const uint64_t newVersion = found + 1;
 if (newVersion == std::numeric_limits<uint64_t>::max()) {
 // It's *very* unlikely we ever run out of version numbers in 64bit...but just to be sure...
 throw std::runtime_error("Version overflow");
@@ -138,7 +140,7 @@ void KnownBlockVersions::_saveStateFile() const {
 }

 std::unordered_map<ClientIdAndBlockId, uint64_t> KnownBlockVersions::_deserializeKnownVersions(Deserializer *deserializer) {
-uint64_t numEntries = deserializer->readUint64();
+const uint64_t numEntries = deserializer->readUint64();
 std::unordered_map<ClientIdAndBlockId, uint64_t> result;
 result.reserve(static_cast<uint64_t>(1.2 * numEntries)); // Reserve for factor 1.2 more, so the file system doesn't immediately have to resize it on the first new block.
 for (uint64_t i = 0 ; i < numEntries; ++i) {
@@ -150,7+152,7 @@ std::unordered_map<ClientIdAndBlockId, uint64_t> KnownBlockVersions::_deserializ
 }

 void KnownBlockVersions::_serializeKnownVersions(Serializer *serializer, const std::unordered_map<ClientIdAndBlockId, uint64_t>& knownVersions) {
-uint64_t numEntries = knownVersions.size();
+const uint64_t numEntries = knownVersions.size();
 serializer->writeUint64(numEntries);

 for (const auto &entry : knownVersions) {
@@ -159,9 +161,9 @@ void KnownBlockVersions::_serializeKnownVersions(Serializer *serializer, const s
 }

 pair<ClientIdAndBlockId, uint64_t> KnownBlockVersions::_deserializeKnownVersionsEntry(Deserializer *deserializer) {
-uint32_t clientId = deserializer->readUint32();
-BlockId blockId(deserializer->readFixedSizeData<BlockId::BINARY_LENGTH>());
-uint64_t version = deserializer->readUint64();
+const uint32_t clientId = deserializer->readUint32();
+const BlockId blockId(deserializer->readFixedSizeData<BlockId::BINARY_LENGTH>());
+const uint64_t version = deserializer->readUint64();

 return {{clientId, blockId}, version};
 };
@@ -173,7 +175,7 @@ void KnownBlockVersions::_serializeKnownVersionsEntry(Serializer *serializer, co
 }

 std::unordered_map<BlockId, uint32_t> KnownBlockVersions::_deserializeLastUpdateClientIds(Deserializer *deserializer) {
-uint64_t numEntries = deserializer->readUint64();
+const uint64_t numEntries = deserializer->readUint64();
 std::unordered_map<BlockId, uint32_t> result;
 result.reserve(static_cast<uint64_t>(1.2 * numEntries)); // Reserve for factor 1.2 more, so the file system doesn't immediately have to resize it on the first new block.
 for (uint64_t i = 0 ; i < numEntries; ++i) {
@@ -184,7 +186,7 @@ std::unordered_map<BlockId, uint32_t> KnownBlockVersions::_deserializeLastUpdate
 }

 void KnownBlockVersions::_serializeLastUpdateClientIds(Serializer *serializer, const std::unordered_map<BlockId, uint32_t>& lastUpdateClientId) {
-uint64_t numEntries = lastUpdateClientId.size();
+const uint64_t numEntries = lastUpdateClientId.size();
 serializer->writeUint64(numEntries);

 for (const auto &entry : lastUpdateClientId) {
@@ -193,8 +195,8 @@ void KnownBlockVersions::_serializeLastUpdateClientIds(Serializer *serializer, c
 }

 pair<BlockId, uint32_t> KnownBlockVersions::_deserializeLastUpdateClientIdEntry(Deserializer *deserializer) {
-BlockId blockId(deserializer->readFixedSizeData<BlockId::BINARY_LENGTH>());
-uint32_t clientId = deserializer->readUint32();
+const BlockId blockId(deserializer->readFixedSizeData<BlockId::BINARY_LENGTH>());
+const uint32_t clientId = deserializer->readUint32();

 return {blockId, clientId};
 };
@@ -209,7 +211,7 @@ uint32_t KnownBlockVersions::myClientId() const {
 }

 uint64_t KnownBlockVersions::getBlockVersion(uint32_t clientId, const BlockId &blockId) const {
-unique_lock<mutex> lock(_mutex);
+const unique_lock<mutex> lock(_mutex);
 return _knownVersions.at({clientId, blockId});
 }

@@ -13,7 +13,7 @@ namespace blockstore {
 namespace lowtohighlevel {

 optional<unique_ref<LowToHighLevelBlock>> LowToHighLevelBlock::TryCreateNew(BlockStore2 *baseBlockStore, const BlockId &blockId, Data data) {
-bool success = baseBlockStore->tryCreate(blockId, data);
+const bool success = baseBlockStore->tryCreate(blockId, data);
 if (!success) {
 return none;
 }
@@ -43,7 +43,7 @@ LowToHighLevelBlock::LowToHighLevelBlock(const BlockId &blockId, Data data, Bloc
 }

 LowToHighLevelBlock::~LowToHighLevelBlock() {
-unique_lock<mutex> lock(_mutex);
+const unique_lock<mutex> lock(_mutex);
 _storeToBaseBlock();
 }

@@ -58,7 +58,7 @@ void LowToHighLevelBlock::write(const void *source, uint64_t offset, uint64_t co
 }

 void LowToHighLevelBlock::flush() {
-unique_lock<mutex> lock(_mutex);
+const unique_lock<mutex> lock(_mutex);
 _storeToBaseBlock();
 }

@@ -28,7 +28,7 @@ public:
 static boost::optional<cpputils::unique_ref<LowToHighLevelBlock>> Load(BlockStore2 *baseBlockStore, const BlockId &blockId);

 LowToHighLevelBlock(const BlockId &blockId, cpputils::Data data, BlockStore2 *baseBlockStore);
-~LowToHighLevelBlock();
+~LowToHighLevelBlock() override;

 const void *data() const override;
 void write(const void *source, uint64_t offset, uint64_t count) override;
@@ -44,7 +44,7 @@ optional<unique_ref<Block>> LowToHighLevelBlockStore::load(const BlockId &blockI
 }

 void LowToHighLevelBlockStore::remove(const BlockId &blockId) {
-bool success = _baseBlockStore->remove(blockId);
+const bool success = _baseBlockStore->remove(blockId);
 if (!success) {
 throw std::runtime_error("Couldn't delete block with id " + blockId.ToString());
 }
@@ -113,27 +113,27 @@ namespace blockstore {

 private:
 void _increaseNumCreatedBlocks() {
-std::unique_lock<std::mutex> lock(_mutex);
+const std::unique_lock<std::mutex> lock(_mutex);
 _createdBlocks += 1;
 }

 void _increaseNumLoadedBlocks(const BlockId &blockId) {
-std::unique_lock<std::mutex> lock(_mutex);
+const std::unique_lock<std::mutex> lock(_mutex);
 _loadedBlocks.push_back(blockId);
 }

 void _increaseNumRemovedBlocks(const BlockId &blockId) {
-std::unique_lock<std::mutex> lock(_mutex);
+const std::unique_lock<std::mutex> lock(_mutex);
 _removedBlocks.push_back(blockId);
 }

 void _increaseNumResizedBlocks(const BlockId &blockId) {
-std::unique_lock<std::mutex> lock(_mutex);
+const std::unique_lock<std::mutex> lock(_mutex);
 _resizedBlocks.push_back(blockId);
 }

 void _increaseNumWrittenBlocks(const BlockId &blockId) {
-std::unique_lock<std::mutex> lock(_mutex);
+const std::unique_lock<std::mutex> lock(_mutex);
 _writtenBlocks.push_back(blockId);
 }

@@ -19,7 +19,7 @@ constexpr const char* ALLOWED_BLOCKID_CHARACTERS = "0123456789ABCDEF";
 }

 boost::filesystem::path OnDiskBlockStore2::_getFilepath(const BlockId &blockId) const {
-std::string blockIdStr = blockId.ToString();
+const std::string blockIdStr = blockId.ToString();
 return _rootDir / blockIdStr.substr(0, PREFIX_LENGTH) / blockIdStr.substr(PREFIX_LENGTH);
 }

@@ -66,7 +66,7 @@ bool OnDiskBlockStore2::remove(const BlockId &blockId) {
 if (!boost::filesystem::is_regular_file(filepath)) { // TODO Is this branch necessary?
 return false;
 }
-bool retval = boost::filesystem::remove(filepath);
+const bool retval = boost::filesystem::remove(filepath);
 if (!retval) {
 cpputils::logging::LOG(cpputils::logging::ERR, "Couldn't find block {} to remove", blockId.ToString());
 return false;
@@ -121,14 +121,14 @@ void OnDiskBlockStore2::forEachBlock(std::function<void (const BlockId &)> callb
 continue;
 }

-std::string blockIdPrefix = prefixDir->path().filename().string();
+const std::string blockIdPrefix = prefixDir->path().filename().string();
 if (blockIdPrefix.size() != PREFIX_LENGTH || std::string::npos != blockIdPrefix.find_first_not_of(ALLOWED_BLOCKID_CHARACTERS)) {
 // directory has wrong length or an invalid character
 continue;
 }

 for (auto block = boost::filesystem::directory_iterator(prefixDir->path()); block != boost::filesystem::directory_iterator(); ++block) {
-std::string blockIdPostfix = block->path().filename().string();
+const std::string blockIdPostfix = block->path().filename().string();
 if (blockIdPostfix.size() != POSTFIX_LENGTH || std::string::npos != blockIdPostfix.find_first_not_of(ALLOWED_BLOCKID_CHARACTERS)) {
 // filename has wrong length or an invalid character
 continue;
@@ -60,7 +60,7 @@ unique_ref<Block> ParallelAccessBlockStore::overwrite(const BlockId &blockId, Da
 }

 void ParallelAccessBlockStore::remove(unique_ref<Block> block) {
-BlockId blockId = block->blockId();
+const BlockId blockId = block->blockId();
 auto block_ref = dynamic_pointer_move<BlockRef>(block);
 ASSERT(block_ref != none, "Block is not a BlockRef");
 return _parallelAccessStore.remove(blockId, std::move(*block_ref));
@@ -14,7 +14,7 @@ class FakeBlockStore;
 class FakeBlock final: public Block {
 public:
 FakeBlock(FakeBlockStore *store, const BlockId &blockId, std::shared_ptr<cpputils::Data> data, bool dirty);
-~FakeBlock();
+~FakeBlock() override;

 const void *data() const override;
 void write(const void *source, uint64_t offset, uint64_t size) override;
@@ -23,7 +23,7 @@ BlockId FakeBlockStore::createBlockId() {
 }

 optional<unique_ref<Block>> FakeBlockStore::tryCreate(const BlockId &blockId, Data data) {
-std::unique_lock<std::mutex> lock(_mutex);
+const std::unique_lock<std::mutex> lock(_mutex);
 auto insert_result = _blocks.emplace(blockId, std::move(data));

 if (!insert_result.second) {
@@ -35,7 +35,7 @@ optional<unique_ref<Block>> FakeBlockStore::tryCreate(const BlockId &blockId, Da
 }

 unique_ref<Block> FakeBlockStore::overwrite(const BlockId &blockId, Data data) {
-std::unique_lock<std::mutex> lock(_mutex);
+const std::unique_lock<std::mutex> lock(_mutex);
 auto insert_result = _blocks.emplace(blockId, data.copy());

 if (!insert_result.second) {
@@ -50,7 +50,7 @@ unique_ref<Block> FakeBlockStore::overwrite(const BlockId &blockId, Data data) {
 }

 optional<unique_ref<Block>> FakeBlockStore::load(const BlockId &blockId) {
-std::unique_lock<std::mutex> lock(_mutex);
+const std::unique_lock<std::mutex> lock(_mutex);
 return _load(blockId);
 }

@@ -64,8 +64,8 @@ optional<unique_ref<Block>> FakeBlockStore::_load(const BlockId &blockId) {
 }

 void FakeBlockStore::remove(const BlockId &blockId) {
-std::unique_lock<std::mutex> lock(_mutex);
-int numRemoved = _blocks.erase(blockId);
+const std::unique_lock<std::mutex> lock(_mutex);
+const size_t numRemoved = _blocks.erase(blockId);
 ASSERT(numRemoved == 1, "Block not found");
 }

@@ -76,7 +76,7 @@ unique_ref<Block> FakeBlockStore::makeFakeBlockFromData(const BlockId &blockId,
 }

 void FakeBlockStore::updateData(const BlockId &blockId, const Data &data) {
-std::unique_lock<std::mutex> lock(_mutex);
+const std::unique_lock<std::mutex> lock(_mutex);
 auto found = _blocks.find(blockId);
 if (found == _blocks.end()) {
 auto insertResult = _blocks.emplace(blockId, data.copy());
@@ -88,7 +88,7 @@ void FakeBlockStore::updateData(const BlockId &blockId, const Data &data) {
 }

 uint64_t FakeBlockStore::numBlocks() const {
-std::unique_lock<std::mutex> lock(_mutex);
+const std::unique_lock<std::mutex> lock(_mutex);
 return _blocks.size();
 }

@@ -34,7 +34,7 @@ public:
 virtual void forEachBlock(std::function<void (const BlockId &)> callback) const = 0;

 virtual void remove(cpputils::unique_ref<Block> block) {
-BlockId blockId = block->blockId();
+const BlockId blockId = block->blockId();
 cpputils::destruct(std::move(block));
 remove(blockId);
 }
@@ -33,7 +33,7 @@ public:
 BlockId create(const cpputils::Data& data) {
 while (true) {
 BlockId blockId = createBlockId();
-bool success = tryCreate(blockId, data);
+const bool success = tryCreate(blockId, data);
 if (success) {
 return blockId;
 }
@@ -2,7 +2,6 @@

 namespace bf = boost::filesystem;

-using std::runtime_error;
 using std::string;

 namespace blockstore {
@@ -11,7 +11,7 @@ namespace blockstore {
 class FileDoesntExistException final: public std::runtime_error {
 public:
 explicit FileDoesntExistException(const boost::filesystem::path &filepath);
-~FileDoesntExistException();
+~FileDoesntExistException() override;
 };

 }
@@ -61,17 +61,7 @@ if(MSVC)
 target_link_libraries(${PROJECT_NAME} PUBLIC DbgHelp)
 elseif (APPLE)
 target_compile_definitions(${PROJECT_NAME} PRIVATE BOOST_STACKTRACE_GNU_SOURCE_NOT_REQUIRED)
-else()
-find_program(ADDR2LINE addr2line)
-if ("${ADDR2LINE}" STREQUAL "ADDR2LINE-NOTFOUND")
-message(WARNING "addr2line not found. Backtraces will be reduced.")
-else()
-message(STATUS "addr2line found. Using it for backtraces.")
-target_compile_definitions(${PROJECT_NAME} PRIVATE BOOST_STACKTRACE_USE_ADDR2LINE)
-target_compile_definitions(${PROJECT_NAME} PRIVATE BOOST_STACKTRACE_ADDR2LINE_LOCATION=${ADDR2LINE})
 endif()
-endif()
-

 find_package(Threads REQUIRED)
 target_link_libraries(${PROJECT_NAME} PUBLIC ${CMAKE_THREAD_LIBS_INIT})
@@ -39,9 +39,9 @@ void showBacktraceOnCrash() {
 // the signal handler RAII objects will be initialized on first call (which will register the signal handler)
 // and destroyed on program exit (which will unregister the signal handler)

-static SignalHandlerRAII<&sigsegv_handler> segv(SIGSEGV);
-static SignalHandlerRAII<&sigabrt_handler> abrt(SIGABRT);
-static SignalHandlerRAII<&sigill_handler> ill(SIGILL);
+static const SignalHandlerRAII<&sigsegv_handler> segv(SIGSEGV);
+static const SignalHandlerRAII<&sigabrt_handler> abrt(SIGABRT);
+static const SignalHandlerRAII<&sigill_handler> ill(SIGILL);
 }

 }
@@ -8,7 +8,7 @@ using namespace cpputils::logging;

 namespace cpputils {
 Data RandomPadding::add(const Data &data, size_t targetSize) {
-uint32_t size = data.size();
+const uint32_t size = data.size();
 if (size >= targetSize - sizeof(size)) {
 throw std::runtime_error("Data too large. We should increase padding target size.");
 }
@@ -22,7 +22,7 @@ namespace cpputils {
 }

 optional<Data> RandomPadding::remove(const Data &data) {
-uint32_t size = deserialize<uint32_t>(data.data());
+const uint32_t size = deserialize<uint32_t>(data.data());
 if(sizeof(size) + size >= data.size()) {
 LOG(ERR, "Config file is invalid: Invalid padding.");
 return boost::none;
@@ -2,7 +2,6 @@
 #include <cpp-utils/random/Random.h>
 #include <vendor_cryptopp/sha.h>

-using cpputils::Random;
 using CryptoPP::SHA512;

 namespace cpputils {
@@ -2,7 +2,6 @@

 using std::istream;
 using std::ostream;
-using cpputils::Data;

 namespace cpputils {
 Data SCryptParameters::serialize() const {
@@ -20,9 +19,9 @@ namespace cpputils {

 SCryptParameters SCryptParameters::deserialize(const cpputils::Data &data) {
 Deserializer deserializer(&data);
-uint64_t n = deserializer.readUint64();
-uint32_t r = deserializer.readUint32();
-uint32_t p = deserializer.readUint32();
+const uint64_t n = deserializer.readUint64();
+const uint32_t r = deserializer.readUint32();
+const uint32_t p = deserializer.readUint32();
 Data salt = deserializer.readTailData();
 deserializer.finished();
 return SCryptParameters(std::move(salt), n, r, p);
@@ -30,9 +29,9 @@ namespace cpputils {

 #ifndef CRYFS_NO_COMPATIBILITY
 SCryptParameters SCryptParameters::deserializeOldFormat(Deserializer *source) {
-uint64_t n = source->readUint64();
-uint32_t r = source->readUint32();
-uint32_t p = source->readUint32();
+const uint64_t n = source->readUint64();
+const uint32_t r = source->readUint32();
+const uint32_t p = source->readUint32();
 Data salt = source->readData();
 return SCryptParameters(std::move(salt), n, r, p);
 }
@@ -13,7 +13,7 @@ namespace {
 EncryptionKey _derive(size_t keySize, const std::string& password, const SCryptParameters& kdfParameters) {
 auto result = EncryptionKey::Null(keySize);

-size_t status = CryptoPP::Scrypt().DeriveKey(
+const size_t status = CryptoPP::Scrypt().DeriveKey(
 static_cast<uint8_t*>(result.data()), result.binaryLength(),
 reinterpret_cast<const uint8_t*>(password.c_str()), password.size(),
 static_cast<const uint8_t*>(kdfParameters.salt().data()), kdfParameters.salt().size(),
@@ -36,13 +36,13 @@ SCrypt::SCrypt(const SCryptSettings& settingsForNewKeys)
 }

 EncryptionKey SCrypt::deriveExistingKey(size_t keySize, const std::string& password, const Data& kdfParameters) {
-SCryptParameters parameters = SCryptParameters::deserialize(kdfParameters);
+const SCryptParameters parameters = SCryptParameters::deserialize(kdfParameters);
 auto key = _derive(keySize, password, parameters);
 return key;
 }

 SCrypt::KeyResult SCrypt::deriveNewKey(size_t keySize, const std::string& password) {
-SCryptParameters kdfParameters = _createNewSCryptParameters(_settingsForNewKeys);
+const SCryptParameters kdfParameters = _createNewSCryptParameters(_settingsForNewKeys);
 auto key = _derive(keySize, password, kdfParameters);
 return SCrypt::KeyResult {
 key,
@@ -23,7 +23,7 @@ namespace cpputils {
 public:
 static constexpr SCryptSettings ParanoidSettings = SCryptSettings {32, 1048576, 8, 16};
 static constexpr SCryptSettings DefaultSettings = SCryptSettings {32, 1048576, 4, 8};
-static constexpr SCryptSettings TestSettings = SCryptSettings {32, 1024, 1, 1};
+static constexpr SCryptSettings TestSettings = SCryptSettings {32, 1024, 1, 2};

 explicit SCrypt(const SCryptSettings& settingsForNewKeys);

@@ -49,7 +49,7 @@ Data AEADCipher<CryptoPPCipher, KEYSIZE_, IV_SIZE_, TAG_SIZE_>::encrypt(const Cr
 Data ciphertext(ciphertextSize(plaintextSize));

 iv.ToBinary(ciphertext.data());
-CryptoPP::ArraySource(plaintext, plaintextSize, true,
+const CryptoPP::ArraySource _1(plaintext, plaintextSize, true,
 new CryptoPP::AuthenticatedEncryptionFilter(encryption,
 new CryptoPP::ArraySink(static_cast<CryptoPP::byte*>(ciphertext.data()) + IV_SIZE, ciphertext.size() - IV_SIZE),
 false, TAG_SIZE
@@ -73,7 +73,7 @@ boost::optional<Data> AEADCipher<CryptoPPCipher, KEYSIZE_, IV_SIZE_, TAG_SIZE_>:
 Data plaintext(plaintextSize(ciphertextSize));

 try {
-CryptoPP::ArraySource(static_cast<const CryptoPP::byte*>(ciphertextData), ciphertextSize - IV_SIZE, true,
+const CryptoPP::ArraySource _1(static_cast<const CryptoPP::byte*>(ciphertextData), ciphertextSize - IV_SIZE, true,
 new CryptoPP::AuthenticatedDecryptionFilter(decryption,
 new CryptoPP::ArraySink(static_cast<CryptoPP::byte*>(plaintext.data()), plaintext.size()),
 CryptoPP::AuthenticatedDecryptionFilter::DEFAULT_FLAGS, TAG_SIZE
@@ -19,10 +19,10 @@ public:
 same_type(UINT32_C(0), X::plaintextSize(UINT32_C(5)));
 same_type(UINT32_C(0), X::KEYSIZE);
 same_type(UINT32_C(0), X::STRING_KEYSIZE);
-typename X::EncryptionKey key = X::EncryptionKey::CreateKey(Random::OSRandom(), X::KEYSIZE);
+const typename X::EncryptionKey key = X::EncryptionKey::CreateKey(Random::OSRandom(), X::KEYSIZE);
 same_type(Data(0), X::encrypt(static_cast<uint8_t*>(nullptr), UINT32_C(0), key));
 same_type(boost::optional<Data>(Data(0)), X::decrypt(static_cast<uint8_t*>(nullptr), UINT32_C(0), key));
-string name = X::NAME;
+const string name = X::NAME;
 }

 private:
@ -59,14 +59,14 @@ namespace cpputils {
|
|||||||
Data result(ciphertextSize(plaintextSize));
|
Data result(ciphertextSize(plaintextSize));
|
||||||
|
|
||||||
//Add a random IV
|
//Add a random IV
|
||||||
uint64_t iv = std::uniform_int_distribution<uint64_t>()(random_);
|
const uint64_t iv = std::uniform_int_distribution<uint64_t>()(random_);
|
||||||
serialize<uint64_t>(result.data(), iv);
|
serialize<uint64_t>(result.data(), iv);
|
||||||
|
|
||||||
//Use xor chiffre on plaintext
|
//Use xor chiffre on plaintext
|
||||||
_xor(static_cast<CryptoPP::byte*>(result.dataOffset(sizeof(uint64_t))), plaintext, plaintextSize, encKey.value ^ iv);
|
_xor(static_cast<CryptoPP::byte*>(result.dataOffset(sizeof(uint64_t))), plaintext, plaintextSize, encKey.value ^ iv);
|
||||||
|
|
||||||
//Add checksum information
|
//Add checksum information
|
||||||
uint64_t checksum = _checksum(static_cast<const CryptoPP::byte*>(result.data()), encKey, plaintextSize + sizeof(uint64_t));
|
const uint64_t checksum = _checksum(static_cast<const CryptoPP::byte*>(result.data()), encKey, plaintextSize + sizeof(uint64_t));
|
||||||
serialize<uint64_t>(result.dataOffset(plaintextSize + sizeof(uint64_t)), checksum);
|
serialize<uint64_t>(result.dataOffset(plaintextSize + sizeof(uint64_t)), checksum);
|
||||||
|
|
||||||
return result;
|
return result;
|
||||||
@ -80,14 +80,14 @@ namespace cpputils {
|
|||||||
}
|
}
|
||||||
|
|
||||||
//Check checksum
|
//Check checksum
|
||||||
uint64_t expectedParity = _checksum(ciphertext, encKey, plaintextSize(ciphertextSize) + sizeof(uint64_t));
|
const uint64_t expectedParity = _checksum(ciphertext, encKey, plaintextSize(ciphertextSize) + sizeof(uint64_t));
|
||||||
uint64_t actualParity = deserialize<uint64_t>(ciphertext + plaintextSize(ciphertextSize) + sizeof(uint64_t));
|
const uint64_t actualParity = deserialize<uint64_t>(ciphertext + plaintextSize(ciphertextSize) + sizeof(uint64_t));
|
||||||
if (expectedParity != actualParity) {
|
if (expectedParity != actualParity) {
|
||||||
return boost::none;
|
return boost::none;
|
||||||
}
|
}
|
||||||
|
|
||||||
//Decrypt xor cipher from ciphertext
|
//Decrypt xor cipher from ciphertext
|
||||||
uint64_t iv = deserialize<uint64_t>(ciphertext);
|
const uint64_t iv = deserialize<uint64_t>(ciphertext);
|
||||||
Data result(plaintextSize(ciphertextSize));
|
Data result(plaintextSize(ciphertextSize));
|
||||||
_xor(static_cast<CryptoPP::byte *>(result.data()), ciphertext + sizeof(uint64_t), plaintextSize(ciphertextSize), encKey.value ^ iv);
|
_xor(static_cast<CryptoPP::byte *>(result.data()), ciphertext + sizeof(uint64_t), plaintextSize(ciphertextSize), encKey.value ^ iv);
|
||||||
|
|
||||||
|
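The _xor helper used by encrypt() and decrypt() above is not part of this hunk. A hypothetical sketch that matches the call sites -- a test-only XOR stream, not a real cipher -- XORing every byte with a repeating byte stream derived from the 64-bit key:

inline void _xor(CryptoPP::byte *target, const void *source, size_t size, uint64_t key) {
    const auto *src = static_cast<const CryptoPP::byte*>(source);
    for (size_t i = 0; i < size; ++i) {
        const auto keyByte = static_cast<CryptoPP::byte>(key >> (8 * (i % sizeof(key))));
        target[i] = src[i] ^ keyByte;
    }
}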
@ -39,7 +39,7 @@ std::streampos Data::_getStreamSize(istream &stream) {
|
|||||||
|
|
||||||
Data Data::LoadFromStream(istream &stream, size_t size) {
|
Data Data::LoadFromStream(istream &stream, size_t size) {
|
||||||
Data result(size);
|
Data result(size);
|
||||||
stream.read(static_cast<char*>(result.data()), result.size());
|
stream.read(static_cast<char*>(result.data()), static_cast<std::streamsize>(result.size()));
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -47,7 +47,7 @@ Data Data::FromString(const std::string &data, unique_ref<Allocator> allocator)
|
|||||||
ASSERT(data.size() % 2 == 0, "hex encoded data cannot have odd number of characters");
|
ASSERT(data.size() % 2 == 0, "hex encoded data cannot have odd number of characters");
|
||||||
Data result(data.size() / 2, std::move(allocator));
|
Data result(data.size() / 2, std::move(allocator));
|
||||||
{
|
{
|
||||||
CryptoPP::StringSource _1(data, true,
|
const CryptoPP::StringSource _1(data, true,
|
||||||
new CryptoPP::HexDecoder(
|
new CryptoPP::HexDecoder(
|
||||||
new CryptoPP::ArraySink(static_cast<CryptoPP::byte*>(result._data), result.size())
|
new CryptoPP::ArraySink(static_cast<CryptoPP::byte*>(result._data), result.size())
|
||||||
)
|
)
|
||||||
@ -59,7 +59,7 @@ Data Data::FromString(const std::string &data, unique_ref<Allocator> allocator)
|
|||||||
std::string Data::ToString() const {
|
std::string Data::ToString() const {
|
||||||
std::string result;
|
std::string result;
|
||||||
{
|
{
|
||||||
CryptoPP::ArraySource _1(static_cast<const CryptoPP::byte*>(_data), _size, true,
|
const CryptoPP::ArraySource _1(static_cast<const CryptoPP::byte*>(_data), _size, true,
|
||||||
new CryptoPP::HexEncoder(
|
new CryptoPP::HexEncoder(
|
||||||
new CryptoPP::StringSink(result)
|
new CryptoPP::StringSink(result)
|
||||||
)
|
)
|
||||||
|
@ -184,7 +184,7 @@ inline void Data::StoreToFile(const boost::filesystem::path &filepath) const {
|
|||||||
}
|
}
|
||||||
|
|
||||||
inline void Data::StoreToStream(std::ostream &stream) const {
|
inline void Data::StoreToStream(std::ostream &stream) const {
|
||||||
stream.write(static_cast<const char*>(_data), _size);
|
stream.write(static_cast<const char*>(_data), static_cast<std::streamsize>(_size));
|
||||||
}
|
}
|
||||||
|
|
||||||
inline Data Data::LoadFromStream(std::istream &stream) {
|
inline Data Data::LoadFromStream(std::istream &stream) {
|
||||||
|
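The casts added above exist because std::istream::read() and std::ostream::write() take a signed std::streamsize while Data::size() returns an unsigned size_t; making the conversion explicit is what silences bugprone-narrowing-conversions. The same pattern in isolation:

std::vector<char> buffer(1024);
stream.read(buffer.data(), static_cast<std::streamsize>(buffer.size())); // explicit size_t -> streamsize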
@ -11,7 +11,7 @@ namespace cpputils {
|
|||||||
val += 1442695040888963407;
|
val += 1442695040888963407;
|
||||||
serialize<unsigned long long int>(result.dataOffset(i*sizeof(unsigned long long int)), val);
|
serialize<unsigned long long int>(result.dataOffset(i*sizeof(unsigned long long int)), val);
|
||||||
}
|
}
|
||||||
uint64_t alreadyWritten = (size/sizeof(unsigned long long int))*sizeof(unsigned long long int);
|
const uint64_t alreadyWritten = (size/sizeof(unsigned long long int))*sizeof(unsigned long long int);
|
||||||
val *= 6364136223846793005L;
|
val *= 6364136223846793005L;
|
||||||
val += 1442695040888963407;
|
val += 1442695040888963407;
|
||||||
unsigned char *remainingBytes = reinterpret_cast<unsigned char*>(&val);
|
unsigned char *remainingBytes = reinterpret_cast<unsigned char*>(&val);
|
||||||
|
@ -44,7 +44,7 @@ namespace cpputils {
|
|||||||
}
|
}
|
||||||
|
|
||||||
inline bool Deserializer::readBool() {
|
inline bool Deserializer::readBool() {
|
||||||
uint8_t read = readUint8();
|
const uint8_t read = readUint8();
|
||||||
if (read == 1) {
|
if (read == 1) {
|
||||||
return true;
|
return true;
|
||||||
} else if (read == 0) {
|
} else if (read == 0) {
|
||||||
@ -98,7 +98,7 @@ namespace cpputils {
|
|||||||
}
|
}
|
||||||
|
|
||||||
inline Data Deserializer::readData() {
|
inline Data Deserializer::readData() {
|
||||||
uint64_t size = readUint64();
|
const uint64_t size = readUint64();
|
||||||
if (_pos + size > _source->size()) {
|
if (_pos + size > _source->size()) {
|
||||||
throw std::runtime_error("Deserialization failed - size overflow");
|
throw std::runtime_error("Deserialization failed - size overflow");
|
||||||
}
|
}
|
||||||
@ -106,7 +106,7 @@ namespace cpputils {
|
|||||||
}
|
}
|
||||||
|
|
||||||
inline Data Deserializer::readTailData() {
|
inline Data Deserializer::readTailData() {
|
||||||
uint64_t size = _source->size() - _pos;
|
const uint64_t size = _source->size() - _pos;
|
||||||
return _readData(size);
|
return _readData(size);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -134,7 +134,7 @@ namespace cpputils {
|
|||||||
if (nullbytepos == nullptr) {
|
if (nullbytepos == nullptr) {
|
||||||
throw std::runtime_error("Deserialization failed - missing nullbyte for string termination");
|
throw std::runtime_error("Deserialization failed - missing nullbyte for string termination");
|
||||||
}
|
}
|
||||||
uint64_t size = static_cast<const uint8_t*>(nullbytepos) - static_cast<const uint8_t*>(_source->dataOffset(_pos));
|
const uint64_t size = static_cast<const uint8_t*>(nullbytepos) - static_cast<const uint8_t*>(_source->dataOffset(_pos));
|
||||||
std::string result(static_cast<const char*>(_source->dataOffset(_pos)), size);
|
std::string result(static_cast<const char*>(_source->dataOffset(_pos)), size);
|
||||||
_pos += size + 1;
|
_pos += size + 1;
|
||||||
return result;
|
return result;
|
||||||
|
@ -61,7 +61,7 @@ FixedSizeData<SIZE> FixedSizeData<SIZE>::FromString(const std::string &data) {
|
|||||||
ASSERT(data.size() == STRING_LENGTH, "Wrong string size for parsing FixedSizeData");
|
ASSERT(data.size() == STRING_LENGTH, "Wrong string size for parsing FixedSizeData");
|
||||||
FixedSizeData<SIZE> result;
|
FixedSizeData<SIZE> result;
|
||||||
{
|
{
|
||||||
CryptoPP::StringSource _1(data, true,
|
const CryptoPP::StringSource _1(data, true,
|
||||||
new CryptoPP::HexDecoder(
|
new CryptoPP::HexDecoder(
|
||||||
new CryptoPP::ArraySink(result._data.data(), BINARY_LENGTH)
|
new CryptoPP::ArraySink(result._data.data(), BINARY_LENGTH)
|
||||||
)
|
)
|
||||||
@ -73,7 +73,7 @@ FixedSizeData<SIZE> FixedSizeData<SIZE>::FromString(const std::string &data) {
|
|||||||
template<size_t SIZE>
|
template<size_t SIZE>
|
||||||
std::string FixedSizeData<SIZE>::ToString() const {
|
std::string FixedSizeData<SIZE>::ToString() const {
|
||||||
std::string result;
|
std::string result;
|
||||||
CryptoPP::ArraySource(_data.data(), BINARY_LENGTH, true,
|
const CryptoPP::ArraySource _1(_data.data(), BINARY_LENGTH, true,
|
||||||
new CryptoPP::HexEncoder(
|
new CryptoPP::HexEncoder(
|
||||||
new CryptoPP::StringSink(result)
|
new CryptoPP::StringSink(result)
|
||||||
)
|
)
|
||||||
|
@ -71,7 +71,6 @@ namespace cpputils
|
|||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
using cpputils::make_unique_ref;
|
|
||||||
|
|
||||||
namespace cpputils
|
namespace cpputils
|
||||||
{
|
{
|
||||||
|
@ -99,7 +99,7 @@ namespace cpputils {
|
|||||||
this->d_condition.wait(lock);
|
this->d_condition.wait(lock);
|
||||||
}
|
}
|
||||||
if (&this->d_tmp[0] != this->d_current) {
|
if (&this->d_tmp[0] != this->d_current) {
|
||||||
std::streamsize size(this->d_current - &this->d_tmp[0]);
|
const std::streamsize size(this->d_current - &this->d_tmp[0]);
|
||||||
traits_type::copy(this->eback(), &this->d_tmp[0],
|
traits_type::copy(this->eback(), &this->d_tmp[0],
|
||||||
this->d_current - &this->d_tmp[0]);
|
this->d_current - &this->d_tmp[0]);
|
||||||
this->setg(this->eback(), this->eback(), this->eback() + size);
|
this->setg(this->eback(), this->eback(), this->eback() + size);
|
||||||
@ -134,14 +134,14 @@ namespace cpputils {
|
|||||||
this->d_condition.wait(lock);
|
this->d_condition.wait(lock);
|
||||||
}
|
}
|
||||||
if (this->d_current != end) {
|
if (this->d_current != end) {
|
||||||
std::streamsize size(std::min(end - d_current,
|
const std::streamsize size(std::min(end - d_current,
|
||||||
this->pptr() - this->pbase()));
|
this->pptr() - this->pbase()));
|
||||||
traits_type::copy(d_current, this->pbase(), size);
|
traits_type::copy(d_current, this->pbase(), size);
|
||||||
this->d_current += size;
|
this->d_current += size;
|
||||||
std::streamsize remain((this->pptr() - this->pbase()) - size);
|
const std::streamsize remain((this->pptr() - this->pbase()) - size);
|
||||||
traits_type::move(this->pbase(), this->pptr(), remain);
|
traits_type::move(this->pbase(), this->pptr(), remain);
|
||||||
this->setp(this->pbase(), this->epptr());
|
this->setp(this->pbase(), this->epptr());
|
||||||
this->pbump(remain);
|
this->pbump(static_cast<int>(remain));
|
||||||
this->d_condition.notify_one();
|
this->d_condition.notify_one();
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -26,7 +26,7 @@ namespace cpputils {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void release() {
|
void release() {
|
||||||
std::unique_lock<std::mutex> lock(_mutex);
|
const std::unique_lock<std::mutex> lock(_mutex);
|
||||||
_triggered = true;
|
_triggered = true;
|
||||||
_cv.notify_all();
|
_cv.notify_all();
|
||||||
}
|
}
|
||||||
|
@ -79,7 +79,7 @@ namespace cpputils {
|
|||||||
|
|
||||||
template<class LockName>
|
template<class LockName>
|
||||||
inline void LockPool<LockName>::release(const LockName &lockName) {
|
inline void LockPool<LockName>::release(const LockName &lockName) {
|
||||||
std::unique_lock<std::mutex> mutexLock(_mutex);
|
const std::unique_lock<std::mutex> mutexLock(_mutex);
|
||||||
auto found = std::find(_lockedLocks.begin(), _lockedLocks.end(), lockName);
|
auto found = std::find(_lockedLocks.begin(), _lockedLocks.end(), lockName);
|
||||||
ASSERT(found != _lockedLocks.end(), "Lock given to release() was not locked");
|
ASSERT(found != _lockedLocks.end(), "Lock given to release() was not locked");
|
||||||
_lockedLocks.erase(found);
|
_lockedLocks.erase(found);
|
||||||
|
@ -2,7 +2,6 @@
|
|||||||
|
|
||||||
using std::string;
|
using std::string;
|
||||||
using boost::optional;
|
using boost::optional;
|
||||||
using boost::none;
|
|
||||||
|
|
||||||
namespace cpputils {
|
namespace cpputils {
|
||||||
FakeHttpClient::FakeHttpClient(): _sites() {
|
FakeHttpClient::FakeHttpClient(): _sites() {
|
||||||
|
@ -161,7 +161,8 @@ inline bool operator!=(const unique_ref<T, D> &lhs, const unique_ref<T, D> &rhs)
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
namespace std { // NOLINT (intentional change of namespace std)
|
// NOLINTBEGIN(cert-dcl58-cpp) -- intentional change of namespace std
|
||||||
|
namespace std {
|
||||||
template<class T, class D>
|
template<class T, class D>
|
||||||
inline void swap(cpputils::unique_ref<T, D>& lhs, cpputils::unique_ref<T, D>& rhs) noexcept {
|
inline void swap(cpputils::unique_ref<T, D>& lhs, cpputils::unique_ref<T, D>& rhs) noexcept {
|
||||||
lhs.swap(rhs);
|
lhs.swap(rhs);
|
||||||
@ -191,5 +192,6 @@ namespace std { // NOLINT (intentional change of namespace std)
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
// NOLINTEND(cert-dcl58-cpp)
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
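The edit above replaces a single-line NOLINT on the namespace declaration with a NOLINTBEGIN/NOLINTEND pair, so the cert-dcl58-cpp suppression now covers the whole block of swap overloads (region suppressions need a reasonably recent clang-tidy). The general pattern, with placeholder names; a cleaner alternative is to define swap in the type's own namespace and rely on ADL, which avoids opening namespace std at all:

// NOLINTBEGIN(cert-dcl58-cpp) -- intentional extension of namespace std
namespace std {
    template<class T>
    inline void swap(myproject::Handle<T> &lhs, myproject::Handle<T> &rhs) noexcept {
        lhs.swap(rhs);
    }
}
// NOLINTEND(cert-dcl58-cpp)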
@ -123,7 +123,7 @@ SignalCatcher::SignalCatcher(std::initializer_list<int> signals)
|
|||||||
// - the _signal_occurred flag will not be destructed as long as the signal handler might be called (i.e. as long as _impls lives)
|
// - the _signal_occurred flag will not be destructed as long as the signal handler might be called (i.e. as long as _impls lives)
|
||||||
|
|
||||||
_impls.reserve(signals.size());
|
_impls.reserve(signals.size());
|
||||||
for (int signal : signals) {
|
for (const int signal : signals) {
|
||||||
_impls.emplace_back(make_unique<details::SignalCatcherImpl>(signal, &_signal_occurred));
|
_impls.emplace_back(make_unique<details::SignalCatcherImpl>(signal, &_signal_occurred));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -29,7 +29,7 @@ public:
|
|||||||
std::memset(&new_signal_handler, 0, sizeof(new_signal_handler));
|
std::memset(&new_signal_handler, 0, sizeof(new_signal_handler));
|
||||||
new_signal_handler.sa_handler = handler; // NOLINT(cppcoreguidelines-pro-type-union-access)
|
new_signal_handler.sa_handler = handler; // NOLINT(cppcoreguidelines-pro-type-union-access)
|
||||||
new_signal_handler.sa_flags = SA_RESTART;
|
new_signal_handler.sa_flags = SA_RESTART;
|
||||||
int error = sigfillset(&new_signal_handler.sa_mask); // block all signals while signal handler is running
|
const int error = sigfillset(&new_signal_handler.sa_mask); // block all signals while signal handler is running
|
||||||
if (0 != error) {
|
if (0 != error) {
|
||||||
throw std::runtime_error("Error calling sigfillset. Errno: " + std::to_string(errno));
|
throw std::runtime_error("Error calling sigfillset. Errno: " + std::to_string(errno));
|
||||||
}
|
}
|
||||||
@ -47,7 +47,7 @@ public:
|
|||||||
|
|
||||||
private:
|
private:
|
||||||
static void _sigaction(int signal, struct sigaction *new_handler, struct sigaction *old_handler) {
|
static void _sigaction(int signal, struct sigaction *new_handler, struct sigaction *old_handler) {
|
||||||
int error = sigaction(signal, new_handler, old_handler);
|
const int error = sigaction(signal, new_handler, old_handler);
|
||||||
if (0 != error) {
|
if (0 != error) {
|
||||||
throw std::runtime_error("Error calling sigaction. Errno: " + std::to_string(errno));
|
throw std::runtime_error("Error calling sigaction. Errno: " + std::to_string(errno));
|
||||||
}
|
}
|
||||||
|
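Taken together, the two SignalCatcher hunks above amount to the usual sigaction installation pattern. A self-contained sketch (the flag variable and handler body are illustrative, not this file's actual members):

#include <csignal>
#include <cerrno>
#include <cstring>
#include <stdexcept>
#include <string>

namespace { volatile std::sig_atomic_t signal_occurred = 0; }
extern "C" void handle_signal(int) { signal_occurred = 1; } // only async-signal-safe work here

void install_handler(int signal_number) {
    struct sigaction new_signal_handler {};
    std::memset(&new_signal_handler, 0, sizeof(new_signal_handler));
    new_signal_handler.sa_handler = handle_signal;
    new_signal_handler.sa_flags = SA_RESTART;            // restart interrupted syscalls
    if (0 != sigfillset(&new_signal_handler.sa_mask)) {  // block all signals while the handler runs
        throw std::runtime_error("Error calling sigfillset. Errno: " + std::to_string(errno));
    }
    if (0 != sigaction(signal_number, &new_signal_handler, nullptr)) {
        throw std::runtime_error("Error calling sigaction. Errno: " + std::to_string(errno));
    }
}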
@ -21,7 +21,7 @@ using namespace cpputils::logging;
|
|||||||
namespace cpputils {
|
namespace cpputils {
|
||||||
|
|
||||||
void daemonize() {
|
void daemonize() {
|
||||||
pid_t pid = fork();
|
const pid_t pid = fork();
|
||||||
if (pid < 0) {
|
if (pid < 0) {
|
||||||
exit(EXIT_FAILURE);
|
exit(EXIT_FAILURE);
|
||||||
}
|
}
|
||||||
@ -34,7 +34,7 @@ void daemonize() {
|
|||||||
umask(0);
|
umask(0);
|
||||||
|
|
||||||
// Create a new SID for the child process
|
// Create a new SID for the child process
|
||||||
pid_t sid = setsid();
|
const pid_t sid = setsid();
|
||||||
if (sid < 0) {
|
if (sid < 0) {
|
||||||
LOG(ERR, "Failed to get SID for daemon process");
|
LOG(ERR, "Failed to get SID for daemon process");
|
||||||
exit(EXIT_FAILURE);
|
exit(EXIT_FAILURE);
|
||||||
|
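The daemonize() hunks above change only the two syscall result variables; for orientation, the textbook Unix daemonization sequence they belong to looks roughly like this (a generic sketch, not a verbatim copy of this function):

#include <sys/stat.h>
#include <unistd.h>
#include <cstdlib>

void daemonize_sketch() {
    const pid_t pid = fork();
    if (pid < 0) exit(EXIT_FAILURE);        // fork failed
    if (pid > 0) exit(EXIT_SUCCESS);        // parent exits; the child keeps running
    umask(0);
    const pid_t sid = setsid();             // become session leader, detach from the terminal
    if (sid < 0) exit(EXIT_FAILURE);
    if (chdir("/") < 0) exit(EXIT_FAILURE); // don't keep the working directory busy
    close(STDIN_FILENO);
    close(STDOUT_FILENO);
    close(STDERR_FILENO);
}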
@ -1,10 +1,11 @@
|
|||||||
#include "subprocess.h"
|
#include "subprocess.h"
|
||||||
|
#include <array>
|
||||||
|
#include <boost/asio.hpp>
|
||||||
|
#include <boost/process.hpp>
|
||||||
|
#include <cerrno>
|
||||||
|
#include <cstddef>
|
||||||
#include <cstdio>
|
#include <cstdio>
|
||||||
#include <stdexcept>
|
#include <stdexcept>
|
||||||
#include <cerrno>
|
|
||||||
#include <array>
|
|
||||||
#include <boost/process.hpp>
|
|
||||||
#include <boost/asio.hpp>
|
|
||||||
|
|
||||||
using std::string;
|
using std::string;
|
||||||
using std::vector;
|
using std::vector;
|
||||||
|
@ -2,10 +2,11 @@
|
|||||||
#ifndef MESSMER_CPPUTILS_RANDOM_PSEUDORANDOMPOOL_H
|
#ifndef MESSMER_CPPUTILS_RANDOM_PSEUDORANDOMPOOL_H
|
||||||
#define MESSMER_CPPUTILS_RANDOM_PSEUDORANDOMPOOL_H
|
#define MESSMER_CPPUTILS_RANDOM_PSEUDORANDOMPOOL_H
|
||||||
|
|
||||||
#include <boost/thread.hpp>
|
|
||||||
#include "RandomGenerator.h"
|
#include "RandomGenerator.h"
|
||||||
#include "ThreadsafeRandomDataBuffer.h"
|
|
||||||
#include "RandomGeneratorThread.h"
|
#include "RandomGeneratorThread.h"
|
||||||
|
#include "ThreadsafeRandomDataBuffer.h"
|
||||||
|
#include <boost/thread.hpp>
|
||||||
|
#include <cstddef>
|
||||||
#include <mutex>
|
#include <mutex>
|
||||||
|
|
||||||
namespace cpputils {
|
namespace cpputils {
|
||||||
|
@ -12,13 +12,13 @@ namespace cpputils {
|
|||||||
class Random final {
|
class Random final {
|
||||||
public:
|
public:
|
||||||
static PseudoRandomPool &PseudoRandom() {
|
static PseudoRandomPool &PseudoRandom() {
|
||||||
std::unique_lock <std::mutex> lock(_mutex);
|
const std::unique_lock <std::mutex> lock(_mutex);
|
||||||
static PseudoRandomPool random;
|
static PseudoRandomPool random;
|
||||||
return random;
|
return random;
|
||||||
}
|
}
|
||||||
|
|
||||||
static OSRandomGenerator &OSRandom() {
|
static OSRandomGenerator &OSRandom() {
|
||||||
std::unique_lock <std::mutex> lock(_mutex);
|
const std::unique_lock <std::mutex> lock(_mutex);
|
||||||
static OSRandomGenerator random;
|
static OSRandomGenerator random;
|
||||||
return random;
|
return random;
|
||||||
}
|
}
|
||||||
|
@ -39,7 +39,7 @@ namespace cpputils {
|
|||||||
|
|
||||||
inline void RandomDataBuffer::add(const Data& newData) {
|
inline void RandomDataBuffer::add(const Data& newData) {
|
||||||
// Concatenate old and new random data
|
// Concatenate old and new random data
|
||||||
size_t oldSize = size();
|
const size_t oldSize = size();
|
||||||
Data combined(oldSize + newData.size());
|
Data combined(oldSize + newData.size());
|
||||||
get(combined.data(), oldSize);
|
get(combined.data(), oldSize);
|
||||||
std::memcpy(combined.dataOffset(oldSize), newData.data(), newData.size());
|
std::memcpy(combined.dataOffset(oldSize), newData.data(), newData.size());
|
||||||
|
@ -8,6 +8,7 @@ namespace cpputils {
|
|||||||
class RandomGenerator {
|
class RandomGenerator {
|
||||||
public:
|
public:
|
||||||
RandomGenerator();
|
RandomGenerator();
|
||||||
|
virtual ~RandomGenerator() = default;
|
||||||
|
|
||||||
template<size_t SIZE> FixedSizeData<SIZE> getFixedSize();
|
template<size_t SIZE> FixedSizeData<SIZE> getFixedSize();
|
||||||
Data get(size_t size);
|
Data get(size_t size);
|
||||||
|
@ -17,9 +17,9 @@ namespace cpputils {
|
|||||||
|
|
||||||
bool RandomGeneratorThread::_loopIteration() {
|
bool RandomGeneratorThread::_loopIteration() {
|
||||||
_buffer->waitUntilSizeIsLessThan(_minSize);
|
_buffer->waitUntilSizeIsLessThan(_minSize);
|
||||||
size_t neededRandomDataSize = _maxSize - _buffer->size();
|
const size_t neededRandomDataSize = _maxSize - _buffer->size();
|
||||||
ASSERT(_maxSize > _buffer->size(), "This could theoretically fail if another thread refilled the buffer. But we should be the only refilling thread.");
|
ASSERT(_maxSize > _buffer->size(), "This could theoretically fail if another thread refilled the buffer. But we should be the only refilling thread.");
|
||||||
Data randomData = _generateRandomData(neededRandomDataSize);
|
const Data randomData = _generateRandomData(neededRandomDataSize);
|
||||||
_buffer->add(randomData);
|
_buffer->add(randomData);
|
||||||
return true; // Run another iteration (don't terminate thread)
|
return true; // Run another iteration (don't terminate thread)
|
||||||
}
|
}
|
||||||
|
@ -39,14 +39,14 @@ namespace cpputils {
|
|||||||
}
|
}
|
||||||
|
|
||||||
inline size_t ThreadsafeRandomDataBuffer::size() const {
|
inline size_t ThreadsafeRandomDataBuffer::size() const {
|
||||||
boost::unique_lock<boost::mutex> lock(_mutex);
|
const boost::unique_lock<boost::mutex> lock(_mutex);
|
||||||
return _buffer.size();
|
return _buffer.size();
|
||||||
}
|
}
|
||||||
|
|
||||||
inline void ThreadsafeRandomDataBuffer::get(void *target, size_t numBytes) {
|
inline void ThreadsafeRandomDataBuffer::get(void *target, size_t numBytes) {
|
||||||
size_t alreadyGotten = 0;
|
size_t alreadyGotten = 0;
|
||||||
while (alreadyGotten < numBytes) {
|
while (alreadyGotten < numBytes) {
|
||||||
size_t got = _get(static_cast<uint8_t*>(target)+alreadyGotten, numBytes);
|
const size_t got = _get(static_cast<uint8_t*>(target)+alreadyGotten, numBytes);
|
||||||
alreadyGotten += got;
|
alreadyGotten += got;
|
||||||
ASSERT(alreadyGotten <= numBytes, "Got too many bytes");
|
ASSERT(alreadyGotten <= numBytes, "Got too many bytes");
|
||||||
}
|
}
|
||||||
@ -57,14 +57,14 @@ namespace cpputils {
|
|||||||
_dataAddedCv.wait(lock, [this] {
|
_dataAddedCv.wait(lock, [this] {
|
||||||
return _buffer.size() > 0;
|
return _buffer.size() > 0;
|
||||||
});
|
});
|
||||||
size_t gettableBytes = (std::min)(_buffer.size(), numBytes);
|
const size_t gettableBytes = (std::min)(_buffer.size(), numBytes);
|
||||||
_buffer.get(target, gettableBytes);
|
_buffer.get(target, gettableBytes);
|
||||||
_dataGottenCv.notify_all();
|
_dataGottenCv.notify_all();
|
||||||
return gettableBytes;
|
return gettableBytes;
|
||||||
}
|
}
|
||||||
|
|
||||||
inline void ThreadsafeRandomDataBuffer::add(const Data& data) {
|
inline void ThreadsafeRandomDataBuffer::add(const Data& data) {
|
||||||
boost::unique_lock<boost::mutex> lock(_mutex);
|
const boost::unique_lock<boost::mutex> lock(_mutex);
|
||||||
_buffer.add(data);
|
_buffer.add(data);
|
||||||
_dataAddedCv.notify_all();
|
_dataAddedCv.notify_all();
|
||||||
}
|
}
|
||||||
|
@ -11,7 +11,7 @@ namespace cpputils {
|
|||||||
|
|
||||||
uint64_t free_disk_space_in_bytes(const bf::path& location) {
|
uint64_t free_disk_space_in_bytes(const bf::path& location) {
|
||||||
struct statvfs stat {};
|
struct statvfs stat {};
|
||||||
int result = ::statvfs(location.string().c_str(), &stat);
|
const int result = ::statvfs(location.string().c_str(), &stat);
|
||||||
if (0 != result) {
|
if (0 != result) {
|
||||||
throw std::runtime_error("Error calling statvfs(). Errno: " + std::to_string(errno));
|
throw std::runtime_error("Error calling statvfs(). Errno: " + std::to_string(errno));
|
||||||
}
|
}
|
||||||
|
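The return statement of free_disk_space_in_bytes() falls outside this hunk; the conventional way to turn a statvfs result into a byte count is to multiply the blocks available to unprivileged users by the fragment size (a sketch, not necessarily the exact expression used here):

return static_cast<uint64_t>(stat.f_bavail) * stat.f_frsize;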
@ -10,14 +10,14 @@
|
|||||||
namespace cpputils {
|
namespace cpputils {
|
||||||
|
|
||||||
void setenv(const char* key, const char* value) {
|
void setenv(const char* key, const char* value) {
|
||||||
int retval = ::setenv(key, value, 1);
|
const int retval = ::setenv(key, value, 1);
|
||||||
if (0 != retval) {
|
if (0 != retval) {
|
||||||
throw std::runtime_error("Error setting environment variable. Errno: " + std::to_string(errno));
|
throw std::runtime_error("Error setting environment variable. Errno: " + std::to_string(errno));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void unsetenv(const char* key) {
|
void unsetenv(const char* key) {
|
||||||
int retval = ::unsetenv(key);
|
const int retval = ::unsetenv(key);
|
||||||
if (0 != retval) {
|
if (0 != retval) {
|
||||||
throw std::runtime_error("Error unsetting environment variable. Errno: " + std::to_string(errno));
|
throw std::runtime_error("Error unsetting environment variable. Errno: " + std::to_string(errno));
|
||||||
}
|
}
|
||||||
@ -34,7 +34,7 @@ namespace cpputils {
|
|||||||
void setenv(const char* key, const char* value) {
|
void setenv(const char* key, const char* value) {
|
||||||
std::ostringstream command;
|
std::ostringstream command;
|
||||||
command << key << "=" << value;
|
command << key << "=" << value;
|
||||||
int retval = _putenv(command.str().c_str());
|
const int retval = _putenv(command.str().c_str());
|
||||||
if (0 != retval) {
|
if (0 != retval) {
|
||||||
throw std::runtime_error("Error setting environment variable. Errno: " + std::to_string(errno));
|
throw std::runtime_error("Error setting environment variable. Errno: " + std::to_string(errno));
|
||||||
}
|
}
|
||||||
|
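The Windows unsetenv() counterpart is outside this hunk; with the same _putenv-based approach it would look roughly like the following, since _putenv removes a variable when the value after '=' is empty:

void unsetenv(const char* key) {
    std::ostringstream command;
    command << key << "=";
    const int retval = _putenv(command.str().c_str());
    if (0 != retval) {
        throw std::runtime_error("Error unsetting environment variable. Errno: " + std::to_string(errno));
    }
}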
@ -14,7 +14,7 @@ int set_filetime(const char *filepath, timespec lastAccessTime, timespec lastMod
|
|||||||
std::array<struct timeval, 2> casted_times{};
|
std::array<struct timeval, 2> casted_times{};
|
||||||
TIMESPEC_TO_TIMEVAL(&casted_times[0], &lastAccessTime);
|
TIMESPEC_TO_TIMEVAL(&casted_times[0], &lastAccessTime);
|
||||||
TIMESPEC_TO_TIMEVAL(&casted_times[1], &lastModificationTime);
|
TIMESPEC_TO_TIMEVAL(&casted_times[1], &lastModificationTime);
|
||||||
int retval = ::utimes(filepath, casted_times.data());
|
const int retval = ::utimes(filepath, casted_times.data());
|
||||||
if (0 == retval) {
|
if (0 == retval) {
|
||||||
return 0;
|
return 0;
|
||||||
} else {
|
} else {
|
||||||
@ -24,7 +24,7 @@ int set_filetime(const char *filepath, timespec lastAccessTime, timespec lastMod
|
|||||||
|
|
||||||
int get_filetime(const char *filepath, timespec* lastAccessTime, timespec* lastModificationTime) {
|
int get_filetime(const char *filepath, timespec* lastAccessTime, timespec* lastModificationTime) {
|
||||||
struct ::stat attrib{};
|
struct ::stat attrib{};
|
||||||
int retval = ::stat(filepath, &attrib);
|
const int retval = ::stat(filepath, &attrib);
|
||||||
if (retval != 0) {
|
if (retval != 0) {
|
||||||
return errno;
|
return errno;
|
||||||
}
|
}
|
||||||
|
@ -28,8 +28,8 @@ namespace cpputils {
|
|||||||
namespace cpputils {
|
namespace cpputils {
|
||||||
namespace system {
|
namespace system {
|
||||||
uint64_t get_total_memory() {
|
uint64_t get_total_memory() {
|
||||||
long numRAMPages = sysconf(_SC_PHYS_PAGES);
|
const long numRAMPages = sysconf(_SC_PHYS_PAGES);
|
||||||
long pageSize = sysconf(_SC_PAGESIZE);
|
const long pageSize = sysconf(_SC_PAGESIZE);
|
||||||
return numRAMPages * pageSize;
|
return numRAMPages * pageSize;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -11,7 +11,7 @@ namespace cpputils {
|
|||||||
TempFile::TempFile(const bf::path &path, bool create)
|
TempFile::TempFile(const bf::path &path, bool create)
|
||||||
: _path(path) {
|
: _path(path) {
|
||||||
if (create) {
|
if (create) {
|
||||||
ofstream file(_path.string().c_str());
|
const ofstream file(_path.string().c_str());
|
||||||
if (!file.good()) {
|
if (!file.good()) {
|
||||||
throw std::runtime_error("Could not create tempfile");
|
throw std::runtime_error("Could not create tempfile");
|
||||||
}
|
}
|
||||||
|
@ -11,8 +11,8 @@ namespace cpputils {
|
|||||||
|
|
||||||
class CaptureStderrRAII final {
|
class CaptureStderrRAII final {
|
||||||
public:
|
public:
|
||||||
CaptureStderrRAII() {
|
CaptureStderrRAII() : _oldBuffer(std::cerr.rdbuf()) {
|
||||||
_oldBuffer = std::cerr.rdbuf();
|
|
||||||
|
|
||||||
// Capture stderr to _buffer
|
// Capture stderr to _buffer
|
||||||
std::cerr.rdbuf(_buffer.rdbuf());
|
std::cerr.rdbuf(_buffer.rdbuf());
|
||||||
|
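Only the constructor is touched here, moving the rdbuf() save into the member initializer list. The complete RAII shape of such a capture helper, with the destructor restoring the saved buffer, is roughly the following (a sketch assuming _buffer is a std::stringstream member; the real class may differ):

#include <iostream>
#include <sstream>
#include <string>

class CaptureStderrRAII_sketch final {
public:
    CaptureStderrRAII_sketch() : _oldBuffer(std::cerr.rdbuf()) {
        std::cerr.rdbuf(_buffer.rdbuf());   // redirect stderr into _buffer
    }
    ~CaptureStderrRAII_sketch() {
        std::cerr.rdbuf(_oldBuffer);        // restore the original stream buffer
    }
    std::string captured() const { return _buffer.str(); } // hypothetical accessor
private:
    std::stringstream _buffer;
    std::streambuf *_oldBuffer;
};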
@ -38,7 +38,7 @@ public:
|
|||||||
|
|
||||||
// wait until any potentially running writers are finished
|
// wait until any potentially running writers are finished
|
||||||
{
|
{
|
||||||
std::unique_lock<std::mutex> lock(_writeMutex);
|
const std::unique_lock<std::mutex> lock(_writeMutex);
|
||||||
}
|
}
|
||||||
|
|
||||||
// wait until any potentially running readers are finished
|
// wait until any potentially running readers are finished
|
||||||
@ -49,7 +49,7 @@ public:
|
|||||||
|
|
||||||
template <typename F>
|
template <typename F>
|
||||||
auto read(F&& readFunc) const {
|
auto read(F&& readFunc) const {
|
||||||
detail::IncrementRAII _increment_counter(&_counters[_foregroundCounterIndex.load()]); // NOLINT(cppcoreguidelines-pro-bounds-constant-array-index)
|
const detail::IncrementRAII _increment_counter(&_counters[_foregroundCounterIndex.load()]); // NOLINT(cppcoreguidelines-pro-bounds-constant-array-index)
|
||||||
|
|
||||||
if(_inDestruction.load()) {
|
if(_inDestruction.load()) {
|
||||||
throw std::logic_error("Issued LeftRight::read() after the destructor started running");
|
throw std::logic_error("Issued LeftRight::read() after the destructor started running");
|
||||||
@ -62,7 +62,7 @@ public:
|
|||||||
// depending on if the first or the second call to writeFunc threw.
|
// depending on if the first or the second call to writeFunc threw.
|
||||||
template <typename F>
|
template <typename F>
|
||||||
auto write(F&& writeFunc) {
|
auto write(F&& writeFunc) {
|
||||||
std::unique_lock<std::mutex> lock(_writeMutex);
|
const std::unique_lock<std::mutex> lock(_writeMutex);
|
||||||
|
|
||||||
if(_inDestruction.load()) {
|
if(_inDestruction.load()) {
|
||||||
throw std::logic_error("Issued LeftRight::read() after the destructor started running");
|
throw std::logic_error("Issued LeftRight::read() after the destructor started running");
|
||||||
|
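A hypothetical use of this left/right wrapper, based only on the read()/write() signatures visible in these hunks (the template's exact name and construction are assumptions):

cpputils::LeftRight<std::vector<int>> numbers;

// Writers are serialized by _writeMutex and the functor runs on both copies,
// which is why the comment above worries about the second writeFunc call throwing.
numbers.write([] (std::vector<int> &v) { v.push_back(42); });

// Readers only bump the foreground counter and are not blocked by writers.
const size_t count = numbers.read([] (const std::vector<int> &v) { return v.size(); });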
@ -24,7 +24,7 @@ namespace cpputils {
|
|||||||
}
|
}
|
||||||
|
|
||||||
ThreadSystem::Handle ThreadSystem::start(function<bool()> loopIteration, string threadName) {
|
ThreadSystem::Handle ThreadSystem::start(function<bool()> loopIteration, string threadName) {
|
||||||
boost::unique_lock<boost::mutex> lock(_mutex);
|
const boost::unique_lock<boost::mutex> lock(_mutex);
|
||||||
auto thread = _startThread(loopIteration, threadName);
|
auto thread = _startThread(loopIteration, threadName);
|
||||||
_runningThreads.push_back(RunningThread{std::move(threadName), std::move(loopIteration), std::move(thread)});
|
_runningThreads.push_back(RunningThread{std::move(threadName), std::move(loopIteration), std::move(thread)});
|
||||||
return std::prev(_runningThreads.end());
|
return std::prev(_runningThreads.end());
|
||||||
|
@ -28,9 +28,9 @@ void set_thread_name(const char* name) {
|
|||||||
name_.resize(MAX_NAME_LEN - 1);
|
name_.resize(MAX_NAME_LEN - 1);
|
||||||
}
|
}
|
||||||
#if defined(__APPLE__)
|
#if defined(__APPLE__)
|
||||||
int result = pthread_setname_np(name_.c_str());
|
const int result = pthread_setname_np(name_.c_str());
|
||||||
#else
|
#else
|
||||||
int result = pthread_setname_np(pthread_self(), name_.c_str());
|
const int result = pthread_setname_np(pthread_self(), name_.c_str());
|
||||||
#endif
|
#endif
|
||||||
if (0 != result) {
|
if (0 != result) {
|
||||||
throw std::runtime_error("Error setting thread name with pthread_setname_np. Code: " + std::to_string(result));
|
throw std::runtime_error("Error setting thread name with pthread_setname_np. Code: " + std::to_string(result));
|
||||||
@ -75,9 +75,9 @@ int pthread_getname_np_gcompat(pthread_t thread, char *name, size_t len) {
|
|||||||
std::string get_thread_name(pthread_t thread) {
|
std::string get_thread_name(pthread_t thread) {
|
||||||
std::array<char, MAX_NAME_LEN> name{};
|
std::array<char, MAX_NAME_LEN> name{};
|
||||||
#if defined(__GLIBC__) || defined(__APPLE__)
|
#if defined(__GLIBC__) || defined(__APPLE__)
|
||||||
int result = pthread_getname_np(thread, name.data(), MAX_NAME_LEN);
|
const int result = pthread_getname_np(thread, name.data(), MAX_NAME_LEN);
|
||||||
#else
|
#else
|
||||||
int result = pthread_getname_np_gcompat(thread, name.data(), MAX_NAME_LEN);
|
const int result = pthread_getname_np_gcompat(thread, name.data(), MAX_NAME_LEN);
|
||||||
#endif
|
#endif
|
||||||
if (0 != result) {
|
if (0 != result) {
|
||||||
throw std::runtime_error("Error getting thread name with pthread_getname_np. Code: " + std::to_string(result));
|
throw std::runtime_error("Error getting thread name with pthread_getname_np. Code: " + std::to_string(result));
|
||||||
|
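The #if split exists because the platforms disagree on the signature: Apple's pthread_setname_np() takes only the name and can rename just the calling thread, while glibc's takes a pthread_t as well and rejects names longer than 15 characters plus the terminating NUL (hence the truncation to MAX_NAME_LEN - 1 above). In isolation:

#if defined(__APPLE__)
    const int result = pthread_setname_np("worker");                  // current thread only
#else
    const int result = pthread_setname_np(pthread_self(), "worker");  // any thread, <= 15 chars
#endif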
@ -32,7 +32,7 @@ namespace cryfs_cli {
|
|||||||
}
|
}
|
||||||
|
|
||||||
inline void CallAfterTimeout::resetTimer() {
|
inline void CallAfterTimeout::resetTimer() {
|
||||||
std::unique_lock<std::mutex> lock(_mutex);
|
const std::unique_lock<std::mutex> lock(_mutex);
|
||||||
_start = boost::chrono::steady_clock::now();
|
_start = boost::chrono::steady_clock::now();
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -42,12 +42,12 @@ namespace cryfs_cli {
|
|||||||
}
|
}
|
||||||
|
|
||||||
inline boost::chrono::time_point<boost::chrono::steady_clock> CallAfterTimeout::_targetTime() {
|
inline boost::chrono::time_point<boost::chrono::steady_clock> CallAfterTimeout::_targetTime() {
|
||||||
std::unique_lock<std::mutex> lock(_mutex);
|
const std::unique_lock<std::mutex> lock(_mutex);
|
||||||
return _start + _timeout;
|
return _start + _timeout;
|
||||||
}
|
}
|
||||||
|
|
||||||
inline bool CallAfterTimeout::_callCallbackIfTimeout() {
|
inline bool CallAfterTimeout::_callCallbackIfTimeout() {
|
||||||
std::unique_lock<std::mutex> lock(_mutex);
|
const std::unique_lock<std::mutex> lock(_mutex);
|
||||||
if (boost::chrono::steady_clock::now() >= _start + _timeout) {
|
if (boost::chrono::steady_clock::now() >= _start + _timeout) {
|
||||||
_callback();
|
_callback();
|
||||||
return false; // Stop thread
|
return false; // Stop thread
|
||||||
|
@ -126,7 +126,7 @@ namespace cryfs_cli {
|
|||||||
cpputils::set_thread_name("cryfs");
|
cpputils::set_thread_name("cryfs");
|
||||||
try {
|
try {
|
||||||
_sanityChecks(options);
|
_sanityChecks(options);
|
||||||
LocalStateDir localStateDir(options.localStateDir());
|
const LocalStateDir localStateDir(options.localStateDir());
|
||||||
auto blockStore = make_unique_ref<OnDiskBlockStore2>(options.baseDir());
|
auto blockStore = make_unique_ref<OnDiskBlockStore2>(options.baseDir());
|
||||||
auto config = _loadOrCreateConfig(options, localStateDir, credentials);
|
auto config = _loadOrCreateConfig(options, localStateDir, credentials);
|
||||||
fspp::fuse::Fuse* fuse = nullptr;
|
fspp::fuse::Fuse* fuse = nullptr;
|
||||||
|
@ -46,7 +46,7 @@ namespace cryfs_cli {
|
|||||||
}
|
}
|
||||||
|
|
||||||
optional<ptree> VersionChecker::_getVersionInfo(HttpClient* httpClient) {
|
optional<ptree> VersionChecker::_getVersionInfo(HttpClient* httpClient) {
|
||||||
long timeoutMsec = 2000;
|
const long timeoutMsec = 2000;
|
||||||
string response;
|
string response;
|
||||||
try {
|
try {
|
||||||
response = httpClient->get("https://www.cryfs.org/version_info.json", timeoutMsec);
|
response = httpClient->get("https://www.cryfs.org/version_info.json", timeoutMsec);
|
||||||
|
@ -12,7 +12,7 @@ namespace cryfs_cli {
|
|||||||
namespace program_options {
|
namespace program_options {
|
||||||
pair<vector<string>, vector<string>> splitAtDoubleDash(const vector<string> &options) {
|
pair<vector<string>, vector<string>> splitAtDoubleDash(const vector<string> &options) {
|
||||||
auto doubleDashIterator = std::find(options.begin(), options.end(), string("--"));
|
auto doubleDashIterator = std::find(options.begin(), options.end(), string("--"));
|
||||||
vector<string> beforeDoubleDash(options.begin(), doubleDashIterator);
|
const vector<string> beforeDoubleDash(options.begin(), doubleDashIterator);
|
||||||
vector<string> afterDoubleDash;
|
vector<string> afterDoubleDash;
|
||||||
if (doubleDashIterator != options.end() && doubleDashIterator + 1 != options.end()) {
|
if (doubleDashIterator != options.end() && doubleDashIterator + 1 != options.end()) {
|
||||||
afterDoubleDash.reserve(options.size() - beforeDoubleDash.size() - 1);
|
afterDoubleDash.reserve(options.size() - beforeDoubleDash.size() - 1);
|
||||||
|
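A concrete illustration of the split, using a hypothetical argument vector (not taken from this change):

// splitAtDoubleDash({"cryfs", "basedir", "mountdir", "--", "-f", "-o", "allow_other"})
//   .first  == {"cryfs", "basedir", "mountdir"}
//   .second == {"-f", "-o", "allow_other"}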
@ -36,6 +36,7 @@ set(LIB_SOURCES
|
|||||||
impl/filesystem/parallelaccessfsblobstore/FsBlobRef.cpp
|
impl/filesystem/parallelaccessfsblobstore/FsBlobRef.cpp
|
||||||
impl/filesystem/parallelaccessfsblobstore/FileBlobRef.cpp
|
impl/filesystem/parallelaccessfsblobstore/FileBlobRef.cpp
|
||||||
impl/filesystem/parallelaccessfsblobstore/SymlinkBlobRef.cpp
|
impl/filesystem/parallelaccessfsblobstore/SymlinkBlobRef.cpp
|
||||||
|
impl/filesystem/entry_helper.cpp
|
||||||
impl/filesystem/CrySymlink.cpp
|
impl/filesystem/CrySymlink.cpp
|
||||||
impl/filesystem/CryDir.cpp
|
impl/filesystem/CryDir.cpp
|
||||||
impl/filesystem/cachingfsblobstore/DirBlobRef.cpp
|
impl/filesystem/cachingfsblobstore/DirBlobRef.cpp
|
||||||
|
@ -100,6 +100,6 @@ vector<string> CryCiphers::_buildSupportedCipherNames() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
const vector<string>& CryCiphers::supportedCipherNames() {
|
const vector<string>& CryCiphers::supportedCipherNames() {
|
||||||
static vector<string> supportedCipherNames = _buildSupportedCipherNames();
|
static const vector<string> supportedCipherNames = _buildSupportedCipherNames();
|
||||||
return supportedCipherNames;
|
return supportedCipherNames;
|
||||||
}
|
}
|
||||||
|
@ -30,7 +30,7 @@ namespace cryfs {
|
|||||||
config.SetFilesystemId(_generateFilesystemID());
|
config.SetFilesystemId(_generateFilesystemID());
|
||||||
auto encryptionKey = _generateEncKey(config.Cipher());
|
auto encryptionKey = _generateEncKey(config.Cipher());
|
||||||
auto localState = LocalStateMetadata::loadOrGenerate(_localStateDir.forFilesystemId(config.FilesystemId()), cpputils::Data::FromString(encryptionKey), allowReplacedFilesystem);
|
auto localState = LocalStateMetadata::loadOrGenerate(_localStateDir.forFilesystemId(config.FilesystemId()), cpputils::Data::FromString(encryptionKey), allowReplacedFilesystem);
|
||||||
uint32_t myClientId = localState.myClientId();
|
const uint32_t myClientId = localState.myClientId();
|
||||||
config.SetEncryptionKey(std::move(encryptionKey));
|
config.SetEncryptionKey(std::move(encryptionKey));
|
||||||
config.SetExclusiveClientId(_generateExclusiveClientId(missingBlockIsIntegrityViolationFromCommandLine, myClientId));
|
config.SetExclusiveClientId(_generateExclusiveClientId(missingBlockIsIntegrityViolationFromCommandLine, myClientId));
|
||||||
#ifndef CRYFS_NO_COMPATIBILITY
|
#ifndef CRYFS_NO_COMPATIBILITY
|
||||||
|
@ -74,7 +74,7 @@ void CryConfigFile::save() const {
|
|||||||
if (_access == Access::ReadOnly) {
|
if (_access == Access::ReadOnly) {
|
||||||
throw std::logic_error("Tried to save the cryfs.config file while being in read only mode");
|
throw std::logic_error("Tried to save the cryfs.config file while being in read only mode");
|
||||||
}
|
}
|
||||||
Data configData = _config.save();
|
const Data configData = _config.save();
|
||||||
auto encrypted = _encryptor->encrypt(configData, _config.Cipher());
|
auto encrypted = _encryptor->encrypt(configData, _config.Cipher());
|
||||||
encrypted.StoreToFile(_path);
|
encrypted.StoreToFile(_path);
|
||||||
}
|
}
|
||||||
|
@ -19,7 +19,6 @@ using boost::none;
|
|||||||
using std::shared_ptr;
|
using std::shared_ptr;
|
||||||
using std::string;
|
using std::string;
|
||||||
using std::shared_ptr;
|
using std::shared_ptr;
|
||||||
using gitversion::VersionCompare;
|
|
||||||
using namespace cpputils::logging;
|
using namespace cpputils::logging;
|
||||||
|
|
||||||
namespace cryfs {
|
namespace cryfs {
|
||||||
@ -58,7 +57,7 @@ either<CryConfigFile::LoadError, CryConfigLoader::ConfigLoadResult> CryConfigLoa
|
|||||||
}
|
}
|
||||||
_checkCipher(*config.right()->config());
|
_checkCipher(*config.right()->config());
|
||||||
auto localState = LocalStateMetadata::loadOrGenerate(_localStateDir.forFilesystemId(config.right()->config()->FilesystemId()), cpputils::Data::FromString(config.right()->config()->EncryptionKey()), allowReplacedFilesystem);
|
auto localState = LocalStateMetadata::loadOrGenerate(_localStateDir.forFilesystemId(config.right()->config()->FilesystemId()), cpputils::Data::FromString(config.right()->config()->EncryptionKey()), allowReplacedFilesystem);
|
||||||
uint32_t myClientId = localState.myClientId();
|
const uint32_t myClientId = localState.myClientId();
|
||||||
_checkMissingBlocksAreIntegrityViolations(config.right().get(), myClientId);
|
_checkMissingBlocksAreIntegrityViolations(config.right().get(), myClientId);
|
||||||
return ConfigLoadResult {std::move(oldConfig), std::move(config.right()), myClientId};
|
return ConfigLoadResult {std::move(oldConfig), std::move(config.right()), myClientId};
|
||||||
}
|
}
|
||||||
|
@ -19,9 +19,9 @@ namespace cryfs {
|
|||||||
}
|
}
|
||||||
|
|
||||||
Data CryConfigEncryptor::encrypt(const Data &plaintext, const string &cipherName) const {
|
Data CryConfigEncryptor::encrypt(const Data &plaintext, const string &cipherName) const {
|
||||||
InnerConfig innerConfig = _innerEncryptor(cipherName)->encrypt(plaintext);
|
const InnerConfig innerConfig = _innerEncryptor(cipherName)->encrypt(plaintext);
|
||||||
Data serializedInnerConfig = innerConfig.serialize();
|
const Data serializedInnerConfig = innerConfig.serialize();
|
||||||
OuterConfig outerConfig = _outerEncryptor()->encrypt(serializedInnerConfig);
|
const OuterConfig outerConfig = _outerEncryptor()->encrypt(serializedInnerConfig);
|
||||||
return outerConfig.serialize();
|
return outerConfig.serialize();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -32,7 +32,7 @@ namespace cryfs {
|
|||||||
Deserializer deserializer(&data);
|
Deserializer deserializer(&data);
|
||||||
try {
|
try {
|
||||||
_checkHeader(&deserializer);
|
_checkHeader(&deserializer);
|
||||||
string cipherName = deserializer.readString();
|
const string cipherName = deserializer.readString();
|
||||||
auto result = deserializer.readTailData();
|
auto result = deserializer.readTailData();
|
||||||
deserializer.finished();
|
deserializer.finished();
|
||||||
return InnerConfig {cipherName, std::move(result)};
|
return InnerConfig {cipherName, std::move(result)};
|
||||||
@ -43,7 +43,7 @@ namespace cryfs {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void InnerConfig::_checkHeader(Deserializer *deserializer) {
|
void InnerConfig::_checkHeader(Deserializer *deserializer) {
|
||||||
string header = deserializer->readString();
|
const string header = deserializer->readString();
|
||||||
if (header != HEADER) {
|
if (header != HEADER) {
|
||||||
throw std::runtime_error("Invalid header. Maybe this filesystem was created with a different version of CryFS?");
|
throw std::runtime_error("Invalid header. Maybe this filesystem was created with a different version of CryFS?");
|
||||||
}
|
}
|
||||||
|
@ -18,7 +18,7 @@ namespace cryfs {
|
|||||||
const string OuterConfig::HEADER = "cryfs.config;1;scrypt";
|
const string OuterConfig::HEADER = "cryfs.config;1;scrypt";
|
||||||
|
|
||||||
void OuterConfig::_checkHeader(Deserializer *deserializer) {
|
void OuterConfig::_checkHeader(Deserializer *deserializer) {
|
||||||
string header = deserializer->readString();
|
const string header = deserializer->readString();
|
||||||
if (header != HEADER) {
|
if (header != HEADER) {
|
||||||
throw std::runtime_error("Invalid header");
|
throw std::runtime_error("Invalid header");
|
||||||
}
|
}
|
||||||
@ -47,7 +47,7 @@ namespace cryfs {
|
|||||||
Deserializer deserializer(&data);
|
Deserializer deserializer(&data);
|
||||||
try {
|
try {
|
||||||
#ifndef CRYFS_NO_COMPATIBILITY
|
#ifndef CRYFS_NO_COMPATIBILITY
|
||||||
string header = deserializer.readString();
|
const string header = deserializer.readString();
|
||||||
if (header == OLD_HEADER) {
|
if (header == OLD_HEADER) {
|
||||||
return _deserializeOldFormat(&deserializer);
|
return _deserializeOldFormat(&deserializer);
|
||||||
} else if (header == HEADER) {
|
} else if (header == HEADER) {
|
||||||
|
@ -78,7 +78,7 @@ unique_ref<parallelaccessfsblobstore::ParallelAccessFsBlobStore> CryDevice::Crea
|
|||||||
|
|
||||||
#ifndef CRYFS_NO_COMPATIBILITY
|
#ifndef CRYFS_NO_COMPATIBILITY
|
||||||
unique_ref<fsblobstore::FsBlobStore> CryDevice::MigrateOrCreateFsBlobStore(unique_ref<BlobStore> blobStore, CryConfigFile *configFile) {
|
unique_ref<fsblobstore::FsBlobStore> CryDevice::MigrateOrCreateFsBlobStore(unique_ref<BlobStore> blobStore, CryConfigFile *configFile) {
|
||||||
string rootBlobId = configFile->config()->RootBlob();
|
const string rootBlobId = configFile->config()->RootBlob();
|
||||||
if ("" == rootBlobId) {
|
if ("" == rootBlobId) {
|
||||||
return make_unique_ref<FsBlobStore>(std::move(blobStore));
|
return make_unique_ref<FsBlobStore>(std::move(blobStore));
|
||||||
}
|
}
|
||||||
@ -194,9 +194,12 @@ optional<unique_ref<fspp::Node>> CryDevice::Load(const bf::path &path) {
|
|||||||
return optional<unique_ref<fspp::Node>>(make_unique_ref<CryDir>(this, none, none, _rootBlobId));
|
return optional<unique_ref<fspp::Node>>(make_unique_ref<CryDir>(this, none, none, _rootBlobId));
|
||||||
}
|
}
|
||||||
|
|
||||||
auto parentWithGrandparent = LoadDirBlobWithParent(path.parent_path());
|
auto parentWithAncestors = LoadDirBlobWithAncestors(path.parent_path(), [](const BlockId&){});
|
||||||
auto parent = std::move(parentWithGrandparent.blob);
|
if (parentWithAncestors == none) {
|
||||||
auto grandparent = std::move(parentWithGrandparent.parent);
|
return none;
|
||||||
|
}
|
||||||
|
auto parent = std::move(parentWithAncestors->blob);
|
||||||
|
auto grandparent = std::move(parentWithAncestors->parent);
|
||||||
|
|
||||||
auto optEntry = parent->GetChild(path.filename().string());
|
auto optEntry = parent->GetChild(path.filename().string());
|
||||||
if (optEntry == boost::none) {
|
if (optEntry == boost::none) {
|
||||||
@ -215,16 +218,19 @@ optional<unique_ref<fspp::Node>> CryDevice::Load(const bf::path &path) {
|
|||||||
ASSERT(false, "Switch/case not exhaustive");
|
ASSERT(false, "Switch/case not exhaustive");
|
||||||
}
|
}
|
||||||
|
|
||||||
CryDevice::DirBlobWithParent CryDevice::LoadDirBlobWithParent(const bf::path &path) {
|
optional<CryDevice::DirBlobWithAncestors> CryDevice::LoadDirBlobWithAncestors(const bf::path &path, std::function<void (const blockstore::BlockId&)> ancestor_callback) {
|
||||||
auto blob = LoadBlobWithParent(path);
|
auto blob = LoadBlobWithAncestors(path, std::move(ancestor_callback));
|
||||||
auto dir = dynamic_pointer_move<DirBlobRef>(blob.blob);
|
if (blob == none) {
|
||||||
|
return none;
|
||||||
|
}
|
||||||
|
auto dir = dynamic_pointer_move<DirBlobRef>(blob->blob);
|
||||||
if (dir == none) {
|
if (dir == none) {
|
||||||
throw FuseErrnoException(ENOTDIR); // Loaded blob is not a directory
|
throw FuseErrnoException(ENOTDIR); // Loaded blob is not a directory
|
||||||
}
|
}
|
||||||
return DirBlobWithParent{std::move(*dir), std::move(blob.parent)};
|
return DirBlobWithAncestors{std::move(*dir), std::move(blob->parent)};
|
||||||
}
|
}
|
||||||
|
|
||||||
CryDevice::BlobWithParent CryDevice::LoadBlobWithParent(const bf::path &path) {
|
optional<CryDevice::BlobWithAncestors> CryDevice::LoadBlobWithAncestors(const bf::path &path, std::function<void (const blockstore::BlockId&)> ancestor_callback) {
|
||||||
optional<unique_ref<DirBlobRef>> parentBlob = none;
|
optional<unique_ref<DirBlobRef>> parentBlob = none;
|
||||||
optional<unique_ref<FsBlobRef>> currentBlobOpt = _fsBlobStore->load(_rootBlobId);
|
optional<unique_ref<FsBlobRef>> currentBlobOpt = _fsBlobStore->load(_rootBlobId);
|
||||||
if (currentBlobOpt == none) {
|
if (currentBlobOpt == none) {
|
||||||
@ -235,6 +241,7 @@ CryDevice::BlobWithParent CryDevice::LoadBlobWithParent(const bf::path &path) {
|
|||||||
ASSERT(currentBlob->parentPointer() == BlockId::Null(), "Root Blob should have a nullptr as parent");
|
ASSERT(currentBlob->parentPointer() == BlockId::Null(), "Root Blob should have a nullptr as parent");
|
||||||
|
|
||||||
for (const bf::path &component : path.relative_path()) {
|
for (const bf::path &component : path.relative_path()) {
|
||||||
|
ancestor_callback(currentBlob->blockId());
|
||||||
auto currentDir = dynamic_pointer_move<DirBlobRef>(currentBlob);
|
auto currentDir = dynamic_pointer_move<DirBlobRef>(currentBlob);
|
||||||
if (currentDir == none) {
|
if (currentDir == none) {
|
||||||
throw FuseErrnoException(ENOTDIR); // Path component is not a dir
|
throw FuseErrnoException(ENOTDIR); // Path component is not a dir
|
||||||
@ -242,19 +249,20 @@ CryDevice::BlobWithParent CryDevice::LoadBlobWithParent(const bf::path &path) {
|
|||||||
|
|
||||||
auto childOpt = (*currentDir)->GetChild(component.string());
|
auto childOpt = (*currentDir)->GetChild(component.string());
|
||||||
if (childOpt == boost::none) {
|
if (childOpt == boost::none) {
|
||||||
throw FuseErrnoException(ENOENT); // Child entry in directory not found
|
// Child entry in directory not found
|
||||||
|
return none;
|
||||||
}
|
}
|
||||||
BlockId childId = childOpt->blockId();
|
const BlockId childId = childOpt->blockId();
|
||||||
auto nextBlob = _fsBlobStore->load(childId);
|
auto nextBlob = _fsBlobStore->load(childId);
|
||||||
if (nextBlob == none) {
|
if (nextBlob == none) {
|
||||||
throw FuseErrnoException(ENOENT); // Blob for directory entry not found
|
throw FuseErrnoException(EIO); // Blob for directory entry not found
|
||||||
}
|
}
|
||||||
parentBlob = std::move(*currentDir);
|
parentBlob = std::move(*currentDir);
|
||||||
currentBlob = std::move(*nextBlob);
|
currentBlob = std::move(*nextBlob);
|
||||||
ASSERT(currentBlob->parentPointer() == (*parentBlob)->blockId(), "Blob has wrong parent pointer");
|
ASSERT(currentBlob->parentPointer() == (*parentBlob)->blockId(), "Blob has wrong parent pointer");
|
||||||
}
|
}
|
||||||
|
|
||||||
return BlobWithParent{std::move(currentBlob), std::move(parentBlob)};
|
return BlobWithAncestors{std::move(currentBlob), std::move(parentBlob)};
|
||||||
|
|
||||||
//TODO (I think this is resolved, but I should test it)
|
//TODO (I think this is resolved, but I should test it)
|
||||||
// Running the python script, waiting for "Create files in sequential order...", then going into dir ~/tmp/cryfs-mount-.../Bonnie.../ and calling "ls"
|
// Running the python script, waiting for "Create files in sequential order...", then going into dir ~/tmp/cryfs-mount-.../Bonnie.../ and calling "ls"
|
||||||
@ -265,8 +273,8 @@ CryDevice::BlobWithParent CryDevice::LoadBlobWithParent(const bf::path &path) {
|
|||||||
CryDevice::statvfs CryDevice::statfs() {
|
CryDevice::statvfs CryDevice::statfs() {
|
||||||
callFsActionCallbacks();
|
callFsActionCallbacks();
|
||||||
|
|
||||||
uint64_t numUsedBlocks = _fsBlobStore->numBlocks();
|
const uint64_t numUsedBlocks = _fsBlobStore->numBlocks();
|
||||||
uint64_t numFreeBlocks = _fsBlobStore->estimateSpaceForNumBlocksLeft();
|
const uint64_t numFreeBlocks = _fsBlobStore->estimateSpaceForNumBlocksLeft();
|
||||||
|
|
||||||
statvfs result;
|
statvfs result;
|
||||||
result.max_filename_length = 255; // We theoretically support unlimited file name length, but 255 is the default for many Linux file systems, so it probably also makes sense for CryFS.
|
result.max_filename_length = 255; // We theoretically support unlimited file name length, but 255 is the default for many Linux file systems, so it probably also makes sense for CryFS.
|
||||||
@ -314,7 +322,7 @@ void CryDevice::RemoveBlob(const blockstore::BlockId &blockId) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
BlockId CryDevice::GetOrCreateRootBlobId(CryConfigFile *configFile) {
|
BlockId CryDevice::GetOrCreateRootBlobId(CryConfigFile *configFile) {
|
||||||
string root_blockId = configFile->config()->RootBlob();
|
const string root_blockId = configFile->config()->RootBlob();
|
||||||
if (root_blockId == "") { // NOLINT (workaround https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82481 )
|
if (root_blockId == "") { // NOLINT (workaround https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82481 )
|
||||||
auto new_blockId = CreateRootBlobAndReturnId();
|
auto new_blockId = CreateRootBlobAndReturnId();
|
||||||
configFile->config()->SetRootBlob(new_blockId.ToString());
|
configFile->config()->SetRootBlob(new_blockId.ToString());
|
||||||
|
@ -28,11 +28,11 @@ public:
|
|||||||
cpputils::unique_ref<parallelaccessfsblobstore::DirBlobRef> CreateDirBlob(const blockstore::BlockId &parent);
|
cpputils::unique_ref<parallelaccessfsblobstore::DirBlobRef> CreateDirBlob(const blockstore::BlockId &parent);
|
||||||
cpputils::unique_ref<parallelaccessfsblobstore::SymlinkBlobRef> CreateSymlinkBlob(const boost::filesystem::path &target, const blockstore::BlockId &parent);
|
cpputils::unique_ref<parallelaccessfsblobstore::SymlinkBlobRef> CreateSymlinkBlob(const boost::filesystem::path &target, const blockstore::BlockId &parent);
|
||||||
cpputils::unique_ref<parallelaccessfsblobstore::FsBlobRef> LoadBlob(const blockstore::BlockId &blockId);
|
cpputils::unique_ref<parallelaccessfsblobstore::FsBlobRef> LoadBlob(const blockstore::BlockId &blockId);
|
||||||
struct DirBlobWithParent {
|
struct DirBlobWithAncestors {
|
||||||
cpputils::unique_ref<parallelaccessfsblobstore::DirBlobRef> blob;
|
cpputils::unique_ref<parallelaccessfsblobstore::DirBlobRef> blob;
|
||||||
boost::optional<cpputils::unique_ref<parallelaccessfsblobstore::DirBlobRef>> parent;
|
boost::optional<cpputils::unique_ref<parallelaccessfsblobstore::DirBlobRef>> parent;
|
||||||
};
|
};
|
||||||
DirBlobWithParent LoadDirBlobWithParent(const boost::filesystem::path &path);
|
boost::optional<DirBlobWithAncestors> LoadDirBlobWithAncestors(const boost::filesystem::path &path, std::function<void (const blockstore::BlockId&)> ancestor_callback);
|
||||||
void RemoveBlob(const blockstore::BlockId &blockId);
|
void RemoveBlob(const blockstore::BlockId &blockId);
|
||||||
|
|
||||||
void onFsAction(std::function<void()> callback);
|
void onFsAction(std::function<void()> callback);
|
||||||
@ -65,11 +65,11 @@ private:
|
|||||||
static cpputils::unique_ref<blockstore::BlockStore2> CreateIntegrityEncryptedBlockStore(cpputils::unique_ref<blockstore::BlockStore2> blockStore, const LocalStateDir& localStateDir, CryConfigFile *configFile, uint32_t myClientId, bool allowIntegrityViolations, bool missingBlockIsIntegrityViolation, std::function<void()> onIntegrityViolation);
|
static cpputils::unique_ref<blockstore::BlockStore2> CreateIntegrityEncryptedBlockStore(cpputils::unique_ref<blockstore::BlockStore2> blockStore, const LocalStateDir& localStateDir, CryConfigFile *configFile, uint32_t myClientId, bool allowIntegrityViolations, bool missingBlockIsIntegrityViolation, std::function<void()> onIntegrityViolation);
|
||||||
static cpputils::unique_ref<blockstore::BlockStore2> CreateEncryptedBlockStore(const CryConfig &config, cpputils::unique_ref<blockstore::BlockStore2> baseBlockStore);
|
static cpputils::unique_ref<blockstore::BlockStore2> CreateEncryptedBlockStore(const CryConfig &config, cpputils::unique_ref<blockstore::BlockStore2> baseBlockStore);
|
||||||
|
|
||||||
struct BlobWithParent {
|
struct BlobWithAncestors {
|
||||||
cpputils::unique_ref<parallelaccessfsblobstore::FsBlobRef> blob;
|
cpputils::unique_ref<parallelaccessfsblobstore::FsBlobRef> blob;
|
||||||
boost::optional<cpputils::unique_ref<parallelaccessfsblobstore::DirBlobRef>> parent;
|
boost::optional<cpputils::unique_ref<parallelaccessfsblobstore::DirBlobRef>> parent;
|
||||||
};
|
};
|
||||||
BlobWithParent LoadBlobWithParent(const boost::filesystem::path &path);
|
boost::optional<BlobWithAncestors> LoadBlobWithAncestors(const boost::filesystem::path &path, std::function<void (const blockstore::BlockId&)> ancestor_callback);
|
||||||
|
|
||||||
DISALLOW_COPY_AND_ASSIGN(CryDevice);
|
DISALLOW_COPY_AND_ASSIGN(CryDevice);
|
||||||
};
|
};
|
||||||
|
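A hypothetical caller of the new signature, showing how the optional return and the ancestor callback are meant to be consumed (local names are illustrative; the real call sites are CryDevice::Load() and CryNode::rename() in this change):

auto loaded = device->LoadDirBlobWithAncestors(path.parent_path(), [&] (const blockstore::BlockId &ancestorId) {
    // invoked once for every directory on the way down to the target,
    // e.g. to detect that a node is about to be moved into its own subtree
    if (ancestorId == myBlockId) {
        throw fspp::fuse::FuseErrnoException(EINVAL);
    }
});
if (loaded == boost::none) {
    throw fspp::fuse::FuseErrnoException(ENOENT); // some path component no longer exists
}
auto dirBlob = std::move(loaded->blob);        // cpputils::unique_ref<DirBlobRef>
auto maybeParent = std::move(loaded->parent);  // boost::optional<unique_ref<DirBlobRef>> (none at the root)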
@ -81,6 +81,11 @@ vector<fspp::Dir::Entry> CryDir::children() {
|
|||||||
return children;
|
return children;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
size_t CryDir::numChildren() {
|
||||||
|
auto blob = LoadBlob();
|
||||||
|
return blob->NumChildren();
|
||||||
|
}
|
||||||
|
|
||||||
fspp::Dir::EntryType CryDir::getType() const {
|
fspp::Dir::EntryType CryDir::getType() const {
|
||||||
device()->callFsActionCallbacks();
|
device()->callFsActionCallbacks();
|
||||||
return fspp::Dir::EntryType::DIR;
|
return fspp::Dir::EntryType::DIR;
|
||||||
|
@ -11,7 +11,7 @@ namespace cryfs {
|
|||||||
class CryDir final: public fspp::Dir, public CryNode {
|
class CryDir final: public fspp::Dir, public CryNode {
|
||||||
public:
|
public:
|
||||||
CryDir(CryDevice *device, boost::optional<cpputils::unique_ref<parallelaccessfsblobstore::DirBlobRef>> parent, boost::optional<cpputils::unique_ref<parallelaccessfsblobstore::DirBlobRef>> grandparent, const blockstore::BlockId &blockId);
|
CryDir(CryDevice *device, boost::optional<cpputils::unique_ref<parallelaccessfsblobstore::DirBlobRef>> parent, boost::optional<cpputils::unique_ref<parallelaccessfsblobstore::DirBlobRef>> grandparent, const blockstore::BlockId &blockId);
|
||||||
~CryDir();
|
~CryDir() override;
|
||||||
|
|
||||||
//TODO return type variance to CryFile/CryDir?
|
//TODO return type variance to CryFile/CryDir?
|
||||||
cpputils::unique_ref<fspp::OpenFile> createAndOpenFile(const std::string &name, fspp::mode_t mode, fspp::uid_t uid, fspp::gid_t gid) override;
|
cpputils::unique_ref<fspp::OpenFile> createAndOpenFile(const std::string &name, fspp::mode_t mode, fspp::uid_t uid, fspp::gid_t gid) override;
|
||||||
@ -20,6 +20,7 @@ public:
|
|||||||
|
|
||||||
//TODO Make Entry a public class instead of hidden in DirBlob (which is not publicly visible)
|
//TODO Make Entry a public class instead of hidden in DirBlob (which is not publicly visible)
|
||||||
std::vector<fspp::Dir::Entry> children() override;
|
std::vector<fspp::Dir::Entry> children() override;
|
||||||
|
size_t numChildren();
|
||||||
|
|
||||||
fspp::Dir::EntryType getType() const override;
|
fspp::Dir::EntryType getType() const override;
|
||||||
|
|
||||||
|
@@ -12,7 +12,7 @@ namespace cryfs {
 class CryFile final: public fspp::File, public CryNode {
 public:
   CryFile(CryDevice *device, cpputils::unique_ref<parallelaccessfsblobstore::DirBlobRef> parent, boost::optional<cpputils::unique_ref<parallelaccessfsblobstore::DirBlobRef>> grandparent, const blockstore::BlockId &blockId);
-  ~CryFile();
+  ~CryFile() override;
 
   cpputils::unique_ref<fspp::OpenFile> open(fspp::openflags_t flags) override;
   void truncate(fspp::num_bytes_t size) override;
@@ -8,11 +8,13 @@
 #include <cpp-utils/system/time.h>
 #include <cpp-utils/system/stat.h>
 #include <cpp-utils/logging/logging.h>
+#include "entry_helper.h"
 
 namespace bf = boost::filesystem;
 
 using blockstore::BlockId;
 using cpputils::unique_ref;
+using cpputils::dynamic_pointer_move;
 using boost::optional;
 using boost::none;
 using std::shared_ptr;
@@ -80,29 +82,66 @@ void CryNode::rename(const bf::path &to) {
     // We are the root direcory.
     throw FuseErrnoException(EBUSY);
   }
-  auto targetDirWithParent = _device->LoadDirBlobWithParent(to.parent_path());
-  auto targetDir = std::move(targetDirWithParent.blob);
-  auto targetDirParent = std::move(targetDirWithParent.parent);
+  if (!to.has_parent_path()) {
+    // Target is the root directory
+    throw FuseErrnoException(EBUSY);
+  }
+
+  auto targetParentAndAncestors = _device->LoadDirBlobWithAncestors(to.parent_path(), [&] (const BlockId& ancestorId) {
+    if (ancestorId == _blockId) {
+      // We are trying to move a node into one of its subdirectories. This is not allowed.
+      throw FuseErrnoException(EINVAL);
+    }
+  });
+  if (targetParentAndAncestors == none) {
+    // Target parent directory doesn't exist
+    throw FuseErrnoException(ENOENT);
+  }
+  auto targetParent = std::move(targetParentAndAncestors->blob);
+  auto targetGrandparent = std::move(targetParentAndAncestors->parent);
+  if (targetParent->blockId() == _blockId) {
+    // We are trying to move a node into one of its subdirectories. This is not allowed.
+    throw FuseErrnoException(EINVAL);
+  }
 
   auto old = (*_parent)->GetChild(_blockId);
   if (old == boost::none) {
     throw FuseErrnoException(EIO);
   }
-  fsblobstore::DirEntry oldEntry = *old; // Copying this (instead of only keeping the reference) is necessary, because the operations below (i.e. RenameChild()) might make a reference invalid.
+  const fsblobstore::DirEntry oldEntry = *old; // Copying this (instead of only keeping the reference) is necessary, because the operations below (i.e. RenameChild()) might make a reference invalid.
   auto onOverwritten = [this] (const blockstore::BlockId &blockId) {
       device()->RemoveBlob(blockId);
   };
-  _updateParentModificationTimestamp();
-  if (targetDir->blockId() == (*_parent)->blockId()) {
-    targetDir->RenameChild(oldEntry.blockId(), to.filename().string(), onOverwritten);
+  if (targetParent->blockId() == (*_parent)->blockId()) {
+    _updateParentModificationTimestamp();
+    targetParent->RenameChild(oldEntry.blockId(), to.filename().string(), onOverwritten);
   } else {
-    _updateTargetDirModificationTimestamp(*targetDir, std::move(targetDirParent));
-    targetDir->AddOrOverwriteChild(to.filename().string(), oldEntry.blockId(), oldEntry.type(), oldEntry.mode(), oldEntry.uid(), oldEntry.gid(),
+    auto preexistingTargetEntry = targetParent->GetChild(to.filename().string());
+    if (preexistingTargetEntry != boost::none && preexistingTargetEntry->type() == fspp::Dir::EntryType::DIR) {
+      if (getType() != fspp::Dir::EntryType::DIR) {
+        // A directory cannot be overwritten with a non-directory
+        throw FuseErrnoException(EISDIR);
+      }
+      auto preexistingTarget = device()->LoadBlob(preexistingTargetEntry->blockId());
+      auto preexistingTargetDir = dynamic_pointer_move<DirBlobRef>(preexistingTarget);
+      if (preexistingTargetDir == none) {
+        LOG(ERR, "Preexisting target is not a directory. But its parent dir entry says it's a directory");
+        throw FuseErrnoException(EIO);
+      }
+      if ((*preexistingTargetDir)->NumChildren() > 0) {
+        // Cannot overwrite a non-empty dir with a rename operation.
+        throw FuseErrnoException(ENOTEMPTY);
+      }
+    }
+
+    _updateParentModificationTimestamp();
+    _updateTargetDirModificationTimestamp(*targetParent, std::move(targetGrandparent));
+    targetParent->AddOrOverwriteChild(to.filename().string(), oldEntry.blockId(), oldEntry.type(), oldEntry.mode(), oldEntry.uid(), oldEntry.gid(),
                                    oldEntry.lastAccessTime(), oldEntry.lastModificationTime(), onOverwritten);
     (*_parent)->RemoveChild(oldEntry.name());
-    // targetDir is now the new parent for this node. Adapt to it, so we can call further operations on this node object.
-    LoadBlob()->setParentPointer(targetDir->blockId());
-    _parent = std::move(targetDir);
+    // targetParent is now the new parent for this node. Adapt to it, so we can call further operations on this node object.
+    LoadBlob()->setParentPointer(targetParent->blockId());
+    _parent = std::move(targetParent);
   }
 }
 
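Aside (not part of the diff): the rewritten `rename` adds guards the old code lacked. Moving a directory into its own subtree is rejected with EINVAL (via the ancestor callback and the `targetParent->blockId() == _blockId` check), and overwriting a pre-existing non-empty directory is rejected with ENOTEMPTY. A self-contained sketch of the cycle check on a toy tree of parent pointers (placeholder types, not the cryfs blob store; the ENOTEMPTY path is omitted for brevity):

```cpp
// Illustrative sketch only: the same cycle-guard logic on a toy tree of parent pointers.
#include <cerrno>
#include <iostream>
#include <map>
#include <optional>
#include <string>

// Each node maps to its parent; the root has no parent.
using Tree = std::map<std::string, std::optional<std::string>>;

// Returns 0 if `node` may be moved under `targetParent`, otherwise an errno value.
int CheckMoveAllowed(const Tree& tree, const std::string& node,
                     const std::string& targetParent) {
  // Walk from targetParent up to the root; if we meet `node`, the move would
  // put a directory inside its own subtree -> EINVAL, like the diff above.
  std::optional<std::string> cur = targetParent;
  while (cur) {
    if (*cur == node) {
      return EINVAL;
    }
    cur = tree.at(*cur);
  }
  return 0;
}

int main() {
  const Tree tree = {{"root", std::nullopt},
                     {"a", "root"},
                     {"a/b", "a"},
                     {"c", "root"}};
  std::cout << CheckMoveAllowed(tree, "a", "a/b") << '\n';  // EINVAL: into own subtree
  std::cout << CheckMoveAllowed(tree, "a", "c") << '\n';    // 0: allowed
}
```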
@@ -179,13 +218,17 @@ CryNode::stat_info CryNode::stat() const {
     result.size = fsblobstore::DirBlob::DIR_LSTAT_SIZE;
     //TODO If possible without performance loss, then for a directory, st_nlink should return number of dir entries (including "." and "..")
     result.nlink = 1;
-    struct timespec now = cpputils::time::now();
+    const struct timespec now = cpputils::time::now();
     result.atime = now;
     result.mtime = now;
     result.ctime = now;
     return result;
   } else {
-    return (*_parent)->statChild(_blockId);
+    auto childOpt = (*_parent)->GetChild(_blockId);
+    if (childOpt == boost::none) {
+      throw fspp::fuse::FuseErrnoException(ENOENT);
+    }
+    return dirEntryToStatInfo(*childOpt, LoadBlob()->lstat_size());
   }
 }
 
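Aside (not part of the diff): for a non-root node, `stat()` no longer delegates to `statChild()` on the parent blob; it fetches its own directory entry, reports ENOENT if the parent no longer lists it, and converts the entry with `dirEntryToStatInfo` from the newly included entry_helper.h. That helper's body is not shown in this commit, so the following is only a guess at the shape of such a conversion, with every type reduced to a stand-in:

```cpp
// Illustrative stand-ins only; the real DirEntry/stat_info/dirEntryToStatInfo live in cryfs.
#include <cstdint>
#include <optional>
#include <system_error>

struct DirEntrySketch {    // stand-in for fsblobstore::DirEntry
  uint32_t mode = 0;
  uint32_t uid = 0;
  uint32_t gid = 0;
};

struct StatInfoSketch {    // stand-in for fspp::Node::stat_info
  uint32_t mode = 0;
  uint32_t uid = 0;
  uint32_t gid = 0;
  uint64_t size = 0;
};

// Hypothetical conversion: copy the entry's metadata, take the size from the blob
// (the diff passes LoadBlob()->lstat_size() for that second argument).
StatInfoSketch dirEntryToStatInfoSketch(const DirEntrySketch& entry, uint64_t lstatSize) {
  return StatInfoSketch{entry.mode, entry.uid, entry.gid, lstatSize};
}

// The lookup-then-convert pattern the diff uses for non-root nodes.
StatInfoSketch statChildSketch(const std::optional<DirEntrySketch>& entry, uint64_t lstatSize) {
  if (!entry) {
    // Parent no longer lists this node -> ENOENT, as in the diff.
    throw std::system_error(std::make_error_code(std::errc::no_such_file_or_directory));
  }
  return dirEntryToStatInfoSketch(*entry, lstatSize);
}

int main() {
  const StatInfoSketch info = statChildSketch(DirEntrySketch{0644, 1000, 1000}, 4096);
  return info.size == 4096 ? 0 : 1;
}
```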
@@ -12,7 +12,7 @@ namespace cryfs {
 
 class CryNode: public fspp::Node {
 public:
-  virtual ~CryNode();
+  ~CryNode() override;
 
   // TODO grandparent is only needed to set the timestamps of the parent directory on rename and remove. Delete grandparent parameter once we store timestamps in the blob itself instead of in the directory listing.
   CryNode(CryDevice *device, boost::optional<cpputils::unique_ref<parallelaccessfsblobstore::DirBlobRef>> parent, boost::optional<cpputils::unique_ref<parallelaccessfsblobstore::DirBlobRef>> grandparent, const blockstore::BlockId &blockId);
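Aside (not part of the diff): the destructor changes in this commit (`~CryDir() override;`, `~CryFile() override;`, and `virtual ~CryNode();` becoming `~CryNode() override;`) all follow the same idiom: the inherited destructor is already virtual, `override` lets the compiler verify that the declaration really overrides it, and repeating `virtual` adds nothing. A minimal sketch with toy names:

```cpp
// Minimal sketch of the destructor idiom used in this commit (toy names).
struct Base {
  virtual ~Base() = default;      // polymorphic base: virtual destructor
};

struct Derived final : Base {
  ~Derived() override = default;  // 'override' makes the compiler check this
                                  // really overrides Base::~Base()
};

int main() {
  Base* b = new Derived();
  delete b;  // safe: dispatches to ~Derived() through the virtual destructor
}
```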
Some files were not shown because too many files have changed in this diff.