Merge all git repositories into one

Sebastian Messmer 2016-02-11 16:39:42 +01:00
commit c6e8052d93
989 changed files with 204181 additions and 393 deletions

.gitignore

@@ -1,35 +1,5 @@
# Compiled Object files
*.slo
*.lo
*.o
*.obj
# Precompiled Headers
*.gch
*.pch
# Compiled Dynamic libraries
*.so
*.dylib
*.dll
# Fortran module files
*.mod
# Compiled Static libraries
*.lai
*.la
*.a
*.lib
# Executables
*.exe
*.out
*.app
# Biicode directory
bii
bin
umltest.inner.sh
umltest.status
/build
/cmake
/.idea

.idea/vcs.xml (new file)

@@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$" vcs="Git" />
</component>
</project>

.travis.yml

@@ -1,17 +1,37 @@
language: cpp
sudo: required
dist: trusty
compiler:
- gcc
- clang
os:
- linux
- osx
matrix:
allow_failures:
- os: osx
- compiler: clang
addons:
apt:
packages:
- libcrypto++-dev
- libfuse-dev
install:
- wget https://raw.githubusercontent.com/smessmer/travis-utils/master/update_gcc_version.sh
&& chmod +x update_gcc_version.sh
&& ./update_gcc_version.sh 4.8
&& rm update_gcc_version.sh
- sudo apt-get install libfuse-dev
# This is needed for packaging 7z distribution packages
- sudo apt-get install software-properties-common && sudo add-apt-repository ppa:george-edison55/precise-backports -y && sudo apt-get update
- sudo apt-get install cmake cmake-data rpm
# CryFS needs cmake >= 3.3, install it.
# Install boost
- wget -O boost.tar.bz2 https://sourceforge.net/projects/boost/files/boost/1.56.0/boost_1_56_0.tar.bz2/download
- tar -xf boost.tar.bz2
- cd boost_1_56_0
# TODO We should use clang as toolchain for building boost when clang is used for building our code
- ./bootstrap.sh --with-libraries=filesystem,thread,chrono
- sudo ./b2 -d0 install
- cd ..
- sudo rm -rf boost.tar.bz2 boost_1_56_0
# Install run_with_fuse.sh
- mkdir cmake
- cd cmake
- wget https://raw.githubusercontent.com/smessmer/travis-utils/master/run_with_fuse.sh
- chmod +x run_with_fuse.sh
# Install cmake >= 3.3
- wget --no-check-certificate https://cmake.org/files/v3.3/cmake-3.3.2-Linux-x86_64.tar.gz
&& tar -xf cmake-3.3.2-Linux-x86_64.tar.gz
&& sudo cp -R cmake-3.3.2-Linux-x86_64/* /usr
@@ -19,35 +39,14 @@ install:
- cmake --version
# Use /dev/urandom when /dev/random is accessed, because travis doesn't have enough entropy
- sudo cp -a /dev/urandom /dev/random
before_script:
- wget https://raw.githubusercontent.com/smessmer/travis-utils/master/setup_biicode_project.sh
&& chmod +x setup_biicode_project.sh
&& ./setup_biicode_project.sh
&& rm setup_biicode_project.sh
script:
#The configure line is needed as a workaround for the following link, otherwise we wouldn't need "bii configure" at all because "bii build" calls it: http://forum.biicode.com/t/error-could-not-find-the-following-static-boost-libraries-boost-thread/374
- bii cpp:configure || bii cpp:configure
# Build cryfs executable
- bii cpp:build -- -j2
# Build and run test cases
- bii cpp:build --target messmer_cryfs_test_main -- -j2
- wget https://raw.githubusercontent.com/smessmer/travis-utils/master/run_with_fuse.sh
&& chmod +x run_with_fuse.sh
&& ./run_with_fuse.sh "./bin/messmer_cryfs_test_main"
&& rm run_with_fuse.sh
# Make distribution packages
- bii clean
- bii cpp:configure -D CMAKE_BUILD_TYPE=Release
- bii build -- -j2
- cd bii/build/messmer_cryfs && make package && cd ../../..
after_success:
- bii user ${BII_USERNAME} -p ${BII_PASSWORD}
- bii publish
#deploy:
# provider: biicode
# user: ${BII_USERNAME}
# password:
# secure: ${BII_PASSWORD}
# on:
# branch: develop
- cmake ..
- make -j2
- make package -j2
- ./test/cpp-utils/cpp-utils-test
- ./run_with_fuse.sh ./test/fspp/fspp-test
- ./test/parallelaccessstore/parallelaccessstore-test
- ./test/blockstore/blockstore-test
- ./test/blobstore/blobstore-test
after_script:
- rm run_with_fuse.sh

CMakeLists.txt

@@ -1,26 +1,15 @@
# Earlier cmake versions generate .deb packages for which the package manager says they're bad quality
# and asks the user whether they really want to install it. Cmake 3.3 fixes this.
CMAKE_MINIMUM_REQUIRED(VERSION 3.3)
cmake_minimum_required(VERSION 3.3)
INCLUDE(messmer/cmake/tools)
INCLUDE(messmer/gitversion/cmake)
include(utils.cmake)
SETUP_GOOGLETEST()
require_gcc_version(4.8)
# Actually create targets: EXEcutables and libraries.
ADD_BII_TARGETS()
ACTIVATE_CPP14()
ADD_BOOST(program_options chrono)
ADD_DEFINITIONS(-D_FILE_OFFSET_BITS=64)
GIT_VERSION_INIT()
ENABLE_STYLE_WARNINGS()
SET_TARGET_PROPERTIES(${BII_src_main_TARGET} PROPERTIES OUTPUT_NAME cryfs)
add_subdirectory(vendor)
add_subdirectory(gitversion)
add_subdirectory(src)
add_subdirectory(test)
# Fix debfiles permissions. Unfortunately, git doesn't store file permissions.
# When installing the .deb package and these files have the wrong permissions, the package manager complains.
@@ -64,88 +53,3 @@ SET(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA "${CMAKE_CURRENT_SOURCE_DIR}/debfiles/pos
INCLUDE(CPack)
# You can safely delete lines from here...
###############################################################################
# REFERENCE #
###############################################################################
#
# This CMakeLists.txt file helps defining your block building and compiling
# To learn more about the CMake use with biicode, visit http://docs.biicode.com/c++.html
#
# ----------------------------------------------------
# NEW FEATURE! Include cmake files from remote blocks:
# -----------------------------------------------------
# Now you can handle cmake dependencies just like you do with c/c++:
#
# INCLUDE(user/block/myrecipe) # include myrecipe.cmake from remote user/block
#
# > EXAMPLE: Include our recipes and activate C++11 in your block (http://www.biicode.com/biicode/cmake)
#
# INCLUDE(biicode/cmake/tools) # Include tools.cmake file from "cmake" block from the "biicode" user
# ACTIVATE_CPP11(INTERFACE ${BII_BLOCK_TARGET})
#
# Remember to run "bii find" to download our cmake tools file
#
# ---------------------
# INIT_BIICODE_BLOCK()
# ---------------------
# This function creates several helper variables as ${BII_BLOCK_NAME} and ${BII_BLOCK_USER}
# Also it loads variables from the cmake/bii_user_block_vars.cmake
# ${BII_LIB_SRC} File list to create the library
# ${BII_LIB_TYPE} Empty (default, STATIC most cases) STATIC or SHARED
# ${BII_LIB_DEPS} Dependencies to other libraries (user2_block2, user3_blockX)
# ${BII_LIB_SYSTEM_HEADERS} System linking requirements as windows.h, pthread.h, etc
#
# You can use or modify them here, for example, to add or remove files from targets based on OS
# Or use typical cmake configurations done BEFORE defining targets. Examples:
# ADD_DEFINITIONS(-DFOO)
# FIND_PACKAGE(OpenGL QUIET)
# You can add INCLUDE_DIRECTORIES here too
#
# ---------------------
# ADD_BIICODE_TARGETS()
# ---------------------
#
# This function creates the following variables:
# ${BII_BLOCK_TARGET} Interface (no files) target for convenient configuration of all
# targets in this block, as the rest of targets always depend on it
# has name in the form "user_block_interface"
# ${BII_LIB_TARGET} Target library name, usually in the form "user_block". May not exist
# if BII_LIB_SRC is empty
# ${BII_BLOCK_TARGETS} List of all targets defined in this block
# ${BII_BLOCK_EXES} List of executables targets defined in this block
# ${BII_exe_name_TARGET}: Executable target (e.g. ${BII_main_TARGET}. You can also use
# directly the name of the executable target (e.g. user_block_main)
#
# > EXAMPLE: Add include directories to all targets of this block
#
# TARGET_INCLUDE_DIRECTORIES(${BII_BLOCK_TARGET} INTERFACE myincludedir)
#
# You can add private include directories to the Lib (if existing)
#
# > EXAMPLE: Link with pthread:
#
# TARGET_LINK_LIBRARIES(${BII_BLOCK_TARGET} INTERFACE pthread)
# or link against library:
# TARGET_LINK_LIBRARIES(${BII_LIB_TARGET} PUBLIC pthread)
# or directly use the library target name:
# TARGET_LINK_LIBRARIES(user_block PUBLIC pthread)
#
# NOTE: This can be also done adding pthread to ${BII_LIB_DEPS}
# BEFORE calling ADD_BIICODE_TARGETS()
#
# > EXAMPLE: how to activate C++11
#
# IF(APPLE)
# TARGET_COMPILE_OPTIONS(${BII_BLOCK_TARGET} INTERFACE "-std=c++11 -stdlib=libc++")
# ELSEIF (WIN32 OR UNIX)
# TARGET_COMPILE_OPTIONS(${BII_BLOCK_TARGET} INTERFACE "-std=c++11")
# ENDIF(APPLE)
#
# > EXAMPLE: Set properties to target
#
# SET_TARGET_PROPERTIES(${BII_BLOCK_TARGET} PROPERTIES COMPILE_DEFINITIONS "IOV_MAX=255")
#

CMakeLists.txt~ (new file)

@@ -0,0 +1,165 @@
<<<<<<< HEAD
# Earlier cmake versions generate .deb packages for which the package manager says they're bad quality
# and asks the user whether they really want to install it. Cmake 3.3 fixes this.
CMAKE_MINIMUM_REQUIRED(VERSION 3.3)
INCLUDE(messmer/cmake/tools)
INCLUDE(messmer/gitversion/cmake)
SETUP_GOOGLETEST()
# Actually create targets: EXEcutables and libraries.
ADD_BII_TARGETS()
ACTIVATE_CPP14()
ADD_BOOST(program_options chrono)
ADD_DEFINITIONS(-D_FILE_OFFSET_BITS=64)
GIT_VERSION_INIT()
ENABLE_STYLE_WARNINGS()
SET_TARGET_PROPERTIES(${BII_src_main_TARGET} PROPERTIES OUTPUT_NAME cryfs)
# Fix debfiles permissions. Unfortunately, git doesn't store file permissions.
# When installing the .deb package and these files have the wrong permissions, the package manager complains.
EXECUTE_PROCESS(COMMAND /bin/bash -c "chmod 0755 ${CMAKE_CURRENT_SOURCE_DIR}/debfiles/*")
INSTALL(TARGETS ${BII_src_main_TARGET}
DESTINATION bin
CONFIGURATIONS Release)
SET(CPACK_GENERATOR TGZ DEB RPM)
SET(CPACK_PACKAGE_NAME "cryfs")
SET(CPACK_PACKAGE_VERSION "${GITVERSION_VERSION_STRING}")
SET(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Encrypt your files and store them in the cloud.")
SET(CPACK_PACKAGE_DESCRIPTION "CryFS encrypts your files, so you can safely store them anywhere. It works well together with cloud services like Dropbox, iCloud, OneDrive and others.")
SET(CPACK_PACKAGE_CONTACT "Sebastian Messmer <messmer@cryfs.org>")
SET(CPACK_PACKAGE_VENDOR "Sebastian Messmer")
SET(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE")
SET(CPACK_PACKAGE_INSTALL_DIRECTORY "CMake ${CMake_VERSION_MAJOR}.${CMake_VERSION_MINOR}")
IF(WIN32 AND NOT UNIX)
# There is a bug in NSI that does not handle full unix paths properly. Make
sure there is at least one set of four (4) backslashes.
#SET(CPACK_PACKAGE_ICON "${CMake_SOURCE_DIR}/Utilities/Release\\\\InstallIcon.bmp")
#SET(CPACK_NSIS_INSTALLED_ICON_NAME "bin\\\\cryfs.exe")
#SET(CPACK_NSIS_DISPLAY_NAME "${CPACK_PACKAGE_INSTALL_DIRECTORY} CryFS")
#SET(CPACK_NSIS_HELP_LINK "http:\\\\\\\\www.cryfs.org")
#SET(CPACK_NSIS_URL_INFO_ABOUT "http:\\\\\\\\www.cryfs.org")
#SET(CPACK_NSIS_CONTACT "messmer@cryfs.org")
#SET(CPACK_NSIS_MODIFY_PATH ON)
ELSE(WIN32 AND NOT UNIX)
SET(CPACK_STRIP_FILES "bin/cryfs")
SET(CPACK_SOURCE_STRIP_FILES "")
ENDIF(WIN32 AND NOT UNIX)
SET(CPACK_PACKAGE_EXECUTABLES "cryfs" "CryFS")
SET(CPACK_DEBIAN_PACKAGE_SECTION "utils")
SET(CPACK_DEBIAN_PACKAGE_SHLIBDEPS ON)
SET(CPACK_DEBIAN_PACKAGE_RECOMMENDS "fuse")
SET(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://www.cryfs.org")
SET(CPACK_RPM_PACKAGE_LICENSE "LGPLv3")
SET(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA "${CMAKE_CURRENT_SOURCE_DIR}/debfiles/postinst;${CMAKE_CURRENT_SOURCE_DIR}/debfiles/postrm")
INCLUDE(CPack)
# You can safely delete lines from here...
###############################################################################
# REFERENCE #
###############################################################################
#
# This CMakeLists.txt file helps defining your block building and compiling
# To learn more about the CMake use with biicode, visit http://docs.biicode.com/c++.html
#
# ----------------------------------------------------
# NEW FEATURE! Include cmake files from remote blocks:
# -----------------------------------------------------
# Now you can handle cmake dependencies just like you do with c/c++:
#
# INCLUDE(user/block/myrecipe) # include myrecipe.cmake from remote user/block
#
# > EXAMPLE: Include our recipes and activate C++11 in your block (http://www.biicode.com/biicode/cmake)
#
# INCLUDE(biicode/cmake/tools) # Include tools.cmake file from "cmake" block from the "biicode" user
# ACTIVATE_CPP11(INTERFACE ${BII_BLOCK_TARGET})
#
# Remember to run "bii find" to download our cmake tools file
#
# ---------------------
# INIT_BIICODE_BLOCK()
# ---------------------
# This function creates several helper variables as ${BII_BLOCK_NAME} and ${BII_BLOCK_USER}
# Also it loads variables from the cmake/bii_user_block_vars.cmake
# ${BII_LIB_SRC} File list to create the library
# ${BII_LIB_TYPE} Empty (default, STATIC most cases) STATIC or SHARED
# ${BII_LIB_DEPS} Dependencies to other libraries (user2_block2, user3_blockX)
# ${BII_LIB_SYSTEM_HEADERS} System linking requirements as windows.h, pthread.h, etc
#
# You can use or modify them here, for example, to add or remove files from targets based on OS
# Or use typical cmake configurations done BEFORE defining targets. Examples:
# ADD_DEFINITIONS(-DFOO)
# FIND_PACKAGE(OpenGL QUIET)
# You can add INCLUDE_DIRECTORIES here too
#
# ---------------------
# ADD_BIICODE_TARGETS()
# ---------------------
#
# This function creates the following variables:
# ${BII_BLOCK_TARGET} Interface (no files) target for convenient configuration of all
# targets in this block, as the rest of targets always depend on it
# has name in the form "user_block_interface"
# ${BII_LIB_TARGET} Target library name, usually in the form "user_block". May not exist
# if BII_LIB_SRC is empty
# ${BII_BLOCK_TARGETS} List of all targets defined in this block
# ${BII_BLOCK_EXES} List of executables targets defined in this block
# ${BII_exe_name_TARGET}: Executable target (e.g. ${BII_main_TARGET}. You can also use
# directly the name of the executable target (e.g. user_block_main)
#
# > EXAMPLE: Add include directories to all targets of this block
#
# TARGET_INCLUDE_DIRECTORIES(${BII_BLOCK_TARGET} INTERFACE myincludedir)
#
# You can add private include directories to the Lib (if existing)
#
# > EXAMPLE: Link with pthread:
#
# TARGET_LINK_LIBRARIES(${BII_BLOCK_TARGET} INTERFACE pthread)
# or link against library:
# TARGET_LINK_LIBRARIES(${BII_LIB_TARGET} PUBLIC pthread)
# or directly use the library target name:
# TARGET_LINK_LIBRARIES(user_block PUBLIC pthread)
#
# NOTE: This can be also done adding pthread to ${BII_LIB_DEPS}
# BEFORE calling ADD_BIICODE_TARGETS()
#
# > EXAMPLE: how to activate C++11
#
# IF(APPLE)
# TARGET_COMPILE_OPTIONS(${BII_BLOCK_TARGET} INTERFACE "-std=c++11 -stdlib=libc++")
# ELSEIF (WIN32 OR UNIX)
# TARGET_COMPILE_OPTIONS(${BII_BLOCK_TARGET} INTERFACE "-std=c++11")
# ENDIF(APPLE)
#
# > EXAMPLE: Set properties to target
#
# SET_TARGET_PROPERTIES(${BII_BLOCK_TARGET} PROPERTIES COMPILE_DEFINITIONS "IOV_MAX=255")
#
=======
# Earlier cmake versions generate .deb packages for which the package manager says they're bad quality
# and asks the user whether they really want to install it. Cmake 3.3 fixes this.
cmake_minimum_required(VERSION 3.3)
include(utils.cmake)
require_gcc_version(4.8)
add_subdirectory(vendor)
add_subdirectory(src)
add_subdirectory(test)
>>>>>>> cpu/cmake

README.md

@@ -51,7 +51,7 @@ Requirements
- GCC version >= 4.8 or Clang (TODO which minimal version?)
- CMake version >= 3.3
- libcurl4 (including development headers)
- Boost libraries filesystem, system, chrono, thread in version >= 1.56
- Boost libraries filesystem, system, chrono, program_options, thread in version >= 1.56
- Crypto++ >= 5.6.3 (TODO Lower minimal version possible?)
- libFUSE >= 2.8.6 (including development headers)
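
The requirements above now list Boost program_options, matching the ADD_BOOST(program_options chrono) line in the root CMakeLists.txt. For readers unfamiliar with the library, here is a minimal, self-contained sketch of the kind of command-line parsing it provides; the option names are invented for illustration and are not CryFS's actual options.

#include <boost/program_options.hpp>
#include <iostream>
#include <string>

namespace po = boost::program_options;

int main(int argc, char *argv[]) {
  // Declare the options this (hypothetical) tool understands.
  po::options_description desc("Options");
  desc.add_options()
      ("help", "show this help message")
      ("mountdir", po::value<std::string>(), "example option: where to mount the filesystem");

  // Parse argv against the declared options.
  po::variables_map vm;
  po::store(po::parse_command_line(argc, argv, desc), vm);
  po::notify(vm);

  if (vm.count("help")) {
    std::cout << desc << std::endl;  // prints the generated help text
  }
  return 0;
}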

src/CMakeLists.txt (new file)

@@ -0,0 +1,7 @@
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
add_subdirectory(cpp-utils)
add_subdirectory(fspp)
add_subdirectory(blockstore)
add_subdirectory(blobstore)
add_subdirectory(cryfs)

src/blobstore/CMakeLists.txt (new file)

@@ -0,0 +1,29 @@
project (blobstore)
set(SOURCES
implementations/onblocks/parallelaccessdatatreestore/ParallelAccessDataTreeStoreAdapter.cpp
implementations/onblocks/parallelaccessdatatreestore/DataTreeRef.cpp
implementations/onblocks/parallelaccessdatatreestore/ParallelAccessDataTreeStore.cpp
implementations/onblocks/utils/Math.cpp
implementations/onblocks/BlobStoreOnBlocks.cpp
implementations/onblocks/datanodestore/DataNode.cpp
implementations/onblocks/datanodestore/DataLeafNode.cpp
implementations/onblocks/datanodestore/DataInnerNode.cpp
implementations/onblocks/datanodestore/DataNodeStore.cpp
implementations/onblocks/datatreestore/impl/algorithms.cpp
implementations/onblocks/datatreestore/DataTree.cpp
implementations/onblocks/datatreestore/DataTreeStore.cpp
implementations/onblocks/BlobOnBlocks.cpp
)
add_library(${PROJECT_NAME} STATIC ${SOURCES})
# This is needed by boost thread
if(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
target_link_libraries(${PROJECT_NAME} PRIVATE rt)
endif(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
target_link_libraries(${PROJECT_NAME} PUBLIC cpp-utils blockstore)
target_add_boost(${PROJECT_NAME} filesystem system thread)
target_enable_style_warnings(${PROJECT_NAME})
target_activate_cpp14(${PROJECT_NAME})

src/blobstore/implementations/onblocks/BlobOnBlocks.cpp (new file)

@@ -0,0 +1,109 @@
#include "parallelaccessdatatreestore/DataTreeRef.h"
#include "BlobOnBlocks.h"
#include "datanodestore/DataLeafNode.h"
#include "utils/Math.h"
#include <cmath>
#include <cpp-utils/assert/assert.h>
using std::function;
using cpputils::unique_ref;
using cpputils::Data;
using blobstore::onblocks::datanodestore::DataLeafNode;
using blobstore::onblocks::datanodestore::DataNodeLayout;
using blockstore::Key;
namespace blobstore {
namespace onblocks {
using parallelaccessdatatreestore::DataTreeRef;
BlobOnBlocks::BlobOnBlocks(unique_ref<DataTreeRef> datatree)
: _datatree(std::move(datatree)), _sizeCache(boost::none) {
}
BlobOnBlocks::~BlobOnBlocks() {
}
uint64_t BlobOnBlocks::size() const {
if (_sizeCache == boost::none) {
_sizeCache = _datatree->numStoredBytes();
}
return *_sizeCache;
}
void BlobOnBlocks::resize(uint64_t numBytes) {
_datatree->resizeNumBytes(numBytes);
_sizeCache = numBytes;
}
void BlobOnBlocks::traverseLeaves(uint64_t beginByte, uint64_t sizeBytes, function<void (uint64_t, DataLeafNode *leaf, uint32_t, uint32_t)> func) const {
uint64_t endByte = beginByte + sizeBytes;
uint32_t firstLeaf = beginByte / _datatree->maxBytesPerLeaf();
uint32_t endLeaf = utils::ceilDivision(endByte, _datatree->maxBytesPerLeaf());
bool writingOutside = size() < endByte; // TODO Calling size() is slow because it has to traverse the tree
_datatree->traverseLeaves(firstLeaf, endLeaf, [&func, beginByte, endByte, endLeaf, writingOutside](DataLeafNode *leaf, uint32_t leafIndex) {
uint64_t indexOfFirstLeafByte = leafIndex * leaf->maxStoreableBytes();
uint32_t dataBegin = utils::maxZeroSubtraction(beginByte, indexOfFirstLeafByte);
uint32_t dataEnd = std::min(leaf->maxStoreableBytes(), endByte - indexOfFirstLeafByte);
if (leafIndex == endLeaf-1 && writingOutside) {
// If we are traversing an area that didn't exist before, then the last leaf was just created with a wrong size. We have to fix it.
leaf->resize(dataEnd);
}
func(indexOfFirstLeafByte, leaf, dataBegin, dataEnd-dataBegin);
});
if (writingOutside) {
ASSERT(_datatree->numStoredBytes() == endByte, "Writing didn't grow by the correct number of bytes");
_sizeCache = endByte;
}
}
Data BlobOnBlocks::readAll() const {
//TODO Querying size is inefficient. Is this possible without a call to size()?
uint64_t count = size();
Data result(count);
_read(result.data(), 0, count);
return result;
}
void BlobOnBlocks::read(void *target, uint64_t offset, uint64_t count) const {
ASSERT(offset <= size() && offset + count <= size(), "BlobOnBlocks::read() read outside blob. Use BlobOnBlocks::tryRead() if this should be allowed.");
uint64_t read = tryRead(target, offset, count);
ASSERT(read == count, "BlobOnBlocks::read() couldn't read all requested bytes. Use BlobOnBlocks::tryRead() if this should be allowed.");
}
uint64_t BlobOnBlocks::tryRead(void *target, uint64_t offset, uint64_t count) const {
//TODO Quite inefficient to call size() here, because that has to traverse the tree
uint64_t realCount = std::max(UINT64_C(0), std::min(count, size()-offset));
_read(target, offset, realCount);
return realCount;
}
void BlobOnBlocks::_read(void *target, uint64_t offset, uint64_t count) const {
traverseLeaves(offset, count, [target, offset] (uint64_t indexOfFirstLeafByte, const DataLeafNode *leaf, uint32_t leafDataOffset, uint32_t leafDataSize) {
//TODO Simplify formula, make it easier to understand
leaf->read((uint8_t*)target + indexOfFirstLeafByte - offset + leafDataOffset, leafDataOffset, leafDataSize);
});
}
void BlobOnBlocks::write(const void *source, uint64_t offset, uint64_t count) {
traverseLeaves(offset, count, [source, offset] (uint64_t indexOfFirstLeafByte, DataLeafNode *leaf, uint32_t leafDataOffset, uint32_t leafDataSize) {
//TODO Simplify formula, make it easier to understand
leaf->write((uint8_t*)source + indexOfFirstLeafByte - offset + leafDataOffset, leafDataOffset, leafDataSize);
});
}
void BlobOnBlocks::flush() {
_datatree->flush();
}
const Key &BlobOnBlocks::key() const {
return _datatree->key();
}
unique_ref<DataTreeRef> BlobOnBlocks::releaseTree() {
return std::move(_datatree);
}
}
}
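
The index arithmetic in traverseLeaves() above is easier to follow with concrete numbers. The standalone sketch below reproduces the same computation; the ceilDivision and maxZeroSubtraction helpers are assumed to behave like the ones referenced from utils/Math.h, and the 1024-byte leaf size is an arbitrary example, not the real node layout.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Assumed behavior of utils::ceilDivision and utils::maxZeroSubtraction.
static uint64_t ceilDivision(uint64_t dividend, uint64_t divisor) {
  return (dividend + divisor - 1) / divisor;
}
static uint64_t maxZeroSubtraction(uint64_t minuend, uint64_t subtrahend) {
  return minuend > subtrahend ? minuend - subtrahend : 0;
}

int main() {
  const uint64_t maxBytesPerLeaf = 1024;  // hypothetical leaf capacity
  const uint64_t beginByte = 1500;        // region being traversed: bytes [1500, 2600)
  const uint64_t sizeBytes = 1100;
  const uint64_t endByte = beginByte + sizeBytes;

  const uint64_t firstLeaf = beginByte / maxBytesPerLeaf;           // = 1
  const uint64_t endLeaf = ceilDivision(endByte, maxBytesPerLeaf);  // = 3 (exclusive bound)

  for (uint64_t leafIndex = firstLeaf; leafIndex < endLeaf; ++leafIndex) {
    const uint64_t indexOfFirstLeafByte = leafIndex * maxBytesPerLeaf;
    const uint64_t dataBegin = maxZeroSubtraction(beginByte, indexOfFirstLeafByte);
    const uint64_t dataEnd = std::min(maxBytesPerLeaf, endByte - indexOfFirstLeafByte);
    // Prints: leaf 1 covers [476, 1024), leaf 2 covers [0, 552)
    std::printf("leaf %llu covers [%llu, %llu)\n",
                (unsigned long long)leafIndex,
                (unsigned long long)dataBegin,
                (unsigned long long)dataEnd);
  }
  return 0;
}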

src/blobstore/implementations/onblocks/BlobOnBlocks.h (new file)

@@ -0,0 +1,52 @@
#pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_BLOBONBLOCKS_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_BLOBONBLOCKS_H_
#include "../../interface/Blob.h"
#include <memory>
#include <boost/optional.hpp>
namespace blobstore {
namespace onblocks {
namespace datanodestore {
class DataLeafNode;
}
namespace parallelaccessdatatreestore {
class DataTreeRef;
}
class BlobOnBlocks final: public Blob {
public:
BlobOnBlocks(cpputils::unique_ref<parallelaccessdatatreestore::DataTreeRef> datatree);
~BlobOnBlocks();
const blockstore::Key &key() const override;
uint64_t size() const override;
void resize(uint64_t numBytes) override;
cpputils::Data readAll() const override;
void read(void *target, uint64_t offset, uint64_t size) const override;
uint64_t tryRead(void *target, uint64_t offset, uint64_t size) const override;
void write(const void *source, uint64_t offset, uint64_t size) override;
void flush() override;
cpputils::unique_ref<parallelaccessdatatreestore::DataTreeRef> releaseTree();
private:
void _read(void *target, uint64_t offset, uint64_t count) const;
void traverseLeaves(uint64_t offsetBytes, uint64_t sizeBytes, std::function<void (uint64_t, datanodestore::DataLeafNode *, uint32_t, uint32_t)>) const;
cpputils::unique_ref<parallelaccessdatatreestore::DataTreeRef> _datatree;
mutable boost::optional<uint64_t> _sizeCache;
DISALLOW_COPY_AND_ASSIGN(BlobOnBlocks);
};
}
}
#endif
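
BlobOnBlocks::size() is expensive because it traverses the tree, so the result is cached in the mutable _sizeCache member declared above and updated by resize() and by writes that grow the blob. A minimal, self-contained sketch of that caching idiom, with computeExpensiveSize() standing in for DataTreeRef::numStoredBytes():

#include <boost/optional.hpp>
#include <cstdint>
#include <cstdio>

class SizeCached {
public:
  uint64_t size() const {
    if (_sizeCache == boost::none) {
      _sizeCache = computeExpensiveSize();  // only evaluated on the first call
    }
    return *_sizeCache;
  }
private:
  uint64_t computeExpensiveSize() const {
    std::puts("traversing the tree...");    // stands in for the expensive tree traversal
    return 4096;
  }
  mutable boost::optional<uint64_t> _sizeCache;  // mutable so the const getter can fill it
};

int main() {
  SizeCached blob;
  std::printf("%llu\n", (unsigned long long)blob.size());  // triggers the computation
  std::printf("%llu\n", (unsigned long long)blob.size());  // served from the cache
  return 0;
}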

src/blobstore/implementations/onblocks/BlobStoreOnBlocks.cpp (new file)

@@ -0,0 +1,56 @@
#include "parallelaccessdatatreestore/DataTreeRef.h"
#include "parallelaccessdatatreestore/ParallelAccessDataTreeStore.h"
#include <blockstore/implementations/parallelaccess/ParallelAccessBlockStore.h>
#include "datanodestore/DataLeafNode.h"
#include "datanodestore/DataNodeStore.h"
#include "datatreestore/DataTreeStore.h"
#include "datatreestore/DataTree.h"
#include "BlobStoreOnBlocks.h"
#include "BlobOnBlocks.h"
#include <cpp-utils/pointer/cast.h>
#include <cpp-utils/assert/assert.h>
using cpputils::unique_ref;
using cpputils::make_unique_ref;
using blockstore::BlockStore;
using blockstore::parallelaccess::ParallelAccessBlockStore;
using blockstore::Key;
using cpputils::dynamic_pointer_move;
using boost::optional;
using boost::none;
namespace blobstore {
namespace onblocks {
using datanodestore::DataNodeStore;
using datatreestore::DataTreeStore;
using parallelaccessdatatreestore::ParallelAccessDataTreeStore;
BlobStoreOnBlocks::BlobStoreOnBlocks(unique_ref<BlockStore> blockStore, uint32_t blocksizeBytes)
: _dataTreeStore(make_unique_ref<ParallelAccessDataTreeStore>(make_unique_ref<DataTreeStore>(make_unique_ref<DataNodeStore>(make_unique_ref<ParallelAccessBlockStore>(std::move(blockStore)), blocksizeBytes)))) {
}
BlobStoreOnBlocks::~BlobStoreOnBlocks() {
}
unique_ref<Blob> BlobStoreOnBlocks::create() {
return make_unique_ref<BlobOnBlocks>(_dataTreeStore->createNewTree());
}
optional<unique_ref<Blob>> BlobStoreOnBlocks::load(const Key &key) {
auto tree = _dataTreeStore->load(key);
if (tree == none) {
return none;
}
return optional<unique_ref<Blob>>(make_unique_ref<BlobOnBlocks>(std::move(*tree)));
}
void BlobStoreOnBlocks::remove(unique_ref<Blob> blob) {
auto _blob = dynamic_pointer_move<BlobOnBlocks>(blob);
ASSERT(_blob != none, "Passed Blob in BlobStoreOnBlocks::remove() is not a BlobOnBlocks.");
_dataTreeStore->remove((*_blob)->releaseTree());
}
}
}

src/blobstore/implementations/onblocks/BlobStoreOnBlocks.h (new file)

@@ -0,0 +1,35 @@
#pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_BLOCKED_BLOBSTOREONBLOCKS_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_BLOCKED_BLOBSTOREONBLOCKS_H_
#include "../../interface/BlobStore.h"
#include <blockstore/interface/BlockStore.h>
namespace blobstore {
namespace onblocks {
namespace parallelaccessdatatreestore {
class ParallelAccessDataTreeStore;
}
//TODO Make blobstore able to cope with incomplete data (some blocks missing, because they're not synchronized yet) and write test cases for that
class BlobStoreOnBlocks final: public BlobStore {
public:
BlobStoreOnBlocks(cpputils::unique_ref<blockstore::BlockStore> blockStore, uint32_t blocksizeBytes);
~BlobStoreOnBlocks();
cpputils::unique_ref<Blob> create() override;
boost::optional<cpputils::unique_ref<Blob>> load(const blockstore::Key &key) override;
void remove(cpputils::unique_ref<Blob> blob) override;
private:
cpputils::unique_ref<parallelaccessdatatreestore::ParallelAccessDataTreeStore> _dataTreeStore;
DISALLOW_COPY_AND_ASSIGN(BlobStoreOnBlocks);
};
}
}
#endif
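
A hedged usage sketch for the interface above. It relies only on the constructor and the create/load/remove methods declared in this header plus the Blob methods from BlobOnBlocks.h; the caller has to supply some concrete blockstore::BlockStore implementation (baseStore below), the 32KB block size is an arbitrary example rather than a project default, and the include paths assume the src/ include directory set up in src/CMakeLists.txt.

#include <blobstore/implementations/onblocks/BlobStoreOnBlocks.h>
#include <blockstore/interface/BlockStore.h>
#include <boost/optional.hpp>

void roundtrip(cpputils::unique_ref<blockstore::BlockStore> baseStore) {
  // 32KB physical blocks -- an example value, not a recommendation.
  blobstore::onblocks::BlobStoreOnBlocks blobStore(std::move(baseStore), 32 * 1024);

  const char msg[] = "hello blob";
  const blockstore::Key key = [&] {
    auto blob = blobStore.create();
    blob->resize(sizeof(msg));
    blob->write(msg, 0, sizeof(msg));
    return blob->key();   // Key is a copyable value type (cf. DataNodeStore.cpp)
  }();                    // the blob handle is released here before we load it again

  auto loaded = blobStore.load(key);  // boost::optional<cpputils::unique_ref<Blob>>
  if (loaded != boost::none) {
    char buffer[sizeof(msg)] = {};
    (*loaded)->read(buffer, 0, sizeof(msg));  // buffer now holds "hello blob"
    blobStore.remove(std::move(*loaded));     // also frees the underlying blocks
  }
}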

src/blobstore/implementations/onblocks/datanodestore/DataInnerNode.cpp (new file)

@@ -0,0 +1,87 @@
#include "DataInnerNode.h"
#include "DataNodeStore.h"
#include <cpp-utils/assert/assert.h>
using blockstore::Block;
using cpputils::Data;
using cpputils::unique_ref;
using cpputils::make_unique_ref;
using blockstore::Key;
namespace blobstore {
namespace onblocks {
namespace datanodestore {
DataInnerNode::DataInnerNode(DataNodeView view)
: DataNode(std::move(view)) {
ASSERT(depth() > 0, "Inner node can't have depth 0. Is this a leaf maybe?");
}
DataInnerNode::~DataInnerNode() {
}
unique_ref<DataInnerNode> DataInnerNode::InitializeNewNode(unique_ref<Block> block, const DataNode &first_child) {
DataNodeView node(std::move(block));
node.setDepth(first_child.depth() + 1);
node.setSize(1);
auto result = make_unique_ref<DataInnerNode>(std::move(node));
result->ChildrenBegin()->setKey(first_child.key());
return result;
}
uint32_t DataInnerNode::numChildren() const {
return node().Size();
}
DataInnerNode::ChildEntry *DataInnerNode::ChildrenBegin() {
return const_cast<ChildEntry*>(const_cast<const DataInnerNode*>(this)->ChildrenBegin());
}
const DataInnerNode::ChildEntry *DataInnerNode::ChildrenBegin() const {
return node().DataBegin<ChildEntry>();
}
DataInnerNode::ChildEntry *DataInnerNode::ChildrenEnd() {
return const_cast<ChildEntry*>(const_cast<const DataInnerNode*>(this)->ChildrenEnd());
}
const DataInnerNode::ChildEntry *DataInnerNode::ChildrenEnd() const {
return ChildrenBegin() + node().Size();
}
DataInnerNode::ChildEntry *DataInnerNode::LastChild() {
return const_cast<ChildEntry*>(const_cast<const DataInnerNode*>(this)->LastChild());
}
const DataInnerNode::ChildEntry *DataInnerNode::LastChild() const {
return getChild(numChildren()-1);
}
DataInnerNode::ChildEntry *DataInnerNode::getChild(unsigned int index) {
return const_cast<ChildEntry*>(const_cast<const DataInnerNode*>(this)->getChild(index));
}
const DataInnerNode::ChildEntry *DataInnerNode::getChild(unsigned int index) const {
ASSERT(index < numChildren(), "Accessing child out of range");
return ChildrenBegin()+index;
}
void DataInnerNode::addChild(const DataNode &child) {
ASSERT(numChildren() < maxStoreableChildren(), "Adding more children than we can store");
ASSERT(child.depth() == depth()-1, "The child that should be added has wrong depth");
node().setSize(node().Size()+1);
LastChild()->setKey(child.key());
}
void DataInnerNode::removeLastChild() {
ASSERT(node().Size() > 1, "There is no child to remove");
node().setSize(node().Size()-1);
}
uint32_t DataInnerNode::maxStoreableChildren() const {
return node().layout().maxChildrenPerInnerNode();
}
}
}
}
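
ChildrenBegin(), ChildrenEnd(), LastChild() and getChild() above all follow the same pattern: the non-const overload forwards to the const overload and const_casts the result, so the actual lookup logic exists only once. A minimal standalone illustration of that idiom:

#include <cstdio>

class Container {
public:
  // Non-const overload: delegate to the const overload, then cast constness away.
  int *first() {
    return const_cast<int*>(const_cast<const Container*>(this)->first());
  }
  // Const overload: the single place where the real logic lives.
  const int *first() const {
    return _data;
  }
private:
  int _data[4] = {1, 2, 3, 4};
};

int main() {
  Container c;
  *c.first() = 42;                      // mutable access through the shared logic
  const Container &cref = c;
  std::printf("%d\n", *cref.first());   // prints 42
  return 0;
}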

src/blobstore/implementations/onblocks/datanodestore/DataInnerNode.h (new file)

@@ -0,0 +1,49 @@
#pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATAINNERNODE_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATAINNERNODE_H_
#include "DataNode.h"
#include "DataInnerNode_ChildEntry.h"
namespace blobstore {
namespace onblocks {
namespace datanodestore {
class DataInnerNode final: public DataNode {
public:
static cpputils::unique_ref<DataInnerNode> InitializeNewNode(cpputils::unique_ref<blockstore::Block> block, const DataNode &first_child_key);
DataInnerNode(DataNodeView block);
~DataInnerNode();
using ChildEntry = DataInnerNode_ChildEntry;
uint32_t maxStoreableChildren() const;
ChildEntry *getChild(unsigned int index);
const ChildEntry *getChild(unsigned int index) const;
uint32_t numChildren() const;
void addChild(const DataNode &child_key);
void removeLastChild();
ChildEntry *LastChild();
const ChildEntry *LastChild() const;
private:
ChildEntry *ChildrenBegin();
ChildEntry *ChildrenEnd();
const ChildEntry *ChildrenBegin() const;
const ChildEntry *ChildrenEnd() const;
DISALLOW_COPY_AND_ASSIGN(DataInnerNode);
};
}
}
}
#endif

src/blobstore/implementations/onblocks/datanodestore/DataInnerNode_ChildEntry.h (new file)

@@ -0,0 +1,30 @@
#pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATAINNERNODE_CHILDENTRY_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATAINNERNODE_CHILDENTRY_H_
#include <cpp-utils/macros.h>
namespace blobstore{
namespace onblocks{
namespace datanodestore{
struct DataInnerNode_ChildEntry final {
public:
blockstore::Key key() const {
return blockstore::Key::FromBinary(_keydata);
}
private:
void setKey(const blockstore::Key &key) {
key.ToBinary(_keydata);
}
friend class DataInnerNode;
uint8_t _keydata[blockstore::Key::BINARY_LENGTH];
DISALLOW_COPY_AND_ASSIGN(DataInnerNode_ChildEntry);
};
}
}
}
#endif

src/blobstore/implementations/onblocks/datanodestore/DataLeafNode.cpp (new file)

@@ -0,0 +1,67 @@
#include "DataLeafNode.h"
#include "DataInnerNode.h"
#include <cpp-utils/assert/assert.h>
using blockstore::Block;
using cpputils::Data;
using blockstore::Key;
using cpputils::unique_ref;
using cpputils::make_unique_ref;
namespace blobstore {
namespace onblocks {
namespace datanodestore {
DataLeafNode::DataLeafNode(DataNodeView view)
: DataNode(std::move(view)) {
ASSERT(node().Depth() == 0, "Leaf node must have depth 0. Is it an inner node instead?");
ASSERT(numBytes() <= maxStoreableBytes(), "Leaf says it stores more bytes than it has space for");
}
DataLeafNode::~DataLeafNode() {
}
unique_ref<DataLeafNode> DataLeafNode::InitializeNewNode(unique_ref<Block> block) {
DataNodeView node(std::move(block));
node.setDepth(0);
node.setSize(0);
//fillDataWithZeroes(); not needed, because a newly created block will be zeroed out. DataLeafNodeTest.SpaceIsZeroFilledWhenGrowing ensures this.
return make_unique_ref<DataLeafNode>(std::move(node));
}
void DataLeafNode::read(void *target, uint64_t offset, uint64_t size) const {
ASSERT(offset <= node().Size() && offset + size <= node().Size(), "Read out of valid area"); // Also check offset, because the addition could lead to overflows
std::memcpy(target, (uint8_t*)node().data() + offset, size);
}
void DataLeafNode::write(const void *source, uint64_t offset, uint64_t size) {
ASSERT(offset <= node().Size() && offset + size <= node().Size(), "Write out of valid area"); // Also check offset, because the addition could lead to overflows
node().write(source, offset, size);
}
uint32_t DataLeafNode::numBytes() const {
return node().Size();
}
void DataLeafNode::resize(uint32_t new_size) {
ASSERT(new_size <= maxStoreableBytes(), "Trying to resize to a size larger than the maximal size");
uint32_t old_size = node().Size();
if (new_size < old_size) {
fillDataWithZeroesFromTo(new_size, old_size);
}
node().setSize(new_size);
}
void DataLeafNode::fillDataWithZeroesFromTo(off_t begin, off_t end) {
Data ZEROES(end-begin);
ZEROES.FillWithZeroes();
node().write(ZEROES.data(), begin, end-begin);
}
uint64_t DataLeafNode::maxStoreableBytes() const {
return node().layout().maxBytesPerLeaf();
}
}
}
}
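
The read/write asserts above check that the offset alone is within the leaf in addition to checking offset + size because, as the comments note, the addition can overflow. A tiny standalone example of the unsigned wraparound those checks guard against:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t leafSize = 100;
  const uint64_t offset = UINT64_C(0xFFFFFFFFFFFFFFF0);  // far outside the leaf
  const uint64_t count = 0x20;                           // offset + count wraps around to 0x10

  const bool naiveCheck = (offset + count <= leafSize);                         // true (!) because of wraparound
  const bool fullCheck = (offset <= leafSize) && (offset + count <= leafSize);  // false, as intended

  std::printf("naive: %d, full: %d\n", naiveCheck, fullCheck);  // prints "naive: 1, full: 0"
  return 0;
}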

src/blobstore/implementations/onblocks/datanodestore/DataLeafNode.h (new file)

@@ -0,0 +1,39 @@
#pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATALEAFNODE_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATALEAFNODE_H_
#include "DataNode.h"
namespace blobstore {
namespace onblocks {
namespace datanodestore {
class DataInnerNode;
class DataLeafNode final: public DataNode {
public:
static cpputils::unique_ref<DataLeafNode> InitializeNewNode(cpputils::unique_ref<blockstore::Block> block);
DataLeafNode(DataNodeView block);
~DataLeafNode();
//Returning uint64_t, because calculations handling this probably need to be done in 64bit to support >4GB blobs.
uint64_t maxStoreableBytes() const;
void read(void *target, uint64_t offset, uint64_t size) const;
void write(const void *source, uint64_t offset, uint64_t size);
uint32_t numBytes() const;
void resize(uint32_t size);
private:
void fillDataWithZeroesFromTo(off_t begin, off_t end);
DISALLOW_COPY_AND_ASSIGN(DataLeafNode);
};
}
}
}
#endif

src/blobstore/implementations/onblocks/datanodestore/DataNode.cpp (new file)

@@ -0,0 +1,54 @@
#include "DataInnerNode.h"
#include "DataLeafNode.h"
#include "DataNode.h"
#include "DataNodeStore.h"
#include <blockstore/utils/BlockStoreUtils.h>
using blockstore::Block;
using blockstore::Key;
using std::runtime_error;
using cpputils::unique_ref;
namespace blobstore {
namespace onblocks {
namespace datanodestore {
DataNode::DataNode(DataNodeView node)
: _node(std::move(node)) {
}
DataNode::~DataNode() {
}
DataNodeView &DataNode::node() {
return const_cast<DataNodeView&>(const_cast<const DataNode*>(this)->node());
}
const DataNodeView &DataNode::node() const {
return _node;
}
const Key &DataNode::key() const {
return _node.key();
}
uint8_t DataNode::depth() const {
return _node.Depth();
}
unique_ref<DataInnerNode> DataNode::convertToNewInnerNode(unique_ref<DataNode> node, const DataNode &first_child) {
Key key = node->key();
auto block = node->_node.releaseBlock();
blockstore::utils::fillWithZeroes(block.get());
return DataInnerNode::InitializeNewNode(std::move(block), first_child);
}
void DataNode::flush() const {
_node.flush();
}
}
}
}

src/blobstore/implementations/onblocks/datanodestore/DataNode.h (new file)

@@ -0,0 +1,44 @@
#pragma once
#ifndef MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATANODE_H_
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_DATANODESTORE_DATANODE_H_
#include "DataNodeView.h"
#include <cpp-utils/data/Data.h>
namespace blobstore {
namespace onblocks {
namespace datanodestore {
class DataNodeStore;
class DataInnerNode;
class DataNode {
public:
virtual ~DataNode();
const blockstore::Key &key() const;
uint8_t depth() const;
static cpputils::unique_ref<DataInnerNode> convertToNewInnerNode(cpputils::unique_ref<DataNode> node, const DataNode &first_child);
void flush() const;
protected:
DataNode(DataNodeView block);
DataNodeView &node();
const DataNodeView &node() const;
friend class DataNodeStore;
private:
DataNodeView _node;
DISALLOW_COPY_AND_ASSIGN(DataNode);
};
}
}
}
#endif

src/blobstore/implementations/onblocks/datanodestore/DataNodeStore.cpp (new file)

@@ -0,0 +1,113 @@
#include "DataInnerNode.h"
#include "DataLeafNode.h"
#include "DataNodeStore.h"
#include <blockstore/interface/BlockStore.h>
#include <blockstore/interface/Block.h>
#include <blockstore/utils/BlockStoreUtils.h>
#include <cpp-utils/assert/assert.h>
using blockstore::BlockStore;
using blockstore::Block;
using blockstore::Key;
using cpputils::Data;
using cpputils::unique_ref;
using cpputils::make_unique_ref;
using std::runtime_error;
using boost::optional;
using boost::none;
namespace blobstore {
namespace onblocks {
namespace datanodestore {
DataNodeStore::DataNodeStore(unique_ref<BlockStore> blockstore, uint32_t blocksizeBytes)
: _blockstore(std::move(blockstore)), _layout(blocksizeBytes) {
}
DataNodeStore::~DataNodeStore() {
}
unique_ref<DataNode> DataNodeStore::load(unique_ref<Block> block) {
ASSERT(block->size() == _layout.blocksizeBytes(), "Loading block of wrong size");
DataNodeView node(std::move(block));
if (node.Depth() == 0) {
return make_unique_ref<DataLeafNode>(std::move(node));
} else if (node.Depth() <= MAX_DEPTH) {
return make_unique_ref<DataInnerNode>(std::move(node));
} else {
throw runtime_error("Tree is to deep. Data corruption?");
}
}
unique_ref<DataInnerNode> DataNodeStore::createNewInnerNode(const DataNode &first_child) {
ASSERT(first_child.node().layout().blocksizeBytes() == _layout.blocksizeBytes(), "Source node has wrong layout. Is it from the same DataNodeStore?");
//TODO Initialize block and then create it in the blockstore - this is more efficient than creating it and then writing to it
auto block = _blockstore->create(Data(_layout.blocksizeBytes()).FillWithZeroes());
return DataInnerNode::InitializeNewNode(std::move(block), first_child);
}
unique_ref<DataLeafNode> DataNodeStore::createNewLeafNode() {
//TODO Initialize block and then create it in the blockstore - this is more efficient than creating it and then writing to it
auto block = _blockstore->create(Data(_layout.blocksizeBytes()).FillWithZeroes());
return DataLeafNode::InitializeNewNode(std::move(block));
}
optional<unique_ref<DataNode>> DataNodeStore::load(const Key &key) {
auto block = _blockstore->load(key);
if (block == none) {
return none;
} else {
return load(std::move(*block));
}
}
unique_ref<DataNode> DataNodeStore::createNewNodeAsCopyFrom(const DataNode &source) {
ASSERT(source.node().layout().blocksizeBytes() == _layout.blocksizeBytes(), "Source node has wrong layout. Is it from the same DataNodeStore?");
auto newBlock = blockstore::utils::copyToNewBlock(_blockstore.get(), source.node().block());
return load(std::move(newBlock));
}
unique_ref<DataNode> DataNodeStore::overwriteNodeWith(unique_ref<DataNode> target, const DataNode &source) {
ASSERT(target->node().layout().blocksizeBytes() == _layout.blocksizeBytes(), "Target node has wrong layout. Is it from the same DataNodeStore?");
ASSERT(source.node().layout().blocksizeBytes() == _layout.blocksizeBytes(), "Source node has wrong layout. Is it from the same DataNodeStore?");
Key key = target->key();
{
auto targetBlock = target->node().releaseBlock();
cpputils::destruct(std::move(target)); // Call destructor
blockstore::utils::copyTo(targetBlock.get(), source.node().block());
}
auto loaded = load(key);
ASSERT(loaded != none, "Couldn't load the target node after overwriting it");
return std::move(*loaded);
}
void DataNodeStore::remove(unique_ref<DataNode> node) {
auto block = node->node().releaseBlock();
cpputils::destruct(std::move(node)); // Call destructor
_blockstore->remove(std::move(block));
}
uint64_t DataNodeStore::numNodes() const {
return