commit 1a7625ea9b
Merge from next
@@ -7,7 +7,7 @@ references:
run:
name: Initialize Cache
command: |
echo "${APT_COMPILER_PACKAGE}_${BUILD_TOOLSET}_${CXX}_${CC}" > /tmp/_build_env_vars
echo "${APT_COMPILER_PACKAGE}_${BUILD_TOOLSET}_${CXX}_${CC}_${BUILD_TYPE}_${CXXFLAGS}" > /tmp/_build_env_vars
echo Build env vars used for cache keys:
cat /tmp/_build_env_vars
container_setup_pre: &container_setup_pre
@@ -49,7 +49,7 @@ references:
sudo chmod o-w /etc/apt/sources.list.d/clang.list

DEBIAN_FRONTEND=noninteractive sudo apt-get update -qq
DEBIAN_FRONTEND=noninteractive sudo apt-get install -y git ccache $APT_COMPILER_PACKAGE cmake make libcurl4-openssl-dev libboost-filesystem-dev libboost-system-dev libboost-chrono-dev libboost-program-options-dev libboost-thread-dev libcrypto++-dev libssl-dev libfuse-dev python
DEBIAN_FRONTEND=noninteractive sudo apt-get install -y git ccache $APT_COMPILER_PACKAGE cmake3 make libcurl4-openssl-dev libssl-dev libfuse-dev python
# Use /dev/urandom when /dev/random is accessed to use less entropy
sudo cp -a /dev/urandom /dev/random

@@ -150,26 +150,34 @@ references:
ccache --max-size=512M
ccache --show-stats

# Disable OpenMP if it is clang, because Ubuntu 14.04 doesn't have the libomp-dev package needed to support OpenMP for clang.
if [[ ${APT_COMPILER_PACKAGE} == clang* ]]; then
OPENMP_PARAMS="-DDISABLE_OPENMP=ON"
else
OPENMP_PARAMS=""
fi

# Build
mkdir cmake
cd cmake
cmake .. -DBUILD_TESTING=on -DCMAKE_BUILD_TYPE=Debug
cmake .. -DBUILD_TESTING=on -DCMAKE_BUILD_TYPE=${BUILD_TYPE} ${OPENMP_PARAMS}
make -j$NUMCORES

ccache --show-stats
test: &test
run:
name: Test
no_output_timeout: 120m
command: |
cd cmake
./test/gitversion/gitversion-test
./test/cpp-utils/cpp-utils-test
./test/fspp/fspp-test
if [ ! "$DISABLE_BROKEN_TSAN_TESTS" = true ] ; then ./test/cpp-utils/cpp-utils-test ; fi
if [ ! "$DISABLE_BROKEN_TSAN_TESTS" = true ] && [ ! "$DISABLE_BROKEN_ASAN_TESTS" = true ] ; then ./test/fspp/fspp-test ; fi
./test/parallelaccessstore/parallelaccessstore-test
./test/blockstore/blockstore-test
./test/blobstore/blobstore-test
./test/cryfs/cryfs-test
./test/cryfs-cli/cryfs-cli-test
if [ ! "$DISABLE_BROKEN_TSAN_TESTS" = true ] ; then ./test/cryfs/cryfs-test ; fi
if [ ! "$DISABLE_BROKEN_TSAN_TESTS" = true ] ; then ./test/cryfs-cli/cryfs-cli-test ; fi
job_definition: &job_definition
<<: *container_config
steps:
@@ -191,82 +199,156 @@ references:
only: /.*/

jobs:
gcc_4_8:
<<: *job_definition
environment:
CC: gcc-4.8
CXX: g++-4.8
BUILD_TOOLSET: gcc
APT_COMPILER_PACKAGE: "g++-4.8"
gcc_5:
gcc_5_debug:
<<: *job_definition
environment:
CC: gcc-5
CXX: g++-5
BUILD_TOOLSET: gcc
APT_COMPILER_PACKAGE: "g++-5"
gcc_6:
CXXFLAGS: ""
BUILD_TYPE: "Debug"
gcc_5_release:
<<: *job_definition
environment:
CC: gcc-5
CXX: g++-5
BUILD_TOOLSET: gcc
APT_COMPILER_PACKAGE: "g++-5"
CXXFLAGS: ""
BUILD_TYPE: "Release"
gcc_6_debug:
<<: *job_definition
environment:
CC: gcc-6
CXX: g++-6
BUILD_TOOLSET: gcc
APT_COMPILER_PACKAGE: "g++-6"
gcc_7:
CXXFLAGS: ""
BUILD_TYPE: "Debug"
gcc_6_release:
<<: *job_definition
environment:
CC: gcc-6
CXX: g++-6
BUILD_TOOLSET: gcc
APT_COMPILER_PACKAGE: "g++-6"
CXXFLAGS: ""
BUILD_TYPE: "Release"
gcc_7_debug:
<<: *job_definition
environment:
CC: gcc-7
CXX: g++-7
BUILD_TOOLSET: gcc
APT_COMPILER_PACKAGE: "g++-7"
clang_3_7:
CXXFLAGS: ""
BUILD_TYPE: "Debug"
gcc_7_release:
<<: *job_definition
environment:
CC: clang-3.7
CXX: clang++-3.7
BUILD_TOOLSET: clang
APT_COMPILER_PACKAGE: clang-3.7
clang_3_8:
<<: *job_definition
environment:
CC: clang-3.8
CXX: clang++-3.8
BUILD_TOOLSET: clang
APT_COMPILER_PACKAGE: clang-3.8
clang_4_0:
CC: gcc-7
CXX: g++-7
BUILD_TOOLSET: gcc
APT_COMPILER_PACKAGE: "g++-7"
CXXFLAGS: ""
BUILD_TYPE: "Release"
clang_4_0_debug:
<<: *job_definition
environment:
CC: clang-4.0
CXX: clang++-4.0
BUILD_TOOLSET: clang
APT_COMPILER_PACKAGE: clang-4.0
clang_5_0:
CXXFLAGS: ""
BUILD_TYPE: "Debug"
clang_4_0_release:
<<: *job_definition
environment:
CC: clang-4.0
CXX: clang++-4.0
BUILD_TOOLSET: clang
APT_COMPILER_PACKAGE: clang-4.0
CXXFLAGS: ""
BUILD_TYPE: "Release"
clang_5_0_debug:
<<: *job_definition
environment:
CC: clang-5.0
CXX: clang++-5.0
BUILD_TOOLSET: clang
APT_COMPILER_PACKAGE: clang-5.0
CXXFLAGS: ""
BUILD_TYPE: "Debug"
clang_5_0_release:
<<: *job_definition
environment:
CC: clang-5.0
CXX: clang++-5.0
BUILD_TOOLSET: clang
APT_COMPILER_PACKAGE: clang-5.0
CXXFLAGS: ""
BUILD_TYPE: "Release"
no_compatibility:
<<: *job_definition
environment:
CC: clang-5.0
CXX: clang++-5.0
BUILD_TOOLSET: clang
APT_COMPILER_PACKAGE: clang-5.0
CXXFLAGS: "-DCRYFS_NO_COMPATIBILITY"
BUILD_TYPE: "Debug"
address_sanitizer:
<<: *job_definition
environment:
CC: clang-5.0
CXX: clang++-5.0
BUILD_TOOLSET: clang
APT_COMPILER_PACKAGE: clang-5.0
CXXFLAGS: "-O2 -fsanitize=address -fno-omit-frame-pointer -fno-common -fsanitize-address-use-after-scope"
BUILD_TYPE: "Debug"
# Note: Leak detection is disabled because libfuse itself is leaky...
ASAN_OPTIONS: "detect_leaks=0 check_initialization_order=1 detect_stack_use_after_return=1 detect_invalid_pointer_pairs=1 atexit=1"
DISABLE_BROKEN_ASAN_TESTS: true
thread_sanitizer:
<<: *job_definition
environment:
CC: clang-5.0
CXX: clang++-5.0
BUILD_TOOLSET: clang
APT_COMPILER_PACKAGE: clang-5.0
CXXFLAGS: "-O2 -fsanitize=thread -fno-omit-frame-pointer"
BUILD_TYPE: "Debug"
DISABLE_BROKEN_TSAN_TESTS: true

workflows:
version: 2

build_and_test:
jobs:
- gcc_4_8:
- gcc_5_debug:
<<: *enable_for_tags
- gcc_5:
- gcc_5_release:
<<: *enable_for_tags
- gcc_6:
- gcc_6_debug:
<<: *enable_for_tags
- gcc_7:
- gcc_6_release:
<<: *enable_for_tags
- clang_3_7:
- gcc_7_debug:
<<: *enable_for_tags
- clang_3_8:
- gcc_7_release:
<<: *enable_for_tags
- clang_4_0:
- clang_4_0_debug:
<<: *enable_for_tags
- clang_5_0:
- clang_4_0_release:
<<: *enable_for_tags
- clang_5_0_debug:
<<: *enable_for_tags
- clang_5_0_release:
<<: *enable_for_tags
- no_compatibility:
<<: *enable_for_tags
- address_sanitizer:
<<: *enable_for_tags
- thread_sanitizer:
<<: *enable_for_tags
.clang-tidy (new file, 50 lines)
@@ -0,0 +1,50 @@
---
# TODO Enable (some of) the explicitly disabled checks. Possibly needs helper types from gsl library or similar to enable full cppcoreguidelines.
# TODO Enable more checks (google-*, hicpp-*, llvm-*, modernize-*, mpi-*, performance-*, readability-*)
# TODO Maybe just enable * and disable a list instead?
Checks: |
clang-diagnostic-*,
clang-analyzer-*,
bugprone-*,
cert-*,
cppcoreguidelines-*,
misc-*,
boost-use-to-string,
-cert-env33-c,
-cert-err58-cpp,
-cppcoreguidelines-owning-memory,
-cppcoreguidelines-no-malloc,
-cppcoreguidelines-pro-type-const-cast,
-cppcoreguidelines-pro-bounds-pointer-arithmetic,
-cppcoreguidelines-pro-type-reinterpret-cast,
-cppcoreguidelines-special-member-functions,
-cppcoreguidelines-pro-type-cstyle-cast,
-cppcoreguidelines-pro-bounds-array-to-pointer-decay,
-cppcoreguidelines-pro-type-vararg,
-misc-macro-parentheses,
-misc-unused-raii
WarningsAsErrors: ''
HeaderFilterRegex: '/src/|/test/'
CheckOptions:
- key: google-readability-braces-around-statements.ShortStatementLines
value: '1'
- key: google-readability-function-size.StatementThreshold
value: '800'
- key: google-readability-namespace-comments.ShortNamespaceLines
value: '10'
- key: google-readability-namespace-comments.SpacesBeforeComments
value: '2'
- key: modernize-loop-convert.MaxCopySize
value: '16'
- key: modernize-loop-convert.MinConfidence
value: reasonable
- key: modernize-loop-convert.NamingStyle
value: CamelCase
- key: modernize-pass-by-value.IncludeStyle
value: llvm
- key: modernize-replace-auto-ptr.IncludeStyle
value: llvm
- key: modernize-use-nullptr.NullMacros
value: 'NULL'
...
.gitignore (1 line changed)
@@ -2,6 +2,7 @@ umltest.inner.sh
umltest.status
/build
/cmake
/cmake-build-*
/.idea
*~
.travis.yml (34 changed lines)
@@ -5,7 +5,7 @@ compiler:
- gcc
- clang
os:
- linux
#- linux
- osx
addons:
apt:
@@ -18,34 +18,6 @@ addons:
- libcrypto++-dev
- libfuse-dev
install:
# Use new clang
- if [ "${TRAVIS_OS_NAME}" == "linux" ] && [ "$CXX" = "clang++" ]; then export CXX="clang++-3.7" CC="clang-3.7"; fi
# Detect number of CPU cores
- export NUMCORES=`grep -c ^processor /proc/cpuinfo` && if [ ! -n "$NUMCORES" ]; then export NUMCORES=`sysctl -n hw.ncpu`; fi
- echo Using $NUMCORES cores
# Install dependencies
- if [ "${TRAVIS_OS_NAME}" == "linux" ]; then ./travis.install_boost.sh; fi
- if [ "${TRAVIS_OS_NAME}" == "osx" ]; then brew cask install osxfuse && brew install cryptopp; fi
# Install run_with_fuse.sh
- mkdir cmake
- cd cmake
- wget https://raw.githubusercontent.com/smessmer/travis-utils/master/run_with_fuse.sh
- chmod +x run_with_fuse.sh
- cmake --version
# Use /dev/urandom when /dev/random is accessed, because travis doesn't have enough entropy
- if [ "${TRAVIS_OS_NAME}" == "linux" ]; then sudo cp -a /dev/urandom /dev/random; fi
- .travisci/install.sh
script:
- cmake .. -DBUILD_TESTING=on -DCMAKE_BUILD_TYPE=Debug
- make -j$NUMCORES
- ./test/gitversion/gitversion-test
- ./test/cpp-utils/cpp-utils-test
# TODO Also run on osx once fixed
- if [ "${TRAVIS_OS_NAME}" == "linux" ]; then ./run_with_fuse.sh ./test/fspp/fspp-test || exit 1; fi
- ./test/parallelaccessstore/parallelaccessstore-test
- ./test/blockstore/blockstore-test
- ./test/blobstore/blobstore-test
# TODO Also run on osx once fixed
- if [ "${TRAVIS_OS_NAME}" == "linux" ]; then ./test/cryfs/cryfs-test || exit 1; fi
- if [ "${TRAVIS_OS_NAME}" == "linux" ]; then ./test/cryfs-cli/cryfs-cli-test || exit 1; fi
after_script:
- rm run_with_fuse.sh
- .travisci/build_and_test.sh
.travisci/build_and_test.sh (new executable file, 33 lines)
@@ -0,0 +1,33 @@
#!/bin/bash

set -e

# Detect number of CPU cores
export NUMCORES=`grep -c ^processor /proc/cpuinfo`
if [ ! -n "$NUMCORES" ]; then
export NUMCORES=`sysctl -n hw.ncpu`
fi
echo Using $NUMCORES cores

# Setup target directory
mkdir cmake
cd cmake
cmake --version

# Build
cmake .. -DBUILD_TESTING=on -DCMAKE_BUILD_TYPE=Debug
make -j$NUMCORES

# Test
./test/gitversion/gitversion-test
./test/cpp-utils/cpp-utils-test
./test/parallelaccessstore/parallelaccessstore-test
./test/blockstore/blockstore-test
./test/blobstore/blobstore-test
./test/cryfs/cryfs-test

# TODO Also run on osx once fixed
if [ "${TRAVIS_OS_NAME}" == "linux" ]; then
./test/fspp/fspp-test
./test/cryfs-cli/cryfs-cli-test
fi
.travisci/install.sh (new executable file, 31 lines)
@@ -0,0 +1,31 @@
#!/bin/bash

set -e

# Use new clang on linux
if [ "${TRAVIS_OS_NAME}" == "linux" ] && [ "$CXX" = "clang++" ]; then
export CXX="clang++-3.7" CC="clang-3.7"
fi

# If using gcc on mac, actually use it ("gcc" just links to clang, but "gcc-4.8" is gcc, https://github.com/travis-ci/travis-ci/issues/2423)
if [ "${TRAVIS_OS_NAME}" == "osx" ] && [ "${CXX}" = "g++" ]; then
export CXX="g++-4.8" CC="gcc-4.8"
fi

# Install dependencies
if [ "${TRAVIS_OS_NAME}" == "linux" ]; then
./.travisci/install_boost.sh
fi

if [ "${TRAVIS_OS_NAME}" == "osx" ]; then
brew cask install osxfuse
brew install libomp
fi

# By default, travis only fetches the newest 50 commits. We need more in case we're further from the last version tag, so the build doesn't fail because it can't generate the version number.
git fetch --unshallow

# Use /dev/urandom when /dev/random is accessed, because travis doesn't have enough entropy
if [ "${TRAVIS_OS_NAME}" == "linux" ]; then
sudo cp -a /dev/urandom /dev/random
fi
@@ -1,26 +1,33 @@
cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
cmake_policy(SET CMP0054 NEW)

# note: for clang-tidy, we need cmake 3.6, or (if the return code should be handled correctly, e.g. on CI), we need 3.8.

# TODO Perf test:
# - try if setting CRYPTOPP_NATIVE_ARCH=ON and adding -march=native to the compile commands for cryfs source files makes a difference
# -> if yes, offer a cmake option to enable both of these

project(cryfs)

include(utils.cmake)

require_gcc_version(4.8)
require_clang_version(3.7)
require_gcc_version(5.0)
require_clang_version(4.0)

# Default value is not to build test cases
if(NOT BUILD_TESTING)
set(BUILD_TESTING OFF CACHE BOOL "BUILD_TESTING")
endif(NOT BUILD_TESTING)
option(BUILD_TESTING "build test cases" OFF)
option(CRYFS_UPDATE_CHECKS "let cryfs check for updates and security vulnerabilities" ON)
option(DISABLE_OPENMP "allow building without OpenMP libraries. This will cause performance degradations." OFF)

# Default vaule is to build in release mode
# Default value is to build in release mode
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release CACHE INTERNAL "CMAKE_BUILD_TYPE")
endif(NOT CMAKE_BUILD_TYPE)

# Default value is to do update checks
if(NOT CRYFS_UPDATE_CHECKS)
set(CRYFS_UPDATE_CHECKS ON CACHE BOOL "CRYFS_UPDATE_CHECKS")
endif(NOT CRYFS_UPDATE_CHECKS)
# The MSVC version on AppVeyor CI needs this
if(MSVC)
add_definitions(/bigobj)
endif()

add_subdirectory(vendor)
add_subdirectory(src)
@@ -1,11 +1,29 @@
Version 0.10.0 (unreleased)
---------------
New Features & Improvements:
* Integrity checks ensure you notice when someone modifies your file system.
* File system nodes (files, directories, symlinks) store a parent pointer to the directory that contains them. This information can be used in later versions to resolve some synchronization conflicts.
* Allow mounting using system mount tool and /etc/fstab (e.g. mount -t fuse.cryfs basedir mountdir)
* Performance improvements
* Use relatime instead of strictatime (further performance improvement)
* Pass fuse options directly to cryfs (i.e. 'cryfs basedir mountdir -o allow_other' instead of 'cryfs basedir mountdir -- -o allow_other')
* CryFS tells the operating system to not swap the encryption key to the disk (note: this is best-effort and cannot be guaranteed. Hibernation, for example, will still write the encryption key to the disk)
* New block size options: 4KB and 16KB
* New default block size: 16KB
* Increased scrypt hardness to (N=1048576, r=4, p=8) to make it harder to crack the key while allowing cryfs to take advantage of multicore machines.

Fixed bugs:
* `du` shows correct file system size on Mac OS X.


Version 0.9.9
--------------
Improvements:
* Add --allow-filesystem-upgrade option which will upgrade old file systems without asking the user. This will be especially helpful for GUI tools.
* Add --version option that shows the CryFS version and exits.
* When CryFS fails to load a file system, the process stops with a helpful error code, which can be used by GUI tools to show detailed messages.
* Only migrate a file system if the underlying storage format changed


Version 0.9.8
--------------
Compatibility:
README.md (23 changed lines)
@@ -1,4 +1,5 @@
# CryFS [![Build Status](https://travis-ci.org/cryfs/cryfs.svg?branch=master)](https://travis-ci.org/cryfs/cryfs)
# CryFS [![Build Status](https://travis-ci.org/cryfs/cryfs.svg?branch=master)](https://travis-ci.org/cryfs/cryfs) [![CircleCI](https://circleci.com/gh/cryfs/cryfs/tree/master.svg?style=svg)](https://circleci.com/gh/cryfs/cryfs/tree/master)

CryFS encrypts your files, so you can safely store them anywhere. It works well together with cloud services like Dropbox, iCloud, OneDrive and others.
See [https://www.cryfs.org](https://www.cryfs.org).

@@ -22,8 +23,8 @@ Building from source
Requirements
------------
- Git (for getting the source code)
- GCC version >= 4.8 or Clang >= 3.7
- CMake version >= 2.8
- GCC version >= 5.0 or Clang >= 4.0
- CMake version >= 3.0
- libcurl4 (including development headers)
- Boost libraries version >= 1.56 (including development headers)
- filesystem
@@ -31,21 +32,21 @@ Requirements
- chrono
- program_options
- thread
- Crypto++ version >= 5.6.3 (including development headers)
- SSL development libraries (including development headers, e.g. libssl-dev)
- libFUSE version >= 2.8.6 (including development headers), on Mac OS X instead install osxfuse from https://osxfuse.github.io/
- Python >= 2.7
- OpenMP

You can use the following commands to install these requirements

# Ubuntu
$ sudo apt-get install git g++ cmake make libcurl4-openssl-dev libboost-filesystem-dev libboost-system-dev libboost-chrono-dev libboost-program-options-dev libboost-thread-dev libcrypto++-dev libssl-dev libfuse-dev python
$ sudo apt-get install git g++ cmake make libcurl4-openssl-dev libboost-filesystem-dev libboost-system-dev libboost-chrono-dev libboost-program-options-dev libboost-thread-dev libssl-dev libfuse-dev python

# Fedora
sudo dnf install git gcc-c++ cmake make libcurl-devel boost-devel boost-static cryptopp-devel openssl-devel fuse-devel python
sudo dnf install git gcc-c++ cmake make libcurl-devel boost-devel boost-static openssl-devel fuse-devel python

# Macintosh
brew install cmake boost cryptopp openssl
brew install cmake boost openssl libomp

Build & Install
---------------
@@ -97,13 +98,7 @@ On most systems, CMake should find the libraries automatically. However, that do

cmake .. -DCMAKE_CXX_FLAGS="-I/path/to/fuse/or/osxfuse/headers"

4. **CryptoPP library not found**

Pass in the library path with

cmake .. -DCRYPTOPP_LIB_PATH=/path/to/cryptopp

5. **Openssl headers not found**
4. **Openssl headers not found**

Pass in the include path with

TODO-0.10.txt (new file, 1 line)
@@ -0,0 +1 @@
Change homebrew recipe to "brew install libomp" but not "brew install cryptopp"
@@ -1,15 +1,20 @@
project (doc)
INCLUDE(GNUInstallDirs)

find_program(GZIP gzip)
IF (WIN32)
MESSAGE(STATUS "This is Windows. Will not install man page")
ELSE (WIN32)
INCLUDE(GNUInstallDirs)

add_custom_command(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/cryfs.1.gz
COMMAND ${GZIP} -c ${CMAKE_CURRENT_SOURCE_DIR}/man/cryfs.1 > ${CMAKE_CURRENT_BINARY_DIR}/cryfs.1.gz
)
add_custom_target(man ALL DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/cryfs.1.gz)
find_program(GZIP gzip)

install(FILES ${CMAKE_CURRENT_BINARY_DIR}/cryfs.1.gz
DESTINATION ${CMAKE_INSTALL_MANDIR}/man1
CONFIGURATIONS Release
)
add_custom_command(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/cryfs.1.gz
COMMAND ${GZIP} -c ${CMAKE_CURRENT_SOURCE_DIR}/man/cryfs.1 > ${CMAKE_CURRENT_BINARY_DIR}/cryfs.1.gz
)
add_custom_target(man ALL DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/cryfs.1.gz)

install(FILES ${CMAKE_CURRENT_BINARY_DIR}/cryfs.1.gz
DESTINATION ${CMAKE_INSTALL_MANDIR}/man1
CONFIGURATIONS Release
)
ENDIF(WIN32)
@@ -169,6 +169,24 @@ Allow upgrading the file system if it was created with an old CryFS version. Aft
.
.
.TP
\fB\-\-allow-integrity-violations\fI
.
By default, CryFS checks for integrity violations, i.e. will notice if an adversary modified or rolled back the file system. Using this flag, you can disable the integrity checks. This can for example be helpful for loading an old snapshot of your file system without CryFS thinking an adversary rolled it back.
.
.
.TP
\fB\-\-allow-replaced-filesystem\fI
.
By default, CryFS remembers file systems it has seen in this base directory and checks that it didn't get replaced by an attacker with an entirely different file system since the last time it was loaded. However, if you do want to replace the file system with an entirely new one, you can pass in this option to disable the check.
.
.
.TP
\fB\-\-missing-block-is-integrity-violation\fR=true
.
When CryFS encounters a missing ciphertext block, it cannot (yet) know if it was deleted by an unauthorized adversary or by a second authorized client. This is one of the restrictions of the integrity checks currently in place. You can enable this flag to treat missing ciphertext blocks as integrity violations, but then your file system will not be usable by multiple clients anymore. By default, this flag is disabled.
.
.
.TP
\fB\-\-logfile\fR \fIfile\fR
.
Write status information to \fIfile\fR. If no logfile is given, CryFS will
@@ -205,6 +223,15 @@ By default, CryFS connects to the internet to check for known security
vulnerabilities and new versions. This option disables this.
.
.
.TP
\fBCRYFS_LOCAL_STATE_DIR\fR=[path]
.
Sets the directory cryfs uses to store local state. This local state
is used to recognize known file systems and run integrity checks
(i.e. check that they haven't been modified by an attacker).
Default value: ${HOME}/.cryfs
.
.
.
.SH SEE ALSO
.
run-clang-tidy.sh (new executable file, 20 lines)
@@ -0,0 +1,20 @@
#!/bin/bash

# Note: Call this from a cmake build directory (e.g. cmake/) for out-of-source builds
# Examples:
# mkdir cmake && cd cmake && ../run-clang-tidy.sh
# mkdir cmake && cd cmake && ../run-clang-tidy.sh -fix
# mkdir cmake && cd cmake && ../run-clang-tidy.sh -export-fixes fixes.yaml

set -e

NUMCORES=`nproc`

# Run cmake in current working directory, but on source that is in the same directory as this script file
cmake -DBUILD_TESTING=on -DCMAKE_EXPORT_COMPILE_COMMANDS=ON "${0%/*}"

# Build scrypt first. Our Makefiles call into theirs, and this is needed to generate some header files. Clang-tidy will otherwise complain they're missing.
make -j${NUMCORES} scrypt

run-clang-tidy.py -j${NUMCORES} -quiet -header-filter "$(realpath ${0%/*})/(src|test)/.*" $@
@@ -8,3 +8,4 @@ add_subdirectory(blockstore)
add_subdirectory(blobstore)
add_subdirectory(cryfs)
add_subdirectory(cryfs-cli)
add_subdirectory(stats)
@@ -11,6 +11,8 @@ set(SOURCES
implementations/onblocks/datanodestore/DataInnerNode.cpp
implementations/onblocks/datanodestore/DataNodeStore.cpp
implementations/onblocks/datatreestore/impl/algorithms.cpp
implementations/onblocks/datatreestore/impl/LeafTraverser.cpp
implementations/onblocks/datatreestore/LeafHandle.cpp
implementations/onblocks/datatreestore/DataTree.cpp
implementations/onblocks/datatreestore/DataTreeStore.cpp
implementations/onblocks/BlobOnBlocks.cpp
@@ -2,16 +2,19 @@
#include "BlobOnBlocks.h"

#include "datanodestore/DataLeafNode.h"
#include "datanodestore/DataNodeStore.h"
#include "utils/Math.h"
#include <cmath>
#include <cpp-utils/assert/assert.h>
#include "datatreestore/LeafHandle.h"

using std::function;
using std::unique_lock;
using std::mutex;
using cpputils::unique_ref;
using cpputils::Data;
using blobstore::onblocks::datanodestore::DataLeafNode;
using blobstore::onblocks::datanodestore::DataNodeLayout;
using blockstore::Key;
using blockstore::BlockId;
using blobstore::onblocks::datatreestore::LeafHandle;

namespace blobstore {
namespace onblocks {
@@ -19,11 +22,11 @@ namespace onblocks {
using parallelaccessdatatreestore::DataTreeRef;

BlobOnBlocks::BlobOnBlocks(unique_ref<DataTreeRef> datatree)
: _datatree(std::move(datatree)), _sizeCache(boost::none) {
: _datatree(std::move(datatree)), _sizeCache(boost::none), _mutex() {
}

BlobOnBlocks::~BlobOnBlocks() {
}
} // NOLINT (workaround https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82481 )

uint64_t BlobOnBlocks::size() const {
if (_sizeCache == boost::none) {
@@ -37,22 +40,50 @@ void BlobOnBlocks::resize(uint64_t numBytes) {
_sizeCache = numBytes;
}

void BlobOnBlocks::traverseLeaves(uint64_t beginByte, uint64_t sizeBytes, function<void (uint64_t, DataLeafNode *leaf, uint32_t, uint32_t)> func) const {
void BlobOnBlocks::_traverseLeaves(uint64_t beginByte, uint64_t sizeBytes, function<void (uint64_t leafOffset, LeafHandle leaf, uint32_t begin, uint32_t count)> onExistingLeaf, function<Data (uint64_t beginByte, uint32_t count)> onCreateLeaf) const {
unique_lock<mutex> lock(_mutex); // TODO Multiple traverse calls in parallel?
uint64_t endByte = beginByte + sizeBytes;
uint32_t firstLeaf = beginByte / _datatree->maxBytesPerLeaf();
uint32_t endLeaf = utils::ceilDivision(endByte, _datatree->maxBytesPerLeaf());
bool writingOutside = size() < endByte; // TODO Calling size() is slow because it has to traverse the tree
_datatree->traverseLeaves(firstLeaf, endLeaf, [&func, beginByte, endByte, endLeaf, writingOutside](DataLeafNode *leaf, uint32_t leafIndex) {
uint64_t indexOfFirstLeafByte = leafIndex * leaf->maxStoreableBytes();
uint32_t dataBegin = utils::maxZeroSubtraction(beginByte, indexOfFirstLeafByte);
uint32_t dataEnd = std::min(leaf->maxStoreableBytes(), endByte - indexOfFirstLeafByte);
if (leafIndex == endLeaf-1 && writingOutside) {
// If we are traversing an area that didn't exist before, then the last leaf was just created with a wrong size. We have to fix it.
leaf->resize(dataEnd);
}
func(indexOfFirstLeafByte, leaf, dataBegin, dataEnd-dataBegin);
});
if (writingOutside) {
uint64_t maxBytesPerLeaf = _datatree->maxBytesPerLeaf();
uint32_t firstLeaf = beginByte / maxBytesPerLeaf;
uint32_t endLeaf = utils::ceilDivision(endByte, maxBytesPerLeaf);
bool blobIsGrowingFromThisTraversal = false;
auto _onExistingLeaf = [&onExistingLeaf, beginByte, endByte, endLeaf, maxBytesPerLeaf, &blobIsGrowingFromThisTraversal] (uint32_t leafIndex, bool isRightBorderLeaf, LeafHandle leafHandle) {
uint64_t indexOfFirstLeafByte = leafIndex * maxBytesPerLeaf;
ASSERT(endByte > indexOfFirstLeafByte, "Traversal went too far right");
uint32_t dataBegin = utils::maxZeroSubtraction(beginByte, indexOfFirstLeafByte);
uint32_t dataEnd = std::min(maxBytesPerLeaf, endByte - indexOfFirstLeafByte);
// If we are traversing exactly until the last leaf, then the last leaf wasn't resized by the traversal and might have a wrong size. We have to fix it.
if (isRightBorderLeaf) {
ASSERT(leafIndex == endLeaf-1, "If we traversed further right, this wouldn't be the right border leaf.");
auto leaf = leafHandle.node();
if (leaf->numBytes() < dataEnd) {
leaf->resize(dataEnd);
blobIsGrowingFromThisTraversal = true;
}
}
onExistingLeaf(indexOfFirstLeafByte, std::move(leafHandle), dataBegin, dataEnd-dataBegin);
};
auto _onCreateLeaf = [&onCreateLeaf, maxBytesPerLeaf, beginByte, firstLeaf, endByte, endLeaf, &blobIsGrowingFromThisTraversal] (uint32_t leafIndex) -> Data {
blobIsGrowingFromThisTraversal = true;
uint64_t indexOfFirstLeafByte = leafIndex * maxBytesPerLeaf;
ASSERT(endByte > indexOfFirstLeafByte, "Traversal went too far right");
uint32_t dataBegin = utils::maxZeroSubtraction(beginByte, indexOfFirstLeafByte);
uint32_t dataEnd = std::min(maxBytesPerLeaf, endByte - indexOfFirstLeafByte);
ASSERT(leafIndex == firstLeaf || dataBegin == 0, "Only the leftmost leaf can have a gap on the left.");
ASSERT(leafIndex == endLeaf-1 || dataEnd == maxBytesPerLeaf, "Only the rightmost leaf can have a gap on the right");
Data data = onCreateLeaf(indexOfFirstLeafByte + dataBegin, dataEnd-dataBegin);
ASSERT(data.size() == dataEnd-dataBegin, "Returned leaf data with wrong size");
// If this leaf is created but only partly in the traversed region (i.e. dataBegin > leafBegin), we have to fill the data before the traversed region with zeroes.
if (dataBegin != 0) {
Data actualData(dataBegin + data.size());
std::memset(actualData.data(), 0, dataBegin);
std::memcpy(actualData.dataOffset(dataBegin), data.data(), data.size());
data = std::move(actualData);
}
return data;
};
_datatree->traverseLeaves(firstLeaf, endLeaf, _onExistingLeaf, _onCreateLeaf);
if (blobIsGrowingFromThisTraversal) {
ASSERT(_datatree->numStoredBytes() == endByte, "Writing didn't grow by the correct number of bytes");
_sizeCache = endByte;
}
@@ -67,38 +98,60 @@ Data BlobOnBlocks::readAll() const {
}

void BlobOnBlocks::read(void *target, uint64_t offset, uint64_t count) const {
ASSERT(offset <= size() && offset + count <= size(), "BlobOnBlocks::read() read outside blob. Use BlobOnBlocks::tryRead() if this should be allowed.");
uint64_t _size = size();
ASSERT(offset <= _size && offset + count <= _size, "BlobOnBlocks::read() read outside blob. Use BlobOnBlocks::tryRead() if this should be allowed.");
uint64_t read = tryRead(target, offset, count);
ASSERT(read == count, "BlobOnBlocks::read() couldn't read all requested bytes. Use BlobOnBlocks::tryRead() if this should be allowed.");
}

uint64_t BlobOnBlocks::tryRead(void *target, uint64_t offset, uint64_t count) const {
//TODO Quite inefficient to call size() here, because that has to traverse the tree
uint64_t realCount = std::max(UINT64_C(0), std::min(count, size()-offset));
uint64_t realCount = std::max(INT64_C(0), std::min(static_cast<int64_t>(count), static_cast<int64_t>(size())-static_cast<int64_t>(offset)));
_read(target, offset, realCount);
return realCount;
}

void BlobOnBlocks::_read(void *target, uint64_t offset, uint64_t count) const {
traverseLeaves(offset, count, [target, offset] (uint64_t indexOfFirstLeafByte, const DataLeafNode *leaf, uint32_t leafDataOffset, uint32_t leafDataSize) {
auto onExistingLeaf = [target, offset, count] (uint64_t indexOfFirstLeafByte, LeafHandle leaf, uint32_t leafDataOffset, uint32_t leafDataSize) {
ASSERT(indexOfFirstLeafByte+leafDataOffset>=offset && indexOfFirstLeafByte-offset+leafDataOffset <= count && indexOfFirstLeafByte-offset+leafDataOffset+leafDataSize <= count, "Writing to target out of bounds");
//TODO Simplify formula, make it easier to understand
leaf->read((uint8_t*)target + indexOfFirstLeafByte - offset + leafDataOffset, leafDataOffset, leafDataSize);
});
leaf.node()->read(static_cast<uint8_t*>(target) + indexOfFirstLeafByte - offset + leafDataOffset, leafDataOffset, leafDataSize);
};
auto onCreateLeaf = [] (uint64_t /*beginByte*/, uint32_t /*count*/) -> Data {
ASSERT(false, "Reading shouldn't create new leaves.");
};
_traverseLeaves(offset, count, onExistingLeaf, onCreateLeaf);
}

void BlobOnBlocks::write(const void *source, uint64_t offset, uint64_t count) {
traverseLeaves(offset, count, [source, offset] (uint64_t indexOfFirstLeafByte, DataLeafNode *leaf, uint32_t leafDataOffset, uint32_t leafDataSize) {
//TODO Simplify formula, make it easier to understand
leaf->write((uint8_t*)source + indexOfFirstLeafByte - offset + leafDataOffset, leafDataOffset, leafDataSize);
});
auto onExistingLeaf = [source, offset, count] (uint64_t indexOfFirstLeafByte, LeafHandle leaf, uint32_t leafDataOffset, uint32_t leafDataSize) {
ASSERT(indexOfFirstLeafByte+leafDataOffset>=offset && indexOfFirstLeafByte-offset+leafDataOffset <= count && indexOfFirstLeafByte-offset+leafDataOffset+leafDataSize <= count, "Reading from source out of bounds");
if (leafDataOffset == 0 && leafDataSize == leaf.nodeStore()->layout().maxBytesPerLeaf()) {
Data leafData(leafDataSize);
std::memcpy(leafData.data(), static_cast<const uint8_t*>(source) + indexOfFirstLeafByte - offset, leafDataSize);
leaf.nodeStore()->overwriteLeaf(leaf.blockId(), std::move(leafData));
} else {
//TODO Simplify formula, make it easier to understand
leaf.node()->write(static_cast<const uint8_t*>(source) + indexOfFirstLeafByte - offset + leafDataOffset, leafDataOffset,
leafDataSize);
}
};
auto onCreateLeaf = [source, offset, count] (uint64_t beginByte, uint32_t numBytes) -> Data {
ASSERT(beginByte >= offset && beginByte-offset <= count && beginByte-offset+numBytes <= count, "Reading from source out of bounds");
Data result(numBytes);
//TODO Simplify formula, make it easier to understand
std::memcpy(result.data(), static_cast<const uint8_t*>(source) + beginByte - offset, numBytes);
return result;
};
_traverseLeaves(offset, count, onExistingLeaf, onCreateLeaf);
}

void BlobOnBlocks::flush() {
_datatree->flush();
}

const Key &BlobOnBlocks::key() const {
return _datatree->key();
const BlockId &BlobOnBlocks::blockId() const {
return _datatree->blockId();
}

unique_ref<DataTreeRef> BlobOnBlocks::releaseTree() {
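The index arithmetic above ("TODO Simplify formula, make it easier to understand") and the tryRead change are easier to see with concrete numbers. The following is a minimal standalone sketch, not project code: maxBytesPerLeaf, beginByte and the tryRead numbers are made up for illustration, and ceilDivision/maxZeroSubtraction are written out inline.

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iostream>

int main() {
    // Hypothetical layout: 16-byte leaves; traverse bytes [20, 50) of the blob.
    const uint64_t maxBytesPerLeaf = 16;
    const uint64_t beginByte = 20, sizeBytes = 30;
    const uint64_t endByte = beginByte + sizeBytes;

    const uint64_t firstLeaf = beginByte / maxBytesPerLeaf;                      // 1
    const uint64_t endLeaf = (endByte + maxBytesPerLeaf - 1) / maxBytesPerLeaf;  // ceilDivision -> 4

    for (uint64_t leafIndex = firstLeaf; leafIndex < endLeaf; ++leafIndex) {
        const uint64_t indexOfFirstLeafByte = leafIndex * maxBytesPerLeaf;
        // maxZeroSubtraction: clamp to 0 instead of wrapping on unsigned underflow
        const uint64_t dataBegin = beginByte > indexOfFirstLeafByte ? beginByte - indexOfFirstLeafByte : 0;
        const uint64_t dataEnd = std::min(maxBytesPerLeaf, endByte - indexOfFirstLeafByte);
        std::cout << "leaf " << leafIndex << ": bytes [" << dataBegin << ", " << dataEnd << ")\n";
        // prints: leaf 1: bytes [4, 16), leaf 2: bytes [0, 16), leaf 3: bytes [0, 2)
    }

    // The tryRead change: with unsigned arithmetic, size()-offset wraps around when offset > size(),
    // so clamping with std::max(UINT64_C(0), ...) never triggers. Doing the subtraction in int64_t does.
    const int64_t size = 10, offset = 12, count = 4;
    const uint64_t realCount = std::max(INT64_C(0), std::min(count, size - offset));
    assert(realCount == 0);  // 0 bytes readable, instead of a huge wrapped-around value
    return 0;
}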
@@ -3,6 +3,7 @@
#define MESSMER_BLOBSTORE_IMPLEMENTATIONS_ONBLOCKS_BLOBONBLOCKS_H_

#include "../../interface/Blob.h"
#include "datatreestore/LeafHandle.h"

#include <memory>
#include <boost/optional.hpp>
@@ -21,7 +22,7 @@ public:
BlobOnBlocks(cpputils::unique_ref<parallelaccessdatatreestore::DataTreeRef> datatree);
~BlobOnBlocks();

const blockstore::Key &key() const override;
const blockstore::BlockId &blockId() const override;

uint64_t size() const override;
void resize(uint64_t numBytes) override;
@@ -38,10 +39,11 @@ public:
private:

void _read(void *target, uint64_t offset, uint64_t count) const;
void traverseLeaves(uint64_t offsetBytes, uint64_t sizeBytes, std::function<void (uint64_t, datanodestore::DataLeafNode *, uint32_t, uint32_t)>) const;
void _traverseLeaves(uint64_t offsetBytes, uint64_t sizeBytes, std::function<void (uint64_t leafOffset, datatreestore::LeafHandle leaf, uint32_t begin, uint32_t count)> onExistingLeaf, std::function<cpputils::Data (uint64_t beginByte, uint32_t count)> onCreateLeaf) const;

cpputils::unique_ref<parallelaccessdatatreestore::DataTreeRef> _datatree;
mutable boost::optional<uint64_t> _sizeCache;
mutable std::mutex _mutex;

DISALLOW_COPY_AND_ASSIGN(BlobOnBlocks);
};
@@ -15,7 +15,7 @@ using cpputils::make_unique_ref;

using blockstore::BlockStore;
using blockstore::parallelaccess::ParallelAccessBlockStore;
using blockstore::Key;
using blockstore::BlockId;
using cpputils::dynamic_pointer_move;
using boost::optional;
using boost::none;
@@ -38,8 +38,8 @@ unique_ref<Blob> BlobStoreOnBlocks::create() {
return make_unique_ref<BlobOnBlocks>(_dataTreeStore->createNewTree());
}

optional<unique_ref<Blob>> BlobStoreOnBlocks::load(const Key &key) {
auto tree = _dataTreeStore->load(key);
optional<unique_ref<Blob>> BlobStoreOnBlocks::load(const BlockId &blockId) {
auto tree = _dataTreeStore->load(blockId);
if (tree == none) {
return none;
}
@@ -52,6 +52,10 @@ void BlobStoreOnBlocks::remove(unique_ref<Blob> blob) {
_dataTreeStore->remove((*_blob)->releaseTree());
}

void BlobStoreOnBlocks::remove(const BlockId &blockId) {
_dataTreeStore->remove(blockId);
}

uint64_t BlobStoreOnBlocks::virtualBlocksizeBytes() const {
return _dataTreeStore->virtualBlocksizeBytes();
}
@@ -20,9 +20,10 @@ public:
~BlobStoreOnBlocks();

cpputils::unique_ref<Blob> create() override;
boost::optional<cpputils::unique_ref<Blob>> load(const blockstore::Key &key) override;
boost::optional<cpputils::unique_ref<Blob>> load(const blockstore::BlockId &blockId) override;

void remove(cpputils::unique_ref<Blob> blob) override;
void remove(const blockstore::BlockId &blockId) override;

//TODO Test blocksizeBytes/numBlocks/estimateSpaceForNumBlocksLeft
//virtual means "space we can use" as opposed to "space it takes on the disk" (i.e. virtual is without headers, checksums, ...)
@@ -3,10 +3,12 @@
#include <cpp-utils/assert/assert.h>

using blockstore::Block;
using blockstore::BlockStore;
using cpputils::Data;
using cpputils::unique_ref;
using cpputils::make_unique_ref;
using blockstore::Key;
using blockstore::BlockId;
using std::vector;

namespace blobstore {
namespace onblocks {
@@ -23,62 +25,62 @@ DataInnerNode::DataInnerNode(DataNodeView view)
DataInnerNode::~DataInnerNode() {
}

unique_ref<DataInnerNode> DataInnerNode::InitializeNewNode(unique_ref<Block> block, const DataNode &first_child) {
DataNodeView node(std::move(block));
node.setFormatVersion(DataNode::FORMAT_VERSION_HEADER);
node.setDepth(first_child.depth() + 1);
node.setSize(1);
auto result = make_unique_ref<DataInnerNode>(std::move(node));
result->ChildrenBegin()->setKey(first_child.key());
return result;
unique_ref<DataInnerNode> DataInnerNode::InitializeNewNode(unique_ref<Block> block, const DataNodeLayout &layout, uint8_t depth, const vector<BlockId> &children) {
ASSERT(children.size() >= 1, "An inner node must have at least one child");
Data data = _serializeChildren(children);

return make_unique_ref<DataInnerNode>(DataNodeView::initialize(std::move(block), layout, DataNode::FORMAT_VERSION_HEADER, depth, children.size(), std::move(data)));
}

unique_ref<DataInnerNode> DataInnerNode::CreateNewNode(BlockStore *blockStore, const DataNodeLayout &layout, uint8_t depth, const vector<BlockId> &children) {
ASSERT(children.size() >= 1, "An inner node must have at least one child");
Data data = _serializeChildren(children);

return make_unique_ref<DataInnerNode>(DataNodeView::create(blockStore, layout, DataNode::FORMAT_VERSION_HEADER, depth, children.size(), std::move(data)));
}

Data DataInnerNode::_serializeChildren(const vector<BlockId> &children) {
Data data(sizeof(ChildEntry) * children.size());
uint32_t i = 0;
for (const BlockId &child : children) {
child.ToBinary(data.dataOffset(i * BlockId::BINARY_LENGTH));
++i;
}
return data;
}

uint32_t DataInnerNode::numChildren() const {
return node().Size();
}

DataInnerNode::ChildEntry *DataInnerNode::ChildrenBegin() {
return const_cast<ChildEntry*>(const_cast<const DataInnerNode*>(this)->ChildrenBegin());
}

const DataInnerNode::ChildEntry *DataInnerNode::ChildrenBegin() const {
return node().DataBegin<ChildEntry>();
}

DataInnerNode::ChildEntry *DataInnerNode::ChildrenEnd() {
return const_cast<ChildEntry*>(const_cast<const DataInnerNode*>(this)->ChildrenEnd());
}

const DataInnerNode::ChildEntry *DataInnerNode::ChildrenEnd() const {
return ChildrenBegin() + node().Size();
}

DataInnerNode::ChildEntry *DataInnerNode::LastChild() {
return const_cast<ChildEntry*>(const_cast<const DataInnerNode*>(this)->LastChild());
}

const DataInnerNode::ChildEntry *DataInnerNode::LastChild() const {
return getChild(numChildren()-1);
}

DataInnerNode::ChildEntry *DataInnerNode::getChild(unsigned int index) {
return const_cast<ChildEntry*>(const_cast<const DataInnerNode*>(this)->getChild(index));
}

const DataInnerNode::ChildEntry *DataInnerNode::getChild(unsigned int index) const {
DataInnerNode::ChildEntry DataInnerNode::readChild(unsigned int index) const {
ASSERT(index < numChildren(), "Accessing child out of range");
return ChildrenBegin()+index;
return ChildEntry(BlockId::FromBinary(static_cast<const uint8_t*>(node().data()) + index * sizeof(ChildEntry)));
}

void DataInnerNode::_writeChild(unsigned int index, const ChildEntry& child) {
ASSERT(index < numChildren(), "Accessing child out of range");
node().write(child.blockId().data().data(), index * sizeof(ChildEntry), sizeof(ChildEntry));
}

DataInnerNode::ChildEntry DataInnerNode::readLastChild() const {
return readChild(numChildren() - 1);
}

void DataInnerNode::_writeLastChild(const ChildEntry& child) {
_writeChild(numChildren() - 1, child);
}

void DataInnerNode::addChild(const DataNode &child) {
ASSERT(numChildren() < maxStoreableChildren(), "Adding more children than we can store");
ASSERT(child.depth() == depth()-1, "The child that should be added has wrong depth");
node().setSize(node().Size()+1);
LastChild()->setKey(child.key());
_writeLastChild(ChildEntry(child.blockId()));
}

void DataInnerNode::removeLastChild() {
ASSERT(node().Size() > 1, "There is no child to remove");
_writeLastChild(ChildEntry(BlockId::Null()));
node().setSize(node().Size()-1);
}
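The new DataInnerNode no longer hands out pointers into the block; children are kept as a flat run of fixed-length block ids that is serialized once and read back entry by entry. A minimal self-contained sketch of that round trip follows; FakeBlockId and its BINARY_LENGTH of 16 are illustrative stand-ins, not the real blockstore types or sizes.

#include <array>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

// Stand-in for a fixed-length binary block id.
struct FakeBlockId {
    static constexpr size_t BINARY_LENGTH = 16;  // assumption for the sketch
    std::array<uint8_t, BINARY_LENGTH> bytes{};
    void ToBinary(uint8_t *target) const { std::memcpy(target, bytes.data(), BINARY_LENGTH); }
    static FakeBlockId FromBinary(const uint8_t *source) {
        FakeBlockId id;
        std::memcpy(id.bytes.data(), source, BINARY_LENGTH);
        return id;
    }
};

// Same idea as DataInnerNode::_serializeChildren: children stored back to back in the data region.
std::vector<uint8_t> serializeChildren(const std::vector<FakeBlockId> &children) {
    std::vector<uint8_t> data(FakeBlockId::BINARY_LENGTH * children.size());
    for (size_t i = 0; i < children.size(); ++i) {
        children[i].ToBinary(data.data() + i * FakeBlockId::BINARY_LENGTH);
    }
    return data;
}

// Same idea as DataInnerNode::readChild: decode entry i back out of the flat buffer.
FakeBlockId readChild(const std::vector<uint8_t> &data, size_t index) {
    return FakeBlockId::FromBinary(data.data() + index * FakeBlockId::BINARY_LENGTH);
}

int main() {
    std::vector<FakeBlockId> children(3);
    children[1].bytes[0] = 0x42;
    auto data = serializeChildren(children);
    assert(data.size() == 3 * FakeBlockId::BINARY_LENGTH);
    assert(readChild(data, 1).bytes[0] == 0x42);  // round trip preserves each child id
    return 0;
}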
@@ -11,33 +11,29 @@ namespace datanodestore {

class DataInnerNode final: public DataNode {
public:
static cpputils::unique_ref<DataInnerNode> InitializeNewNode(cpputils::unique_ref<blockstore::Block> block, const DataNode &first_child_key);
static cpputils::unique_ref<DataInnerNode> InitializeNewNode(cpputils::unique_ref<blockstore::Block> block, const DataNodeLayout &layout, uint8_t depth, const std::vector<blockstore::BlockId> &children);
static cpputils::unique_ref<DataInnerNode> CreateNewNode(blockstore::BlockStore *blockStore, const DataNodeLayout &layout, uint8_t depth, const std::vector<blockstore::BlockId> &children);

using ChildEntry = DataInnerNode_ChildEntry;

DataInnerNode(DataNodeView block);
~DataInnerNode();

using ChildEntry = DataInnerNode_ChildEntry;

uint32_t maxStoreableChildren() const;

ChildEntry *getChild(unsigned int index);
const ChildEntry *getChild(unsigned int index) const;
ChildEntry readChild(unsigned int index) const;
ChildEntry readLastChild() const;

uint32_t numChildren() const;

void addChild(const DataNode &child_key);
void addChild(const DataNode &child_blockId);

void removeLastChild();

ChildEntry *LastChild();
const ChildEntry *LastChild() const;

private:

ChildEntry *ChildrenBegin();
ChildEntry *ChildrenEnd();
const ChildEntry *ChildrenBegin() const;
const ChildEntry *ChildrenEnd() const;
void _writeChild(unsigned int index, const ChildEntry& child);
void _writeLastChild(const ChildEntry& child);
static cpputils::Data _serializeChildren(const std::vector<blockstore::BlockId> &children);

DISALLOW_COPY_AND_ASSIGN(DataInnerNode);
};
@@ -10,17 +10,19 @@ namespace datanodestore{

struct DataInnerNode_ChildEntry final {
public:
blockstore::Key key() const {
return blockstore::Key::FromBinary(_keydata);
}
private:
void setKey(const blockstore::Key &key) {
key.ToBinary(_keydata);
}
friend class DataInnerNode;
uint8_t _keydata[blockstore::Key::BINARY_LENGTH];
DataInnerNode_ChildEntry(const blockstore::BlockId &blockId): _blockId(blockId) {}

DISALLOW_COPY_AND_ASSIGN(DataInnerNode_ChildEntry);
const blockstore::BlockId& blockId() const {
return _blockId;
}

DataInnerNode_ChildEntry(const DataInnerNode_ChildEntry&) = delete;
DataInnerNode_ChildEntry& operator=(const DataInnerNode_ChildEntry&) = delete;
DataInnerNode_ChildEntry(DataInnerNode_ChildEntry&&) = default;
DataInnerNode_ChildEntry& operator=(DataInnerNode_ChildEntry&&) = default;

private:
blockstore::BlockId _blockId;
};

}
@@ -2,9 +2,9 @@
#include "DataInnerNode.h"
#include <cpp-utils/assert/assert.h>

using blockstore::Block;
using cpputils::Data;
using blockstore::Key;
using blockstore::BlockId;
using blockstore::BlockStore;
using cpputils::unique_ref;
using cpputils::make_unique_ref;

@@ -24,18 +24,21 @@ DataLeafNode::DataLeafNode(DataNodeView view)
DataLeafNode::~DataLeafNode() {
}

unique_ref<DataLeafNode> DataLeafNode::InitializeNewNode(unique_ref<Block> block) {
DataNodeView node(std::move(block));
node.setFormatVersion(DataNode::FORMAT_VERSION_HEADER);
node.setDepth(0);
node.setSize(0);
//fillDataWithZeroes(); not needed, because a newly created block will be zeroed out. DataLeafNodeTest.SpaceIsZeroFilledWhenGrowing ensures this.
return make_unique_ref<DataLeafNode>(std::move(node));
unique_ref<DataLeafNode> DataLeafNode::CreateNewNode(BlockStore *blockStore, const DataNodeLayout &layout, Data data) {
ASSERT(data.size() <= layout.maxBytesPerLeaf(), "Data passed in is too large for one leaf.");
uint32_t size = data.size();
return make_unique_ref<DataLeafNode>(DataNodeView::create(blockStore, layout, DataNode::FORMAT_VERSION_HEADER, 0, size, std::move(data)));
}

unique_ref<DataLeafNode> DataLeafNode::OverwriteNode(BlockStore *blockStore, const DataNodeLayout &layout, const BlockId &blockId, Data data) {
ASSERT(data.size() == layout.maxBytesPerLeaf(), "Data passed in is too large for one leaf.");
uint32_t size = data.size();
return make_unique_ref<DataLeafNode>(DataNodeView::overwrite(blockStore, layout, DataNode::FORMAT_VERSION_HEADER, 0, size, blockId, std::move(data)));
}

void DataLeafNode::read(void *target, uint64_t offset, uint64_t size) const {
ASSERT(offset <= node().Size() && offset + size <= node().Size(), "Read out of valid area"); // Also check offset, because the addition could lead to overflows
std::memcpy(target, (uint8_t*)node().data() + offset, size);
std::memcpy(target, static_cast<const uint8_t*>(node().data()) + offset, size);
}

void DataLeafNode::write(const void *source, uint64_t offset, uint64_t size) {
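The assert in DataLeafNode::read carries the comment "Also check offset, because the addition could lead to overflows". A tiny standalone illustration of the wraparound that the extra offset check guards against; the concrete numbers are made up for the example.

#include <cassert>
#include <cstdint>
#include <limits>

int main() {
    const uint64_t nodeSize = 100;                                      // bytes stored in the leaf
    const uint64_t offset = 200;                                        // clearly out of range
    const uint64_t size = std::numeric_limits<uint64_t>::max() - 150;   // offset + size wraps to 49

    const bool naiveCheck = (offset + size <= nodeSize);                // passes despite the bad offset
    const bool fullCheck = (offset <= nodeSize) && (offset + size <= nodeSize);

    assert(naiveCheck == true);   // unsigned wraparound makes the single check pass
    assert(fullCheck == false);   // the additional offset check catches it
    return 0;
}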
@@ -11,7 +11,8 @@ class DataInnerNode;

class DataLeafNode final: public DataNode {
public:
static cpputils::unique_ref<DataLeafNode> InitializeNewNode(cpputils::unique_ref<blockstore::Block> block);
static cpputils::unique_ref<DataLeafNode> CreateNewNode(blockstore::BlockStore *blockStore, const DataNodeLayout &layout, cpputils::Data data);
static cpputils::unique_ref<DataLeafNode> OverwriteNode(blockstore::BlockStore *blockStore, const DataNodeLayout &layout, const blockstore::BlockId &blockId, cpputils::Data data);

DataLeafNode(DataNodeView block);
~DataLeafNode();
@@ -4,10 +4,8 @@
#include "DataNodeStore.h"
#include <blockstore/utils/BlockStoreUtils.h>

using blockstore::Block;
using blockstore::Key;
using blockstore::BlockId;

using std::runtime_error;
using cpputils::unique_ref;

namespace blobstore {
@@ -31,20 +29,19 @@ const DataNodeView &DataNode::node() const {
return _node;
}

const Key &DataNode::key() const {
return _node.key();
const BlockId &DataNode::blockId() const {
return _node.blockId();
}

uint8_t DataNode::depth() const {
return _node.Depth();
}

unique_ref<DataInnerNode> DataNode::convertToNewInnerNode(unique_ref<DataNode> node, const DataNode &first_child) {
Key key = node->key();
unique_ref<DataInnerNode> DataNode::convertToNewInnerNode(unique_ref<DataNode> node, const DataNodeLayout &layout, const DataNode &first_child) {
auto block = node->_node.releaseBlock();
blockstore::utils::fillWithZeroes(block.get());

return DataInnerNode::InitializeNewNode(std::move(block), first_child);
return DataInnerNode::InitializeNewNode(std::move(block), layout, first_child.depth()+1, {first_child.blockId()});
}

void DataNode::flush() const {