Compare commits
15 Commits
Author | SHA1 | Date | |
---|---|---|---|
35a2198ac9 | |||
267fb40fe7 | |||
f03d39433c | |||
9c19680b55 | |||
fb593c468c | |||
ac2b8a615b | |||
f89ac70d59 | |||
6cd6d9ef5d | |||
412df2e7be | |||
e9f000353b | |||
03fffc44a5 | |||
56c7d96bbf | |||
e6d735dbab | |||
3866607d68 | |||
3df8023b21 |
@ -1,14 +0,0 @@
|
||||
# This config file is a dummy CircleCI config that does nothing. We migrated away from CircleCI to Github Actions.
|
||||
# But our release/0.10 branch still uses CircleCI, so we can't disable the service entirely and need some way
|
||||
# to disable it only for newer versions. That's what this file is doing.
|
||||
|
||||
version: 2.1
|
||||
|
||||
jobs:
|
||||
build:
|
||||
docker:
|
||||
- image: circleci/node:11.12.0
|
||||
steps:
|
||||
- run:
|
||||
name: Dummy
|
||||
command: 'echo Not running any Circle CI'
|
16
.github/ISSUE_TEMPLATE.md
vendored
16
.github/ISSUE_TEMPLATE.md
vendored
@ -1,16 +0,0 @@
|
||||
## Expected Behavior
|
||||
|
||||
|
||||
## Actual Behavior
|
||||
|
||||
|
||||
## Steps to Reproduce the Problem
|
||||
|
||||
1.
|
||||
2.
|
||||
3.
|
||||
|
||||
## Specifications
|
||||
|
||||
- CryFS Version:
|
||||
- Operating System (incl. Version):
|
@ -1,76 +0,0 @@
|
||||
name: 'Install local dependencies'
|
||||
description: 'Install local dependencies'
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Install local dependencies
|
||||
shell: bash
|
||||
run: |
|
||||
set -v
|
||||
# TODO Cache these dependencies for faster runtime
|
||||
|
||||
export NUMCORES=`nproc` && if [ ! -n "$NUMCORES" ]; then export NUMCORES=`sysctl -n hw.ncpu`; fi
|
||||
echo Using $NUMCORES cores
|
||||
|
||||
echo Download range-v3
|
||||
cd ~
|
||||
wget https://github.com/ericniebler/range-v3/archive/0.11.0.tar.gz -O range-v3-0.11.0.tar.gz
|
||||
if [ $(sha512sum range-v3-0.11.0.tar.gz | awk '{print $1;}') == "9d6cdcbc1e50104206ba731c3bdc9aab3acfcf69cd83f0e0b4de18b88df2a9e73d64e55638421768d4433c542b6619f6e5af6b17cccd3090cf8b4d4efe9863e4" ]; then
|
||||
echo Correct sha512sum
|
||||
else
|
||||
echo Wrong sha512sum
|
||||
sha512sum range-v3-0.11.0.tar.gz
|
||||
exit 1
|
||||
fi
|
||||
tar -xvf range-v3-0.11.0.tar.gz
|
||||
cd range-v3-0.11.0/
|
||||
|
||||
echo Install range-v3
|
||||
mkdir build
|
||||
cd build
|
||||
cmake .. -DRANGES_HAS_WERROR=off -DRANGE_V3_EXAMPLES=off -DRANGE_V3_TESTS=off
|
||||
make -j$NUMCORES
|
||||
sudo make install
|
||||
cd ~
|
||||
rm -rf range-v3-0.11.0
|
||||
rm range-v3-0.11.0.tar.gz
|
||||
|
||||
echo Download spdlog
|
||||
cd ~
|
||||
wget https://github.com/gabime/spdlog/archive/v1.8.5.tar.gz -O spdlog.tar.gz
|
||||
if [ $(sha512sum spdlog.tar.gz | awk '{print $1;}') == "77cc9df0c40bbdbfe1f3e5818dccf121918bfceac28f2608f39e5bf944968b7e8e24a6fc29f01bc58a9bae41b8892d49cfb59c196935ec9868884320b50f130c" ]; then
|
||||
echo Correct sha512sum
|
||||
else
|
||||
echo Wrong sha512sum
|
||||
sha512sum spdlog.tar.gz
|
||||
exit 1
|
||||
fi
|
||||
tar -xvf spdlog.tar.gz
|
||||
rm spdlog.tar.gz
|
||||
cd spdlog-1.8.5
|
||||
|
||||
echo Install spdlog
|
||||
mkdir build
|
||||
cd build
|
||||
cmake ..
|
||||
make -j$NUMCORES
|
||||
sudo make install
|
||||
|
||||
echo Download boost
|
||||
cd ~
|
||||
wget -O boost.tar.bz2 https://sourceforge.net/projects/boost/files/boost/1.75.0/boost_1_75_0.tar.bz2/download
|
||||
if [ $(sha512sum boost.tar.bz2 | awk '{print $1;}') == "d86f060245e98dca5c7f3f831c98ea9ccbfa8310f20830dd913d9d4c939fbe7cb94accd35f1128e7c4faf6c27adb6f4bb54e5477a6bde983dfc7aa33c4eed03a" ]; then
|
||||
echo Correct sha512sum
|
||||
else
|
||||
echo Wrong sha512sum
|
||||
sha512sum boost.tar.bz2
|
||||
exit 1
|
||||
fi
|
||||
echo Extracting boost
|
||||
tar -xf boost.tar.bz2
|
||||
rm boost.tar.bz2
|
||||
cd boost_1_75_0
|
||||
|
||||
echo Install boost
|
||||
./bootstrap.sh --with-libraries=filesystem,system,thread,chrono,program_options
|
||||
sudo ./b2 link=shared cxxflags=-fPIC --prefix=/usr -d0 -j$NUMCORES install
|
56
.github/workflows/actions/run_build/action.yaml
vendored
56
.github/workflows/actions/run_build/action.yaml
vendored
@ -1,56 +0,0 @@
|
||||
name: 'Build'
|
||||
description: 'Compile CryFS'
|
||||
inputs:
|
||||
cc:
|
||||
description: "Which C compiler to use for the build"
|
||||
required: true
|
||||
cxx:
|
||||
description: "Which C++ compiler to use for the build"
|
||||
required: true
|
||||
build_type:
|
||||
description: "Which cmake build type to use (e.g. Release, Debug, RelWithDebInfo)"
|
||||
required: true
|
||||
extra_cmake_flags:
|
||||
description: "Extra flags to add to the cmake command"
|
||||
required: true
|
||||
extra_cxxflags:
|
||||
description: "Extra flags to add to the compiler"
|
||||
required: true
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Show build system information
|
||||
shell: bash
|
||||
run: |
|
||||
set -v
|
||||
echo CMake version:
|
||||
cmake --version
|
||||
echo Ninja version:
|
||||
ninja --version
|
||||
echo CC: ${{inputs.cc}}
|
||||
${{inputs.cc}} --version
|
||||
echo CXX: ${{inputs.cxx}}
|
||||
${{inputs.cxx}} --version
|
||||
echo CCache:
|
||||
ccache --version
|
||||
ccache -s
|
||||
- name: Run cmake
|
||||
shell: bash
|
||||
run: |
|
||||
set -v
|
||||
export CXXFLAGS="$CXXFLAGS ${{inputs.extra_cxxflags}}"
|
||||
if [[ "${{inputs.cxx}}" == clang* && "${{inputs.build_type}}" == "Debug" ]]; then
|
||||
# TODO Our linux clang build actually use libstdc++11 instead of libc++, we need to fix this check
|
||||
# TODO Add the corresponding libstdc++11 debug macros when building with gcc
|
||||
echo We are doing a debug build on clang. Adding some more debug flags for libc++
|
||||
export CXXFLAGS="$CXXFLAGS -D_LIBCPP_DEBUG=1 -D_LIBCPP_ENABLE_NODISCARD=1 -D_LIBCPP_ENABLE_DEPRECATION_WARNINGS=1"
|
||||
fi
|
||||
mkdir build
|
||||
cd build
|
||||
cmake .. -GNinja -DCMAKE_CXX_COMPILER=${{inputs.cxx}} -DCMAKE_C_COMPILER=${{inputs.cc}} -DBUILD_TESTING=on -DCMAKE_BUILD_TYPE=${{inputs.build_type}} -DCMAKE_CXX_COMPILER_LAUNCHER=ccache -DCMAKE_C_COMPILER_LAUNCHER=ccache ${{inputs.extra_cmake_flags}}
|
||||
- name: Run ninja
|
||||
shell: bash
|
||||
run: |
|
||||
set -v
|
||||
cd build
|
||||
ninja
|
38
.github/workflows/actions/run_tests/action.yaml
vendored
38
.github/workflows/actions/run_tests/action.yaml
vendored
@ -1,38 +0,0 @@
|
||||
name: 'Test'
|
||||
description: 'Run CryFS Tests'
|
||||
inputs:
|
||||
gtest_args:
|
||||
description: "Extra arguments for gtest runners, for example tests to exclude"
|
||||
required: true
|
||||
extra_env_vars:
|
||||
description: "Extra environment variables to set before running tests"
|
||||
required: true
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Run tests
|
||||
shell: bash
|
||||
run: |
|
||||
set -v
|
||||
echo Running on ${{runner.os}}
|
||||
cd build
|
||||
|
||||
export ${{ inputs.extra_env_vars }}
|
||||
|
||||
./test/gitversion/gitversion-test ${{inputs.gtest_args}}
|
||||
./test/cpp-utils/cpp-utils-test ${{inputs.gtest_args}}
|
||||
./test/parallelaccessstore/parallelaccessstore-test ${{inputs.gtest_args}}
|
||||
./test/blockstore/blockstore-test ${{inputs.gtest_args}}
|
||||
./test/blobstore/blobstore-test ${{inputs.gtest_args}}
|
||||
./test/cryfs/cryfs-test ${{inputs.gtest_args}}
|
||||
|
||||
# TODO Also run on macOS once fixed
|
||||
if [[ "${{runner.os}}" == "macOS" ]]; then
|
||||
echo Skipping some tests because they are not fixed for macOS yet
|
||||
else
|
||||
# TODO Also run with TSAN once fixed
|
||||
if [[ "${{matrix.name}}" != "TSAN" ]]; then
|
||||
./test/fspp/fspp-test ${{inputs.gtest_args}}
|
||||
fi
|
||||
./test/cryfs-cli/cryfs-cli-test ${{inputs.gtest_args}}
|
||||
fi
|
@ -1,40 +0,0 @@
|
||||
name: 'Setup Linux'
|
||||
description: 'Setup Linux'
|
||||
inputs:
|
||||
os:
|
||||
description: "Exact os (i.e. ubuntu version) this runs on"
|
||||
required: true
|
||||
extra_apt_packages:
|
||||
description: "Job-specific apt packages to install (e.g. the compiler)"
|
||||
required: true
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Install Linux dependencies
|
||||
shell: bash
|
||||
run: |
|
||||
if [[ "${{inputs.os}}" == "ubuntu-18.04" ]]; then
|
||||
echo Adding apt repositories for newer clang versions on Ubuntu 18.04
|
||||
wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key|sudo apt-key add -
|
||||
sudo touch /etc/apt/sources.list.d/clang.list
|
||||
sudo chmod o+w /etc/apt/sources.list.d/clang.list
|
||||
echo "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-11 main" >> /etc/apt/sources.list.d/clang.list
|
||||
echo "deb-src http://apt.llvm.org/bionic/ llvm-toolchain-bionic-11 main" >> /etc/apt/sources.list.d/clang.list
|
||||
sudo chmod o-w /etc/apt/sources.list.d/clang.list
|
||||
elif [[ "${{inputs.os}}" == "ubuntu-20.04" ]]; then
|
||||
echo Adding apt repositories for newer clang versions on Ubuntu 20.04
|
||||
wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key|sudo apt-key add -
|
||||
sudo touch /etc/apt/sources.list.d/clang.list
|
||||
sudo chmod o+w /etc/apt/sources.list.d/clang.list
|
||||
echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-11 main" >> /etc/apt/sources.list.d/clang.list
|
||||
echo "deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-11 main" >> /etc/apt/sources.list.d/clang.list
|
||||
sudo chmod o-w /etc/apt/sources.list.d/clang.list
|
||||
fi
|
||||
sudo apt-get update
|
||||
sudo apt-get install ninja-build libcurl4-openssl-dev libfuse-dev ccache ${{inputs.extra_apt_packages}}
|
||||
- name: Speed up random generator
|
||||
run: |
|
||||
set -v
|
||||
# Use /dev/urandom when /dev/random is accessed to use less entropy
|
||||
sudo cp -a /dev/urandom /dev/random
|
||||
shell: bash
|
@ -1,13 +0,0 @@
|
||||
name: 'Setup macOS'
|
||||
description: 'Setup macOS'
|
||||
inputs:
|
||||
extra_homebrew_packages:
|
||||
description: "Job-specific homebrew packages to install (e.g. the compiler)"
|
||||
required: true
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Install macOS dependencies
|
||||
shell: bash
|
||||
run: |
|
||||
brew install ninja macfuse libomp ccache md5sha1sum ${{inputs.extra_homebrew_packages}}
|
@ -1,10 +0,0 @@
|
||||
name: 'Setup Windows'
|
||||
description: 'Setup Windows'
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Install Windows dependencies
|
||||
shell: bash
|
||||
run: |
|
||||
choco install -y ninja
|
||||
choco install -y dokany --version 1.3.0.1000 --installargs INSTALLDEVFILES=1
|
604
.github/workflows/main.yaml
vendored
604
.github/workflows/main.yaml
vendored
@ -1,604 +0,0 @@
|
||||
name: CI
|
||||
on: ['push', 'pull_request']
|
||||
|
||||
jobs:
|
||||
linux_macos:
|
||||
name: CI (Linux/macOS)
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
name: [""]
|
||||
os:
|
||||
- macos-10.15
|
||||
- ubuntu-18.04
|
||||
- ubuntu-20.04
|
||||
compiler:
|
||||
- cxx: g++-7
|
||||
cc: gcc-7
|
||||
macos_cxx: g++-7
|
||||
macos_cc: gcc-7
|
||||
homebrew_package: gcc@7
|
||||
apt_package: g++-7
|
||||
- cxx: g++-8
|
||||
cc: gcc-8
|
||||
macos_cxx: g++-8
|
||||
macos_cc: gcc-8
|
||||
homebrew_package: gcc@8
|
||||
apt_package: g++-8
|
||||
- cxx: g++-9
|
||||
cc: gcc-9
|
||||
macos_cxx: g++-9
|
||||
macos_cc: gcc-9
|
||||
apt_package: g++-9
|
||||
homebrew_package: gcc@9
|
||||
# TODO gcc 10 doesn't work, potentially because cmake doesn't know yet that the STL now depends on pthread. See https://github.com/pothosware/SoapySDRPlay3/issues/5
|
||||
# - cxx: g++-10
|
||||
# cc: gcc-10
|
||||
# macos_cxx: g++-10
|
||||
# macos_cc: gcc-10
|
||||
# apt_package: g++-10
|
||||
# homebrew_package: gcc@10
|
||||
- cxx: clang++-7
|
||||
cc: clang-7
|
||||
macos_cxx: /usr/local/opt/llvm@7/bin/clang++
|
||||
macos_cc: /usr/local/opt/llvm@7/bin/clang
|
||||
apt_package: clang-7
|
||||
homebrew_package: llvm@7
|
||||
- cxx: clang++-8
|
||||
cc: clang-8
|
||||
macos_cxx: /usr/local/opt/llvm@8/bin/clang++
|
||||
macos_cc: /usr/local/opt/llvm@8/bin/clang
|
||||
apt_package: clang-8
|
||||
homebrew_package: llvm@8
|
||||
- cxx: clang++-9
|
||||
cc: clang-9
|
||||
macos_cxx: /usr/local/opt/llvm@9/bin/clang++
|
||||
macos_cc: /usr/local/opt/llvm@9/bin/clang
|
||||
apt_package: clang-9
|
||||
homebrew_package: llvm@9
|
||||
# TODO Clang-10 on linux? macos homebrew doesn't seem to have it
|
||||
- cxx: clang++-11
|
||||
cc: clang-11
|
||||
macos_cxx: /usr/local/opt/llvm@11/bin/clang++
|
||||
macos_cc: /usr/local/opt/llvm@11/bin/clang
|
||||
apt_package: clang-11
|
||||
homebrew_package: llvm@11
|
||||
# Apple Clang
|
||||
# - cxx: clang++
|
||||
# cc: clang
|
||||
# homebrew_package: ""
|
||||
build_type:
|
||||
- Debug
|
||||
- Release
|
||||
- RelWithDebInfo
|
||||
extra_cmake_flags: [""]
|
||||
extra_cxxflags: [""]
|
||||
extra_env_vars_for_test: [""]
|
||||
install_dependencies_manually: [false]
|
||||
run_build: [true]
|
||||
run_tests: [true]
|
||||
run_clang_tidy: [false]
|
||||
include:
|
||||
- name: Local dependencies
|
||||
os: ubuntu-18.04
|
||||
compiler:
|
||||
cxx: clang++-11
|
||||
cc: clang-11
|
||||
apt_package: clang-11
|
||||
build_type: RelWithDebInfo
|
||||
extra_cmake_flags: -DDEPENDENCY_CONFIG=../cmake-utils/DependenciesFromLocalSystem.cmake
|
||||
extra_cxxflags: ""
|
||||
extra_env_vars_for_test: ""
|
||||
install_dependencies_manually: true
|
||||
run_build: true
|
||||
run_tests: true
|
||||
- name: Local dependencies
|
||||
os: ubuntu-20.04
|
||||
compiler:
|
||||
cxx: clang++-11
|
||||
cc: clang-11
|
||||
apt_package: clang-11
|
||||
build_type: RelWithDebInfo
|
||||
extra_cmake_flags: -DDEPENDENCY_CONFIG=../cmake-utils/DependenciesFromLocalSystem.cmake
|
||||
extra_cxxflags: ""
|
||||
extra_env_vars_for_test: ""
|
||||
install_dependencies_manually: true
|
||||
run_build: true
|
||||
run_tests: true
|
||||
- name: Werror gcc
|
||||
os: ubuntu-20.04
|
||||
compiler:
|
||||
cxx: g++-9
|
||||
cc: gcc-9
|
||||
apt_package: g++-9
|
||||
build_type: RelWithDebInfo
|
||||
extra_cmake_flags: -DUSE_WERROR=on
|
||||
extra_cxxflags: ""
|
||||
install_dependencies_manually: false
|
||||
run_build: true
|
||||
run_tests: false
|
||||
- name: Werror clang
|
||||
os: ubuntu-20.04
|
||||
compiler:
|
||||
cxx: clang++-11
|
||||
cc: clang-11
|
||||
apt_package: clang++-11
|
||||
build_type: RelWithDebInfo
|
||||
extra_cmake_flags: -DUSE_WERROR=on
|
||||
extra_cxxflags: ""
|
||||
install_dependencies_manually: false
|
||||
run_build: true
|
||||
run_tests: false
|
||||
- name: No compatibility
|
||||
os: ubuntu-20.04
|
||||
compiler:
|
||||
cxx: clang++-11
|
||||
cc: clang-11
|
||||
apt_package: clang++-11
|
||||
build_type: RelWithDebInfo
|
||||
extra_cmake_flags: ""
|
||||
extra_cxxflags: "-DCRYFS_NO_COMPATIBILITY"
|
||||
extra_env_vars_for_test: ""
|
||||
install_dependencies_manually: false
|
||||
run_build: true
|
||||
run_tests: true
|
||||
- name: ASAN
|
||||
os: ubuntu-20.04
|
||||
compiler:
|
||||
cxx: clang++-11
|
||||
cc: clang-11
|
||||
apt_package: clang++-11
|
||||
build_type: Debug
|
||||
# OpenMP crashes under asan. Disable OpenMP.
|
||||
# TODO is it enough to replace this with omp_num_threads: 1 ?
|
||||
extra_cmake_flags: "-DDISABLE_OPENMP=ON"
|
||||
extra_cxxflags: "-O1 -fsanitize=address -fno-omit-frame-pointer -fno-optimize-sibling-calls -fno-common -fsanitize-address-use-after-scope"
|
||||
extra_env_vars_for_test: ASAN_OPTIONS="detect_leaks=1 check_initialization_order=1 detect_stack_use_after_return=1 detect_invalid_pointer_pairs=1 atexit=1"
|
||||
install_dependencies_manually: false
|
||||
run_build: true
|
||||
run_tests: true
|
||||
- name: UBSAN
|
||||
os: ubuntu-20.04
|
||||
compiler:
|
||||
cxx: clang++-11
|
||||
cc: clang-11
|
||||
apt_package: clang++-11
|
||||
build_type: Debug
|
||||
# OpenMP crashes under ubsan. Disable OpenMP.
|
||||
# TODO is it enough to replace this with omp_num_threads: 1 ?
|
||||
extra_cmake_flags: "-DDISABLE_OPENMP=ON"
|
||||
extra_cxxflags: "-O1 -fno-sanitize-recover=undefined,nullability,implicit-conversion,unsigned-integer-overflow,local-bounds,float-divide-by-zero -fno-omit-frame-pointer -fno-optimize-sibling-calls -fno-common"
|
||||
extra_env_vars_for_test: UBSAN_OPTIONS="print_stacktrace=1"
|
||||
install_dependencies_manually: false
|
||||
run_build: true
|
||||
run_tests: true
|
||||
- name: TSAN
|
||||
os: ubuntu-20.04
|
||||
compiler:
|
||||
cxx: clang++-11
|
||||
cc: clang-11
|
||||
apt_package: clang++-11
|
||||
build_type: Debug
|
||||
extra_cmake_flags: ""
|
||||
extra_cxxflags: "-O2 -fsanitize=thread -fno-omit-frame-pointer -fno-omit-frame-pointer -fno-optimize-sibling-calls -fno-common"
|
||||
install_dependencies_manually: false
|
||||
run_build: true
|
||||
run_tests: true
|
||||
gtest_args: "--gtest_filter=-LoggingTest.LoggingAlsoWorksAfterFork:AssertTest_*:BacktraceTest.*:SubprocessTest.*:SignalCatcherTest.*_thenDies:SignalHandlerTest.*_thenDies:SignalHandlerTest.givenMultipleSigIntHandlers_whenRaising_thenCatchesCorrectSignal:CliTest_Setup.*:CliTest_IntegrityCheck.*:*/CliTest_WrongEnvironment.*:CliTest_Unmount.*:CliTest.WorksWithCommasInBasedir"
|
||||
extra_env_vars_for_test: OMP_NUM_THREADS=1
|
||||
- name: clang-tidy
|
||||
os: ubuntu-20.04
|
||||
compiler:
|
||||
cxx: clang++-11
|
||||
cc: clang-11
|
||||
apt_package: clang++-11 clang-tidy-11
|
||||
build_type: RelWithDebInfo
|
||||
extra_cmake_flags: ""
|
||||
extra_cxxflags: ""
|
||||
install_dependencies_manually: false
|
||||
run_build: false
|
||||
run_tests: false
|
||||
extra_env_vars_for_test: ""
|
||||
run_clang_tidy: true
|
||||
runs-on: ${{matrix.os}}
|
||||
env:
|
||||
# Setting conan cache dir to a location where our Github Cache Action can find it
|
||||
CONAN_USER_HOME: "${{ github.workspace }}/conan-cache/"
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v1
|
||||
#TODO Ideally, all the setup actions would be in their own subaction, but Github doesn't support using third party actions (e.g. cache) from nested actions yet, see https://github.com/actions/runner/issues/862
|
||||
- name: Setup MacOS
|
||||
if: ${{ runner.os == 'macOS' }}
|
||||
uses: ./.github/workflows/actions/setup_macos
|
||||
with:
|
||||
extra_homebrew_packages: ${{ matrix.compiler.homebrew_package }}
|
||||
- name: Setup Linux
|
||||
if: ${{ runner.os == 'Linux' }}
|
||||
uses: ./.github/workflows/actions/setup_linux
|
||||
with:
|
||||
os: ${{ matrix.os }}
|
||||
extra_apt_packages: ${{ matrix.compiler.apt_package }}
|
||||
- name: Install local dependencies
|
||||
if: ${{ matrix.install_dependencies_manually }}
|
||||
uses: ./.github/workflows/actions/install_local_dependencies
|
||||
- name: Find pip cache location
|
||||
id: pip_cache_dir
|
||||
run: |
|
||||
# We need at least pip 20.1 to get the "pip cache dir" command. Ubuntu doesn't have pip 20.1 by default yet, let's upgrade it
|
||||
python3 -m pip install -U pip
|
||||
python3 -m pip --version
|
||||
echo "::set-output name=pip_cache_dir::$(python3 -m pip cache dir)"
|
||||
shell: bash
|
||||
- name: Retrieve pip cache
|
||||
# Many jobs access the cache in parallel an we might observe an incomplete state that isn't valid. This would fail with a checksum error. Let's not fail the CI job but continue it, later on this job will upload a new new cache as part of the regular job run.
|
||||
continue-on-error: true
|
||||
# We're using an S3 based cache because the standard GitHub Action cache (actions/cache) only gives us 5GB of storage and we need more
|
||||
uses: leroy-merlin-br/action-s3-cache@8d75079437b388688b9ea9c7d73dff4ef975c5fa # v1.0.5
|
||||
with:
|
||||
action: get
|
||||
# note: this access key has read-only access to the cache. It's public so it runs on PRs.
|
||||
aws-access-key-id: AKIAV5S2KH4F5OUZXV5E
|
||||
aws-secret-access-key: qqqE8j/73w2EEJ984rVvxbDzdvnL93hk3X5ba1ac
|
||||
aws-region: eu-west-1
|
||||
bucket: ci-cache.cryfs
|
||||
key: v0-${{ runner.os }}-${{ matrix.os }}-setup-pip
|
||||
- name: Install Conan
|
||||
shell: bash
|
||||
run: |
|
||||
# Using "python3 -m pip" instead of "pip3" to make sure we get the same pip that we queried the cache dir for the Github Cache action
|
||||
if [[ "${{matrix.os}}" == "ubuntu-18.04" ]]; then
|
||||
python3 -m pip install setuptools
|
||||
fi
|
||||
python3 -m pip install conan
|
||||
- name: Save pip cache
|
||||
# note: this access key has write access to the cache. This can't run on PRs.
|
||||
if: ${{github.event_name == 'push' }}
|
||||
# Cache things sometimes indeterministically fail (roughly 1% of times this is run), let's not fail the job for it
|
||||
continue-on-error: true
|
||||
uses: leroy-merlin-br/action-s3-cache@8d75079437b388688b9ea9c7d73dff4ef975c5fa # v1.0.5
|
||||
with:
|
||||
action: put
|
||||
aws-access-key-id: ${{ secrets.CACHE_AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.CACHE_AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: eu-west-1
|
||||
bucket: ci-cache.cryfs
|
||||
key: v0-${{ runner.os }}-${{ matrix.os }}-setup-pip
|
||||
artifacts: ${{ steps.pip_cache_dir.outputs.pip_cache_dir }}
|
||||
#TODO Ideally, the Setup ccache step would be part of the build action, but Github doesn't support nested actions yet, see https://github.com/actions/runner/issues/862
|
||||
- name: Configure ccache
|
||||
shell: bash
|
||||
run: |
|
||||
set -v
|
||||
ccache --set-config=compiler_check=content
|
||||
ccache --set-config=max_size=500M
|
||||
ccache --set-config=cache_dir=${{github.workspace}}/.ccache
|
||||
ccache --set-config=compression=true
|
||||
ccache --set-config=sloppiness=include_file_mtime,include_file_ctime
|
||||
echo CCache config:
|
||||
ccache -p
|
||||
echo Clearing ccache statistics
|
||||
ccache -z
|
||||
- name: Hash flags
|
||||
id: hash_flags
|
||||
run: |
|
||||
# Write it into file first so we fail if the command fails. Errors inside $() are ignored by bash unfortunately.
|
||||
echo __${{matrix.extra_cmake_flags}}__${{matrix.extra_cxxflags}}__ | md5sum > /tmp/hash_flags
|
||||
echo "::set-output name=hash_flags::$(cat /tmp/hash_flags)"
|
||||
rm /tmp/hash_flags
|
||||
shell: bash
|
||||
- name: Retrieve ccache cache
|
||||
# Many jobs access the cache in parallel an we might observe an incomplete state that isn't valid. This would fail with a checksum error. Let's not fail the CI job but continue it, later on this job will upload a new new cache as part of the regular job run.
|
||||
continue-on-error: true
|
||||
# We're using an S3 based cache because the standard GitHub Action cache (actions/cache) only gives us 5GB of storage and we need more
|
||||
uses: leroy-merlin-br/action-s3-cache@8d75079437b388688b9ea9c7d73dff4ef975c5fa # v1.0.5
|
||||
with:
|
||||
action: get
|
||||
# note: this access key has read-only access to the cache. It's public so it runs on PRs.
|
||||
aws-access-key-id: AKIAV5S2KH4F5OUZXV5E
|
||||
aws-secret-access-key: qqqE8j/73w2EEJ984rVvxbDzdvnL93hk3X5ba1ac
|
||||
aws-region: eu-west-1
|
||||
bucket: ci-cache.cryfs
|
||||
key: v0-${{ runner.os }}-${{ matrix.os }}-ccache__${{matrix.compiler.cxx}}__${{matrix.compiler.cc}}__${{matrix.build_type}}__${{matrix.run_build}}__${{matrix.run_clang_tidy}}__${{steps.hash_flags.outputs.hash_flags}}__
|
||||
- name: Show ccache statistics
|
||||
shell: bash
|
||||
run: |
|
||||
set -v
|
||||
ccache -s
|
||||
# TODO Ideally, the Setup conan cache step would be part of the build action, but Github doesn't support nested actions yet, see https://github.com/actions/runner/issues/862
|
||||
- name: Retrieve conan cache
|
||||
# Many jobs access the cache in parallel an we might observe an incomplete state that isn't valid. This would fail with a checksum error. Let's not fail the CI job but continue it, later on this job will upload a new new cache as part of the regular job run.
|
||||
continue-on-error: true
|
||||
# We're using an S3 based cache because the standard GitHub Action cache (actions/cache) only gives us 5GB of storage and we need more
|
||||
uses: leroy-merlin-br/action-s3-cache@8d75079437b388688b9ea9c7d73dff4ef975c5fa # v1.0.5
|
||||
with:
|
||||
action: get
|
||||
# note: this access key has read-only access to the cache. It's public so it runs on PRs.
|
||||
aws-access-key-id: AKIAV5S2KH4F5OUZXV5E
|
||||
aws-secret-access-key: qqqE8j/73w2EEJ984rVvxbDzdvnL93hk3X5ba1ac
|
||||
aws-region: eu-west-1
|
||||
bucket: ci-cache.cryfs
|
||||
key: v1-${{ runner.os }}-${{ matrix.os }}-conancache__${{matrix.compiler.cxx}}__${{matrix.compiler.cc}}__${{matrix.build_type}}__
|
||||
- name: Build (macOS)
|
||||
if: ${{ matrix.run_build && runner.os == 'macOS' }}
|
||||
uses: ./.github/workflows/actions/run_build
|
||||
with:
|
||||
cxx: ${{ matrix.compiler.macos_cxx }}
|
||||
cc: ${{ matrix.compiler.macos_cc }}
|
||||
build_type: ${{ matrix.build_type }}
|
||||
- name: Build (Linux)
|
||||
if: ${{ matrix.run_build && runner.os == 'Linux' }}
|
||||
uses: ./.github/workflows/actions/run_build
|
||||
with:
|
||||
cxx: ${{ matrix.compiler.cxx }}
|
||||
cc: ${{ matrix.compiler.cc }}
|
||||
build_type: ${{ matrix.build_type }}
|
||||
extra_cmake_flags: ${{ matrix.extra_cmake_flags }}
|
||||
extra_cxxflags: ${{ matrix.extra_cxxflags }}
|
||||
- name: Run clang-tidy
|
||||
id: clang_tidy
|
||||
if: ${{ matrix.run_clang_tidy }}
|
||||
shell: bash
|
||||
run: |
|
||||
set -v
|
||||
mkdir cmake
|
||||
cd cmake
|
||||
if ! ../run-clang-tidy.sh -fix ; then
|
||||
git diff > /tmp/clang-tidy-fixes
|
||||
echo Found clang tidy fixes:
|
||||
cat /tmp/clang-tidy-fixes
|
||||
exit 1
|
||||
else
|
||||
echo Did not find any clang-tidy fixes
|
||||
fi
|
||||
- name: Upload fixes as artifact
|
||||
if: ${{ always() && matrix.run_clang_tidy }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: clang-tidy-fixes
|
||||
path: /tmp/clang-tidy-fixes
|
||||
- name: Show ccache statistics
|
||||
shell: bash
|
||||
run: |
|
||||
set -v
|
||||
ccache -s
|
||||
- name: Reduce ccache size
|
||||
if: ${{ runner.os == 'macOS' }}
|
||||
shell: bash
|
||||
run: |
|
||||
set -v
|
||||
ccache --evict-older-than 7d
|
||||
ccache -s
|
||||
- name: Save ccache cache
|
||||
# note: this access key has write access to the cache. This can't run on PRs.
|
||||
if: ${{ github.event_name == 'push' }}
|
||||
# Cache things sometimes indeterministically fail (roughly 1% of times this is run), let's not fail the job for it
|
||||
continue-on-error: true
|
||||
uses: leroy-merlin-br/action-s3-cache@8d75079437b388688b9ea9c7d73dff4ef975c5fa # v1.0.5
|
||||
with:
|
||||
action: put
|
||||
aws-access-key-id: ${{ secrets.CACHE_AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.CACHE_AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: eu-west-1
|
||||
bucket: ci-cache.cryfs
|
||||
key: v0-${{ runner.os }}-${{ matrix.os }}-ccache__${{matrix.compiler.cxx}}__${{matrix.compiler.cc}}__${{matrix.build_type}}__${{matrix.run_build}}__${{matrix.run_clang_tidy}}__${{steps.hash_flags.outputs.hash_flags}}__
|
||||
artifacts: ${{ github.workspace }}/.ccache
|
||||
- name: Save conan cache
|
||||
# note: this access key has write access to the cache. This can't run on PRs.
|
||||
if: ${{ github.event_name == 'push' }}
|
||||
# Cache things sometimes indeterministically fail (roughly 1% of times this is run), let's not fail the job for it
|
||||
continue-on-error: true
|
||||
uses: leroy-merlin-br/action-s3-cache@8d75079437b388688b9ea9c7d73dff4ef975c5fa # v1.0.5
|
||||
with:
|
||||
action: put
|
||||
aws-access-key-id: ${{ secrets.CACHE_AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.CACHE_AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: eu-west-1
|
||||
bucket: ci-cache.cryfs
|
||||
key: v1-${{ runner.os }}-${{ matrix.os }}-conancache__${{matrix.compiler.cxx}}__${{matrix.compiler.cc}}__${{matrix.build_type}}__
|
||||
artifacts: ${{ env.CONAN_USER_HOME }}
|
||||
- name: Test
|
||||
if: ${{ matrix.run_tests }}
|
||||
uses: ./.github/workflows/actions/run_tests
|
||||
with:
|
||||
gtest_args: ${{matrix.gtest_args}}
|
||||
extra_env_vars: ${{matrix.extra_env_vars_for_test}}
|
||||
|
||||
windows:
|
||||
name: CI (Windows)
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
name: [""]
|
||||
os:
|
||||
- windows-2019
|
||||
arch:
|
||||
- Win32
|
||||
- x64
|
||||
build_type:
|
||||
- Debug
|
||||
- Release
|
||||
- RelWithDebInfo
|
||||
runs-on: ${{matrix.os}}
|
||||
env:
|
||||
# Setting conan cache dir to a location where our Github Cache Action can find it
|
||||
CONAN_USER_HOME: "D:/.conan/f/"
|
||||
CONAN_USER_HOME_SHORT: "D:/.conan/s/"
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v1
|
||||
#TODO Ideally, all the setup actions would be in their own subaction, but Github doesn't support using third party actions (e.g. cache) from nested actions yet, see https://github.com/actions/runner/issues/862
|
||||
- name: Setup Windows
|
||||
uses: ./.github/workflows/actions/setup_windows
|
||||
- name: Find pip cache location
|
||||
id: pip_cache_dir
|
||||
run: |
|
||||
# We need at least pip 20.1 to get the "pip cache dir" command. Ubuntu doesn't have pip 20.1 by default yet, let's upgrade it
|
||||
python3 -m pip install -U pip
|
||||
python3 -m pip --version
|
||||
echo "::set-output name=pip_cache_dir::$(python3 -m pip cache dir)"
|
||||
shell: bash
|
||||
- name: Retrieve pip cache
|
||||
# Many jobs access the cache in parallel an we might observe an incomplete state that isn't valid. This would fail with a checksum error. Let's not fail the CI job but continue it, later on this job will upload a new new cache as part of the regular job run.
|
||||
continue-on-error: true
|
||||
# We're using an S3 based cache because the standard GitHub Action cache (actions/cache) only gives us 5GB of storage and we need more
|
||||
uses: leroy-merlin-br/action-s3-cache@8d75079437b388688b9ea9c7d73dff4ef975c5fa # v1.0.5
|
||||
with:
|
||||
action: get
|
||||
# note: this access key has read-only access to the cache. It's public so it runs on PRs.
|
||||
aws-access-key-id: AKIAV5S2KH4F5OUZXV5E
|
||||
aws-secret-access-key: qqqE8j/73w2EEJ984rVvxbDzdvnL93hk3X5ba1ac
|
||||
aws-region: eu-west-1
|
||||
bucket: ci-cache.cryfs
|
||||
key: v0-${{ runner.os }}-${{ matrix.os }}-setup-pip
|
||||
- name: Install Conan
|
||||
shell: bash
|
||||
run: |
|
||||
# Using "python3 -m pip" instead of "pip3" to make sure we get the same pip that we queried the cache dir for the Github Cache action
|
||||
python3 -m pip install conan
|
||||
- name: Save pip cache
|
||||
# note: this access key has write access to the cache. This can't run on PRs.
|
||||
if: ${{github.event_name == 'push' }}
|
||||
# Cache things sometimes indeterministically fail (roughly 1% of times this is run), let's not fail the job for it
|
||||
continue-on-error: true
|
||||
uses: leroy-merlin-br/action-s3-cache@8d75079437b388688b9ea9c7d73dff4ef975c5fa # v1.0.5
|
||||
with:
|
||||
action: put
|
||||
aws-access-key-id: ${{ secrets.CACHE_AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.CACHE_AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: eu-west-1
|
||||
bucket: ci-cache.cryfs
|
||||
key: v0-${{ runner.os }}-${{ matrix.os }}-setup-pip
|
||||
artifacts: ${{ steps.pip_cache_dir.outputs.pip_cache_dir }}
|
||||
#TODO Ideally, the Setup ccache step would be part of the build action, but Github doesn't support nested actions yet, see https://github.com/actions/runner/issues/862
|
||||
# - name: Configure ccache
|
||||
# shell: bash
|
||||
# run: |
|
||||
# set -v
|
||||
# ccache --set-config=compiler_check=content
|
||||
# ccache --set-config=max_size=500M
|
||||
# ccache --set-config=cache_dir=${{github.workspace}}/.ccache
|
||||
# ccache --set-config=compression=true
|
||||
# ccache --set-config=sloppiness=include_file_mtime,include_file_ctime
|
||||
# echo CCache config:
|
||||
# ccache -p
|
||||
# echo Clearing ccache statistics
|
||||
# ccache -z
|
||||
# - name: Hash flags
|
||||
# id: hash_flags
|
||||
# run: |
|
||||
# # Write it into file first so we fail if the command fails. Errors inside $() are ignored by bash unfortunately.
|
||||
# echo __${{matrix.extra_cmake_flags}}__${{matrix.extra_cxxflags}}__ | md5sum > /tmp/hash_flags
|
||||
# echo "::set-output name=hash_flags::$(cat /tmp/hash_flags)"
|
||||
# rm /tmp/hash_flags
|
||||
# shell: bash
|
||||
# - name: Retrieve ccache cache
|
||||
# # Many jobs access the cache in parallel an we might observe an incomplete state that isn't valid. This would fail with a checksum error. Let's not fail the CI job but continue it, later on this job will upload a new new cache as part of the regular job run.
|
||||
# continue-on-error: true
|
||||
# # We're using an S3 based cache because the standard GitHub Action cache (actions/cache) only gives us 5GB of storage and we need more
|
||||
# uses: leroy-merlin-br/action-s3-cache@8d75079437b388688b9ea9c7d73dff4ef975c5fa # v1.0.5
|
||||
# with:
|
||||
# action: get
|
||||
# # note: this access key has read-only access to the cache. It's public so it runs on PRs.
|
||||
# aws-access-key-id: AKIAV5S2KH4F5OUZXV5E
|
||||
# aws-secret-access-key: qqqE8j/73w2EEJ984rVvxbDzdvnL93hk3X5ba1ac
|
||||
# aws-region: eu-west-1
|
||||
# bucket: ci-cache.cryfs
|
||||
# key: v0-${{ runner.os }}-${{ matrix.os }}-ccache__${{matrix.compiler.cxx}}__${{matrix.compiler.cc}}__${{matrix.build_type}}__${{matrix.run_build}}__${{matrix.run_clang_tidy}}__${{steps.hash_flags.outputs.hash_flags}}__
|
||||
# - name: Show ccache statistics
|
||||
# shell: bash
|
||||
# run: |
|
||||
# set -v
|
||||
# ccache -s
|
||||
# TODO Ideally, the Setup conan cache step would be part of the build action, but Github doesn't support nested actions yet, see https://github.com/actions/runner/issues/862
|
||||
- name: Retrieve conan cache
|
||||
# Many jobs access the cache in parallel and we might observe an incomplete state that isn't valid. This would fail with a checksum error. Let's not fail the CI job but continue it; later on, this job will upload a new cache as part of the regular job run.
|
||||
continue-on-error: true
|
||||
# We're using an S3 based cache because the standard GitHub Action cache (actions/cache) only gives us 5GB of storage and we need more
|
||||
uses: leroy-merlin-br/action-s3-cache@8d75079437b388688b9ea9c7d73dff4ef975c5fa # v1.0.5
|
||||
with:
|
||||
action: get
|
||||
# note: this access key has read-only access to the cache. It's public so it runs on PRs.
|
||||
aws-access-key-id: AKIAV5S2KH4F5OUZXV5E
|
||||
aws-secret-access-key: qqqE8j/73w2EEJ984rVvxbDzdvnL93hk3X5ba1ac
|
||||
aws-region: eu-west-1
|
||||
bucket: ci-cache.cryfs
|
||||
key: v1-${{ runner.os }}-${{ matrix.os }}-conancache__${{matrix.compiler.cxx}}__${{matrix.compiler.cc}}__${{matrix.build_type}}__
|
||||
- name: Build
|
||||
shell: bash
|
||||
run: |
|
||||
set -v
|
||||
# note: The cmake+ninja workflow requires us to set build type in both cmake commands ('cmake' and 'cmake --build'), otherwise the cryfs.exe will depend on debug versions of the visual studio c++ runtime (i.e. msvcp140d.dll)
|
||||
# note: The CMAKE_SYSTEM_VERSION variable is set to 10.0.18362.0 because as of this writing, appveyor uses 10.0.17763.0 and that has a bug, see https://developercommunity.visualstudio.com/content/problem/343296/sdk-and-experimentalpreprocessor.html
|
||||
# TODO CMAKE_SYSTEM_VERSION is probably not needed anymore
|
||||
mkdir build
|
||||
cd build
|
||||
cmake .. -G "Visual Studio 16 2019" -DCMAKE_BUILD_TYPE=${{matrix.build_type}} -DBUILD_TESTING=on -DDOKAN_PATH="C:/Program Files/Dokan/DokanLibrary-1.3.0" -A ${{matrix.arch}} -DCMAKE_SYSTEM_VERSION="10.0.18362.0"
|
||||
cmake --build . --config ${{matrix.build_type}}
|
||||
# - name: Show ccache statistics
|
||||
# shell: bash
|
||||
# run: |
|
||||
# set -v
|
||||
# ccache -s
|
||||
# - name: Reduce ccache size
|
||||
# if: ${{ runner.os == 'macOS' }}
|
||||
# shell: bash
|
||||
# run: |
|
||||
# set -v
|
||||
# ccache --evict-older-than 7d
|
||||
# ccache -s
|
||||
# - name: Save ccache cache
|
||||
# # note: this access key has write access to the cache. This can't run on PRs.
|
||||
# if: ${{ github.event_name == 'push' }}
|
||||
# # Cache things sometimes indeterministically fail (roughly 1% of times this is run), let's not fail the job for it
|
||||
# continue-on-error: true
|
||||
# uses: leroy-merlin-br/action-s3-cache@8d75079437b388688b9ea9c7d73dff4ef975c5fa # v1.0.5
|
||||
# with:
|
||||
# action: put
|
||||
# aws-access-key-id: ${{ secrets.CACHE_AWS_ACCESS_KEY_ID }}
|
||||
# aws-secret-access-key: ${{ secrets.CACHE_AWS_SECRET_ACCESS_KEY }}
|
||||
# aws-region: eu-west-1
|
||||
# bucket: ci-cache.cryfs
|
||||
# key: v0-${{ runner.os }}-${{ matrix.os }}-ccache__${{matrix.compiler.cxx}}__${{matrix.compiler.cc}}__${{matrix.build_type}}__${{matrix.run_build}}__${{matrix.run_clang_tidy}}__${{steps.hash_flags.outputs.hash_flags}}__
|
||||
# artifacts: ${{ github.workspace }}/.ccache
|
||||
- name: Save conan cache
|
||||
# note: this access key has write access to the cache. This can't run on PRs.
|
||||
if: ${{ github.event_name == 'push' }}
|
||||
# Cache things sometimes indeterministically fail (roughly 1% of times this is run), let's not fail the job for it
|
||||
continue-on-error: true
|
||||
uses: leroy-merlin-br/action-s3-cache@8d75079437b388688b9ea9c7d73dff4ef975c5fa # v1.0.5
|
||||
with:
|
||||
action: put
|
||||
aws-access-key-id: ${{ secrets.CACHE_AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.CACHE_AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: eu-west-1
|
||||
bucket: ci-cache.cryfs
|
||||
key: v1-${{ runner.os }}-${{ matrix.os }}-conancache__${{matrix.compiler.cxx}}__${{matrix.compiler.cc}}__${{matrix.build_type}}__
|
||||
artifacts: |
|
||||
${{ env.CONAN_USER_HOME }}
|
||||
${{ env.CONAN_USER_HOME_SHORT }}
|
||||
- name: Test
|
||||
shell: bash
|
||||
run: |
|
||||
set -v
|
||||
cd build
|
||||
./test/gitversion/${{matrix.build_type}}/gitversion-test.exe
|
||||
./test/cpp-utils/${{matrix.build_type}}/cpp-utils-test.exe
|
||||
# ./test/fspp/${{matrix.build_type}}/fspp-test.exe
|
||||
./test/parallelaccessstore/${{matrix.build_type}}/parallelaccessstore-test.exe
|
||||
./test/blockstore/${{matrix.build_type}}/blockstore-test.exe
|
||||
./test/blobstore/${{matrix.build_type}}/blobstore-test.exe
|
||||
./test/cryfs/${{matrix.build_type}}/cryfs-test.exe
|
||||
# TODO Enable cryfs-cli-test on Windows
|
||||
# ./test/cryfs-cli/${{matrix.build_type}}/cryfs-cli-test.exe
|
||||
- name: CPack
|
||||
shell: bash
|
||||
run: |
|
||||
set -v
|
||||
cd build
|
||||
cpack -C ${{matrix.build_type}} --verbose -G WIX
|
||||
- name: Upload installers as artifact
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: cryfs-${{matrix.arch}}-${{matrix.build_type}}.msi
|
||||
path: build/cryfs-*.msi
|
6
.gitmodules
vendored
Normal file
6
.gitmodules
vendored
Normal file
@ -0,0 +1,6 @@
|
||||
[submodule "vendor/boost/Boost-for-Android"]
|
||||
path = vendor/boost/Boost-for-Android
|
||||
url = https://github.com/moritz-wundke/Boost-for-Android.git
|
||||
[submodule "vendor/spdlog"]
|
||||
path = vendor/spdlog
|
||||
url = https://github.com/gabime/spdlog.git
|
@ -17,7 +17,7 @@ require_clang_version(7.0)
|
||||
|
||||
# Default value is not to build test cases
|
||||
option(BUILD_TESTING "build test cases" OFF)
|
||||
option(CRYFS_UPDATE_CHECKS "let cryfs check for updates and security vulnerabilities" ON)
|
||||
option(CRYFS_UPDATE_CHECKS "let cryfs check for updates and security vulnerabilities" OFF)
|
||||
option(DISABLE_OPENMP "allow building without OpenMP libraries. This will cause performance degradations." OFF)
|
||||
|
||||
# The following options are helpful for development and/or CI
|
||||
@ -26,8 +26,6 @@ option(USE_CLANG_TIDY "build with clang-tidy checks enabled" OFF)
|
||||
option(USE_IWYU "build with iwyu checks enabled" OFF)
|
||||
option(CLANG_TIDY_WARNINGS_AS_ERRORS "treat clang-tidy warnings as errors" OFF)
|
||||
|
||||
set(DEPENDENCY_CONFIG "cmake-utils/DependenciesFromConan.cmake" CACHE FILEPATH "cmake configuration file defining how to get dependencies")
|
||||
|
||||
if (MSVC)
|
||||
option(DOKAN_PATH "Location of the Dokan library, e.g. C:\\Program Files\\Dokan\\DokanLibrary-1.1.0" "")
|
||||
endif()
|
||||
@ -44,10 +42,5 @@ if(MSVC)
|
||||
add_definitions(/bigobj)
|
||||
endif()
|
||||
|
||||
include(${DEPENDENCY_CONFIG})
|
||||
|
||||
add_subdirectory(vendor EXCLUDE_FROM_ALL)
|
||||
add_subdirectory(src)
|
||||
add_subdirectory(doc)
|
||||
add_subdirectory(test)
|
||||
add_subdirectory(cpack)
|
||||
|
249
README.md
249
README.md
@ -1,230 +1,19 @@
|
||||
# CryFS
|
||||
|
||||
CryFS encrypts your files, so you can safely store them anywhere. It works well together with cloud services like Dropbox, iCloud, OneDrive and others.
|
||||
See [https://www.cryfs.org](https://www.cryfs.org).
|
||||
|
||||
Install latest release
|
||||
======================
|
||||
|
||||
Linux
|
||||
------
|
||||
|
||||
CryFS is available through apt, but depending on which version of Ubuntu or Debian you're using, you may get an old version.
|
||||
|
||||
sudo apt install cryfs
|
||||
|
||||
The following should work on Arch and Arch-based distros:
|
||||
|
||||
sudo pacman -S cryfs
|
||||
|
||||
If you use homebrew-core, you should be able to install CryFS using the following instruction:
|
||||
|
||||
brew install cryfs/tap/cryfs
|
||||
|
||||
Additionally, the following would work for any Linux distro with the Nix package manager:
|
||||
|
||||
nix-env -iA nixpkgs.cryfs
|
||||
|
||||
OSX
|
||||
----
|
||||
|
||||
CryFS is distributed via Homebrew, MacPorts, and Nix.
|
||||
|
||||
If you use Homebrew:
|
||||
|
||||
brew install --cask macfuse
|
||||
brew install cryfs/tap/cryfs
|
||||
|
||||
If you use MacPorts:
|
||||
|
||||
port install cryfs
|
||||
|
||||
For Nix, the macOS build for cryfs is available in the Nixpkgs channel 21.05
|
||||
and later:
|
||||
|
||||
brew install --cask macfuse # or download from https://osxfuse.github.io/
|
||||
nix-env -iA nixpkgs.cryfs
|
||||
|
||||
Windows (experimental)
|
||||
----------------------
|
||||
|
||||
CryFS has experimental Windows support since the 0.10 release series. To install it, do:
|
||||
|
||||
1. Install [DokanY](https://github.com/dokan-dev/dokany/releases)
|
||||
2. Install [Microsoft Visual C++ Redistributable for Visual Studio 2019](https://support.microsoft.com/en-us/help/2977003/the-latest-supported-visual-c-downloads)
|
||||
3. Install [CryFS](https://www.cryfs.org/#download)
|
||||
|
||||
GUI
|
||||
===
|
||||
There are some GUI applications with CryFS support. You usually have to install the GUI **and** also CryFS itself for it to work.
|
||||
- [SiriKali](https://mhogomchungu.github.io/sirikali/)
|
||||
- [Plasma Vault](https://www.kde.org/announcements/plasma-5.11.0.php) in KDE Plasma >= 5.11
|
||||
|
||||
Stability / Production readiness
|
||||
====================
|
||||
CryFS 0.10 or later is stable for most use cases, but has a couple of known issues that can corrupt your file system.
|
||||
They don't happen in normal day to day use, but can happen if you don't pay attention or aren't aware of them.
|
||||
This is why the version number hasn't reached 1.0 yet.
|
||||
|
||||
- If you kill the CryFS process while it was in the middle of writing data (either intentionally or unintentionally by losing power to your PC), your file system could get corrupted.
|
||||
CryFS does not do journaling. Note that in 0.10.x, read accesses into a CryFS file system can cause writes because file timestamps get updated. So if you're unlucky, your file system
|
||||
could get corrupted if you lose power while you were reading files as well. Read accesses aren't an issue in CryFS 0.11.x anymore, because it mounts the filesystem with `noatime` by default.
|
||||
- The same corruption mentioned above can happen when CryFS is trying to write data but your disk ran out of space, causing the write to fail.
|
||||
- CryFS does not currently support concurrent access, i.e. accessing a file system from multiple devices at the same time.
|
||||
CryFS works very well for storing data in a cloud and using it from multiple devices, but you need to make sure that only one CryFS process is active at any point in time, and you also need
|
||||
to make sure that the cloud synchronization client (e.g. Dropbox) finishes its synchronization before you switch devices. There are some ideas on how concurrent access could be supported in
|
||||
future versions, but it's a hard problem to solve. If you do happen to access the file system from multiple devices at the same time, it will likely go well most of the time, but it can corrupt your file system.
|
||||
- In addition to the scenarios above that can corrupt your file system, note that there is currently no fsck-like tool for CryFS that could recover your data. Although such a tool is, in theory, possible,
|
||||
it hasn't been implemented yet and a corrupted file system will most likely cause a loss of your data.
|
||||
|
||||
If the scenarios mentioned above don't apply to you, then you can consider CryFS 0.10.x as stable. The 0.9.x versions are not recommended anymore.
|
||||
|
||||
Building from source
|
||||
====================
|
||||
|
||||
Requirements
|
||||
------------
|
||||
- Git (for getting the source code)
|
||||
- GCC version >= 7 or Clang >= 7
|
||||
- CMake version >= 3.10
|
||||
- pkg-config (on Unix)
|
||||
- Conan package manager
|
||||
- libcurl4 (including development headers)
|
||||
- SSL development libraries (including development headers, e.g. libssl-dev)
|
||||
- libFUSE version >= 2.8.6 (including development headers), on Mac OS X instead install macFUSE from https://osxfuse.github.io/
|
||||
- Python >= 3.5
|
||||
- OpenMP
|
||||
|
||||
You can use the following commands to install these requirements
|
||||
|
||||
# Ubuntu
|
||||
$ sudo apt install git g++ cmake make pkg-config libcurl4-openssl-dev libssl-dev libfuse-dev python python3-pip
|
||||
$ sudo pip3 install conan
|
||||
|
||||
# Fedora
|
||||
$ sudo dnf install git gcc-c++ cmake make pkgconf libcurl-devel openssl-devel fuse-devel python python3-pip
|
||||
$ sudo pip3 install conan
|
||||
|
||||
# Macintosh
|
||||
$ brew install cmake pkg-config openssl libomp macfuse
|
||||
$ sudo pip3 install conan
|
||||
|
||||
Build & Install
|
||||
---------------
|
||||
|
||||
1. Clone repository
|
||||
|
||||
$ git clone https://github.com/cryfs/cryfs.git cryfs
|
||||
$ cd cryfs
|
||||
|
||||
2. Build
|
||||
|
||||
$ mkdir build && cd build
|
||||
$ cmake ..
|
||||
$ make
|
||||
|
||||
3. Install
|
||||
|
||||
$ sudo make install
|
||||
|
||||
You can pass the following variables to the *cmake* command (using *-Dvariablename=value*):
|
||||
- **-DCMAKE_BUILD_TYPE**=[Release|Debug]: Whether to run code optimization or add debug symbols. Default: Release
|
||||
- **-DBUILD_TESTING**=[on|off]: Whether to build the test cases (can take a long time). Default: off
|
||||
- **-DCRYFS_UPDATE_CHECKS**=off: Build a CryFS that doesn't check online for updates and security vulnerabilities.
|
||||
|
||||
Building on Windows (experimental)
|
||||
----------------------------------
|
||||
|
||||
1. Install conan. If you want to use "pip install conan", you may have to install Python first.
|
||||
2. Install DokanY 1.3.0.1000. Other versions may not work.
|
||||
3. Run CMake to generate Visual Studio 2019 project files (this may not be necessary, but it makes sure everything works as expected and you can see potential errors happening during this step)
|
||||
|
||||
$ mkdir build && cd build
|
||||
$ cmake .. -G "Visual Studio 16 2019" -DDOKAN_PATH=[dokan library location, e.g. "C:\Program Files\Dokan\DokanLibrary-1.3.0"]
|
||||
|
||||
4. Potentially modify CMakeSettings.json file to fit your needs
|
||||
5. Open the cryfs source folder with Visual Studio 2019, or alternatively build on command line using
|
||||
|
||||
$ cd build && cmake --build . --config RelWithDebInfo
|
||||
|
||||
Troubleshooting
|
||||
---------------
|
||||
|
||||
On most systems, CMake should find the libraries automatically. However, that doesn't always work.
|
||||
|
||||
1. **Fuse library not found**
|
||||
|
||||
Pass in the library path with
|
||||
|
||||
PKG_CONFIG_PATH=/path-to-fuse-or-macFUSE/lib/pkgconfig cmake ..
|
||||
|
||||
2. **Fuse headers not found**
|
||||
|
||||
Pass in the include path with
|
||||
|
||||
PKG_CONFIG_PATH=/path-to-fuse-or-macFUSE/lib/pkgconfig cmake ..
|
||||
|
||||
3. **Openssl headers not found**
|
||||
|
||||
Pass in the include path with
|
||||
|
||||
cmake .. -DCMAKE_C_FLAGS="-I/path/to/openssl/include"
|
||||
|
||||
4. **OpenMP not found (osx)**
|
||||
|
||||
Either build it without OpenMP
|
||||
|
||||
cmake .. -DDISABLE_OPENMP=on
|
||||
|
||||
but this will cause slower file system mount times (performance after mounting will be unaffected).
|
||||
If you installed OpenMP with homebrew or macports, it will be autodetected.
|
||||
If that doesn't work for some reason (or you want to use a different installation than the autodetected one),
|
||||
pass in these flags:
|
||||
|
||||
cmake .. -DOpenMP_CXX_FLAGS='-Xpreprocessor -fopenmp -I/path/to/openmp/include' -DOpenMP_CXX_LIB_NAMES=omp -DOpenMP_omp_LIBRARY=/path/to/libomp.dylib
|
||||
|
||||
|
||||
Using local dependencies
|
||||
-------------------------------
|
||||
Starting with CryFS 0.11, Conan is used for dependency management.
|
||||
When you build CryFS, Conan downloads the exact version of each dependency library that was also used for development.
|
||||
All dependencies are linked statically, so there should be no incompatibility with locally installed libraries.
|
||||
This is the recommended way because it has the highest probability of working correctly.
|
||||
|
||||
However, some distributions prefer software packages to be built against dependencies dynamically and against locally installed versions of libraries.
|
||||
So if you're building a package for such a distribution, you have the option of doing that, at the cost of potential incompatibilities.
|
||||
If you follow this workflow, please make sure to extensively test your build of CryFS.
|
||||
You're using a setup that wasn't tested by the CryFS developers.
|
||||
|
||||
To use local dependencies, you need to tell the CryFS build how to get these dependencies.
|
||||
You can do this by writing a small CMake configuration file and passing it to the CryFS build using `-DDEPENDENCY_CONFIG=filename`.
|
||||
This configuration file needs to define a cmake target for each of the dependencies.
|
||||
|
||||
Here's an [example config file](cmake-utils/DependenciesFromConan.cmake) that gets the dependencies from conan.
|
||||
And here's another [example config file](cmake-utils/DependenciesFromLocalSystem.cmake) that works for getting dependencies that are locally installed in Ubuntu.
|
||||
You can create your own configuration file to tell the build how to get its dependencies and, for example, mix and match. Get some dependencies from Conan and others from the local system.
|
||||
|
||||
|
||||
Creating .deb and .rpm packages
|
||||
-------------------------------
|
||||
|
||||
It is recommended to install CryFS using packages, because that allows for an easy way to uninstall it again once you don't need it anymore.
|
||||
|
||||
If you want to create a .rpm package, you need to install rpmbuild.
|
||||
|
||||
1. Clone repository
|
||||
|
||||
$ git clone https://github.com/cryfs/cryfs.git cryfs
|
||||
$ cd cryfs
|
||||
|
||||
2. Build
|
||||
|
||||
$ mkdir cmake && cd cmake
|
||||
$ cmake .. -DCMAKE_BUILD_TYPE=RelWithDebInfo -DBUILD_TESTING=off
|
||||
$ make package
|
||||
|
||||
|
||||
Disclaimer
|
||||
----------------------
|
||||
|
||||
In the event of a password leak, you are strongly advised to create a new filesystem and copy all the data over from the previous one. Then, remove all copies of the compromised filesystem and config file (e.g., from the "previous versions" feature of your cloud system) to prevent access to the key (and, as a result, your data) using the leaked password.
|
||||
libcryfs is a redesign of the original [CryFS](https://github.com/cryfs/cryfs) code to work as a library. Volumes are not mounted with [FUSE](https://www.kernel.org/doc/html/latest/filesystems/fuse.html) but rather opened in memory and accessed through API calls. What is the purpose?
|
||||
- Allow the use of CryFS on platforms where FUSE is not available (such as Android)
|
||||
- Reduce attack surface by restricting volumes access to only one process rather than one user
|
||||
|
||||
## Warning !
|
||||
The only goal of this library is to be integrated into [DroidFS](https://forge.chapril.org/hardcoresushi/DroidFS). Thus, the current API has been designed to be accessed only via the [Java Native Interface](https://docs.oracle.com/javase/8/docs/technotes/guides/jni), and logging has been redirected to Android logcat. You cannot use this library as-is outside of Android. Moreover, libcryfs doesn't implement all features provided by CryFS, such as symbolic links, editing attributes, or flushing files. Use it at your own risk!
|
||||
|
||||
## Changes:
|
||||
Here is what has been modified from the original project:
|
||||
- Update checks disabled
|
||||
- FUSE, [curl](https://curl.se), [range-v3](https://github.com/ericniebler/range-v3) dependencies removed
|
||||
- [Conan](https://conan.io) configuration removed (switched to git submodules for [boost](https://www.boost.org) and [spdlog](https://github.com/gabime/spdlog))
|
||||
- `src/cryfs-unmount` and `src/stats` directories deleted
|
||||
- sh scripts, `cpack`, `doc`, `test` and `vendor/googletest` deleted
|
||||
- Main program `cryfs-cli` removed
|
||||
- boost build configured with [Boost-for-Android](https://github.com/moritz-wundke/Boost-for-Android)
|
||||
- Interactive mode removed (including any writes to stdout)
|
||||
- Logging output redirected to logcat
|
||||
- JNI API created in `src/jni`
|
||||
|
11
appveyor.yml
11
appveyor.yml
@ -1,11 +0,0 @@
|
||||
image:
|
||||
- Visual Studio 2019
|
||||
|
||||
platform:
|
||||
- x64
|
||||
|
||||
configuration:
|
||||
- Release
|
||||
|
||||
build_script:
|
||||
- cmd: echo Appveyor CI is disabled since we now have Github Actions
|
10
archive.sh
10
archive.sh
@ -1,10 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
TAG=$1
|
||||
GPGHOMEDIR=$2
|
||||
|
||||
git archive --format=tgz "$1" > "cryfs-$1.tar.gz"
|
||||
gpg --homedir "$GPGHOMEDIR" --armor --detach-sign "cryfs-$1.tar.gz"
|
||||
|
||||
git archive --format=tar "$1" | xz -9 > "cryfs-$1.tar.xz"
|
||||
gpg --homedir "$GPGHOMEDIR" --armor --detach-sign "cryfs-$1.tar.xz"
|
@ -1,19 +0,0 @@
|
||||
include(cmake-utils/conan.cmake)
|
||||
|
||||
conan_cmake_autodetect(settings)
|
||||
conan_cmake_install(
|
||||
PATH_OR_REFERENCE ${CMAKE_CURRENT_SOURCE_DIR}/conanfile.py
|
||||
BUILD missing
|
||||
SETTINGS ${settings})
|
||||
|
||||
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
|
||||
conan_basic_setup(TARGETS SKIP_STD NO_OUTPUT_DIRS)
|
||||
|
||||
add_library(CryfsDependencies_range-v3 INTERFACE)
|
||||
target_link_libraries(CryfsDependencies_range-v3 INTERFACE CONAN_PKG::range-v3)
|
||||
|
||||
add_library(CryfsDependencies_spdlog INTERFACE)
|
||||
target_link_libraries(CryfsDependencies_spdlog INTERFACE CONAN_PKG::spdlog)
|
||||
|
||||
add_library(CryfsDependencies_boost INTERFACE)
|
||||
target_link_libraries(CryfsDependencies_boost INTERFACE CONAN_PKG::boost)
|
@ -1,61 +0,0 @@
|
||||
# This configuration file can be used to build CryFS against local dependencies instead of using Conan.
|
||||
#
|
||||
# Example:
|
||||
# $ mkdir build && cd build && cmake .. -DDEPENDENCY_CONFIG=../cmake-utils/DependenciesFromLocalSystem.cmake
|
||||
#
|
||||
# Note that this is only provided as an example and not officially supported. Please still open issues
|
||||
# on GitHub if it doesn't work though.
|
||||
#
|
||||
# There's another file in this directory, DependenciesFromConan.cmake, which, well, gets the dependencies from
|
||||
# Conan instead of from the local system. This is the default. You can also create your own file to tell the build
|
||||
# how to get its dependencies, for example you can mix and match, get some dependencies from Conan and others
|
||||
# from the local system. If you mix and match Conan and local dependencies, please call conan_basic_setup()
|
||||
# **after** running all find_package() for your local dependencies, otherwise find_package() might also find
|
||||
# the versions from Conan.
|
||||
#
|
||||
# Note that if you use dependencies from the local system, you're very likely using different versions of the
|
||||
# dependencies than were used in the development of CryFS. The official version of each dependency required is
|
||||
# listed in conanfile.py. Different versions might work but are untested. Please intensively test your CryFS build
|
||||
# if you build it with different versions of the dependencies.
|
||||
|
||||
|
||||
function(check_target_is_not_from_conan TARGET)
|
||||
get_target_property(INCLUDE_DIRS ${TARGET} INTERFACE_INCLUDE_DIRECTORIES)
|
||||
if("${INCLUDE_DIRS}" MATCHES "conan")
|
||||
message(WARNING "It seems setting up the local ${TARGET} dependency didn't work correctly and it got the version from Conan instead. Please set up cmake so that it sets up conan after all local dependencies are defined.")
|
||||
endif()
|
||||
endfunction()
|
||||
|
||||
|
||||
|
||||
|
||||
# Setup range-v3 dependency
|
||||
find_package(range-v3 REQUIRED)
|
||||
check_target_is_not_from_conan(range-v3::range-v3)
|
||||
add_library(CryfsDependencies_range-v3 INTERFACE)
|
||||
target_link_libraries(CryfsDependencies_range-v3 INTERFACE range-v3::range-v3)
|
||||
|
||||
|
||||
|
||||
|
||||
# Setup boost dependency
|
||||
set(Boost_USE_STATIC_LIBS OFF)
|
||||
find_package(Boost 1.65.1
|
||||
REQUIRED
|
||||
COMPONENTS filesystem system thread chrono program_options)
|
||||
check_target_is_not_from_conan(Boost::boost)
|
||||
add_library(CryfsDependencies_boost INTERFACE)
|
||||
target_link_libraries(CryfsDependencies_boost INTERFACE Boost::boost Boost::filesystem Boost::thread Boost::chrono Boost::program_options)
|
||||
if(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
|
||||
# Also link to rt, because boost thread needs that.
|
||||
target_link_libraries(CryfsDependencies_boost INTERFACE rt)
|
||||
endif()
|
||||
|
||||
|
||||
|
||||
|
||||
# Setup spdlog dependency
|
||||
find_package(spdlog REQUIRED)
|
||||
check_target_is_not_from_conan(spdlog::spdlog)
|
||||
add_library(CryfsDependencies_spdlog INTERFACE)
|
||||
target_link_libraries(CryfsDependencies_spdlog INTERFACE spdlog::spdlog)
|
@ -1,903 +0,0 @@
|
||||
# Taken from https://github.com/conan-io/cmake-conan/blob/v0.16.1/conan.cmake
|
||||
|
||||
# The MIT License (MIT)
|
||||
|
||||
# Copyright (c) 2018 JFrog
|
||||
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
# of this software and associated documentation files (the "Software"), to deal
|
||||
# in the Software without restriction, including without limitation the rights
|
||||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
# copies of the Software, and to permit persons to whom the Software is
|
||||
# furnished to do so, subject to the following conditions:
|
||||
|
||||
# The above copyright notice and this permission notice shall be included in all
|
||||
# copies or substantial portions of the Software.
|
||||
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
|
||||
|
||||
|
||||
# This file comes from: https://github.com/conan-io/cmake-conan. Please refer
|
||||
# to this repository for issues and documentation.
|
||||
|
||||
# Its purpose is to wrap and launch Conan C/C++ Package Manager when cmake is called.
|
||||
# It will take CMake current settings (os, compiler, compiler version, architecture)
|
||||
# and translate them to conan settings for installing and retrieving dependencies.
|
||||
|
||||
# It is intended to facilitate developers building projects that have conan dependencies,
|
||||
# but it is only necessary on the end-user side. It is not necessary to create conan
|
||||
# packages, in fact it shouldn't be use for that. Check the project documentation.
|
||||
|
||||
# version: 0.16.1
|
||||
|
||||
include(CMakeParseArguments)
|
||||
|
||||
function(_get_msvc_ide_version result)
|
||||
set(${result} "" PARENT_SCOPE)
|
||||
if(NOT MSVC_VERSION VERSION_LESS 1400 AND MSVC_VERSION VERSION_LESS 1500)
|
||||
set(${result} 8 PARENT_SCOPE)
|
||||
elseif(NOT MSVC_VERSION VERSION_LESS 1500 AND MSVC_VERSION VERSION_LESS 1600)
|
||||
set(${result} 9 PARENT_SCOPE)
|
||||
elseif(NOT MSVC_VERSION VERSION_LESS 1600 AND MSVC_VERSION VERSION_LESS 1700)
|
||||
set(${result} 10 PARENT_SCOPE)
|
||||
elseif(NOT MSVC_VERSION VERSION_LESS 1700 AND MSVC_VERSION VERSION_LESS 1800)
|
||||
set(${result} 11 PARENT_SCOPE)
|
||||
elseif(NOT MSVC_VERSION VERSION_LESS 1800 AND MSVC_VERSION VERSION_LESS 1900)
|
||||
set(${result} 12 PARENT_SCOPE)
|
||||
elseif(NOT MSVC_VERSION VERSION_LESS 1900 AND MSVC_VERSION VERSION_LESS 1910)
|
||||
set(${result} 14 PARENT_SCOPE)
|
||||
elseif(NOT MSVC_VERSION VERSION_LESS 1910 AND MSVC_VERSION VERSION_LESS 1920)
|
||||
set(${result} 15 PARENT_SCOPE)
|
||||
elseif(NOT MSVC_VERSION VERSION_LESS 1920 AND MSVC_VERSION VERSION_LESS 1930)
|
||||
set(${result} 16 PARENT_SCOPE)
|
||||
else()
|
||||
message(FATAL_ERROR "Conan: Unknown MSVC compiler version [${MSVC_VERSION}]")
|
||||
endif()
|
||||
endfunction()
|
||||
|
||||
macro(_conan_detect_build_type)
|
||||
conan_parse_arguments(${ARGV})
|
||||
|
||||
if(ARGUMENTS_BUILD_TYPE)
|
||||
set(_CONAN_SETTING_BUILD_TYPE ${ARGUMENTS_BUILD_TYPE})
|
||||
elseif(CMAKE_BUILD_TYPE)
|
||||
set(_CONAN_SETTING_BUILD_TYPE ${CMAKE_BUILD_TYPE})
|
||||
else()
|
||||
message(FATAL_ERROR "Please specify in command line CMAKE_BUILD_TYPE (-DCMAKE_BUILD_TYPE=Release)")
|
||||
endif()
|
||||
|
||||
string(TOUPPER ${_CONAN_SETTING_BUILD_TYPE} _CONAN_SETTING_BUILD_TYPE_UPPER)
|
||||
if (_CONAN_SETTING_BUILD_TYPE_UPPER STREQUAL "DEBUG")
|
||||
set(_CONAN_SETTING_BUILD_TYPE "Debug")
|
||||
elseif(_CONAN_SETTING_BUILD_TYPE_UPPER STREQUAL "RELEASE")
|
||||
set(_CONAN_SETTING_BUILD_TYPE "Release")
|
||||
elseif(_CONAN_SETTING_BUILD_TYPE_UPPER STREQUAL "RELWITHDEBINFO")
|
||||
set(_CONAN_SETTING_BUILD_TYPE "RelWithDebInfo")
|
||||
elseif(_CONAN_SETTING_BUILD_TYPE_UPPER STREQUAL "MINSIZEREL")
|
||||
set(_CONAN_SETTING_BUILD_TYPE "MinSizeRel")
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
# Detects the conan "-s os" setting from CMAKE_SYSTEM_NAME, translating the
# two names CMake and conan disagree on (Darwin -> Macos, QNX -> Neutrino),
# and fails hard for platforms conan does not support. Leaves _CONAN_SETTING_OS
# unset for "Generic" (bare-metal) systems so conan uses its own default.
# Macro on purpose: _CONAN_SETTING_OS must be visible in the caller's scope.
macro(_conan_check_system_name)
    #handle -s os setting
    if(CMAKE_SYSTEM_NAME AND NOT CMAKE_SYSTEM_NAME STREQUAL "Generic")
        #use default conan os setting if CMAKE_SYSTEM_NAME is not defined
        set(CONAN_SYSTEM_NAME ${CMAKE_SYSTEM_NAME})
        if(${CMAKE_SYSTEM_NAME} STREQUAL "Darwin")
            set(CONAN_SYSTEM_NAME Macos)
        endif()
        if(${CMAKE_SYSTEM_NAME} STREQUAL "QNX")
            set(CONAN_SYSTEM_NAME Neutrino)
        endif()
        # FIX: "FreeBSD" appeared twice in this list; the duplicate is removed
        # (it also showed up twice in the FATAL_ERROR message below).
        set(CONAN_SUPPORTED_PLATFORMS Windows Linux Macos Android iOS FreeBSD WindowsStore WindowsCE watchOS tvOS SunOS AIX Arduino Emscripten Neutrino)
        list (FIND CONAN_SUPPORTED_PLATFORMS "${CONAN_SYSTEM_NAME}" _index)
        if (${_index} GREATER -1)
            #check if the cmake system is a conan supported one
            set(_CONAN_SETTING_OS ${CONAN_SYSTEM_NAME})
        else()
            message(FATAL_ERROR "cmake system ${CONAN_SYSTEM_NAME} is not supported by conan. Use one of ${CONAN_SUPPORTED_PLATFORMS}")
        endif()
    endif()
endmacro()
|
||||
|
||||
# Determines which enabled project language (preferring CXX over C) should be
# used for compiler detection. Sets LANGUAGE to the chosen language and
# USING_CXX to 1/0; fails hard if neither C nor C++ is enabled.
# Macro on purpose: LANGUAGE and USING_CXX must be visible in the caller's scope.
macro(_conan_check_language)
    get_property(_languages GLOBAL PROPERTY ENABLED_LANGUAGES)
    if (";${_languages};" MATCHES ";CXX;")
        set(LANGUAGE CXX)
        set(USING_CXX 1)
    elseif (";${_languages};" MATCHES ";C;")
        set(LANGUAGE C)
        set(USING_CXX 0)
    else ()
        # FIX: corrected grammar/typos in the user-facing error message
        # ("Neither C or" -> "Neither C nor", "Unabled" -> "Unable").
        message(FATAL_ERROR "Conan: Neither C nor C++ was detected as a language for the project. Unable to detect compiler version.")
    endif()
endmacro()
|
||||
|
||||
# Autodetects the conan compiler-related settings (_CONAN_SETTING_COMPILER,
# _CONAN_SETTING_COMPILER_VERSION, and where applicable _CONAN_SETTING_ARCH,
# _CONAN_SETTING_COMPILER_LIBCXX, _CONAN_SETTING_COMPILER_RUNTIME and
# _CONAN_SETTING_COMPILER_TOOLSET) from CMake's compiler variables for the
# language chosen by _conan_check_language. Supports GNU, Intel, AppleClang,
# Clang and MSVC; anything else is a fatal error.
# Macro on purpose: the detected _CONAN_SETTING_* variables must be visible
# in the caller's scope.
macro(_conan_detect_compiler)

conan_parse_arguments(${ARGV})

# An explicit ARCH argument overrides any later autodetection (MSVC branch).
if(ARGUMENTS_ARCH)
set(_CONAN_SETTING_ARCH ${ARGUMENTS_ARCH})
endif()

if (${CMAKE_${LANGUAGE}_COMPILER_ID} STREQUAL GNU)
# using GCC
# TODO: Handle other params
# Split "major.minor.patch" into components to build the conan version.
string(REPLACE "." ";" VERSION_LIST ${CMAKE_${LANGUAGE}_COMPILER_VERSION})
list(GET VERSION_LIST 0 MAJOR)
list(GET VERSION_LIST 1 MINOR)
set(COMPILER_VERSION ${MAJOR}.${MINOR})
# GCC >= 5 is versioned by its major number only in conan's settings.
if(${MAJOR} GREATER 4)
set(COMPILER_VERSION ${MAJOR})
endif()
set(_CONAN_SETTING_COMPILER gcc)
set(_CONAN_SETTING_COMPILER_VERSION ${COMPILER_VERSION})
if (USING_CXX)
conan_cmake_detect_unix_libcxx(_LIBCXX)
set(_CONAN_SETTING_COMPILER_LIBCXX ${_LIBCXX})
endif ()
elseif (${CMAKE_${LANGUAGE}_COMPILER_ID} STREQUAL Intel)
string(REPLACE "." ";" VERSION_LIST ${CMAKE_${LANGUAGE}_COMPILER_VERSION})
list(GET VERSION_LIST 0 MAJOR)
list(GET VERSION_LIST 1 MINOR)
set(COMPILER_VERSION ${MAJOR}.${MINOR})
set(_CONAN_SETTING_COMPILER intel)
set(_CONAN_SETTING_COMPILER_VERSION ${COMPILER_VERSION})
if (USING_CXX)
conan_cmake_detect_unix_libcxx(_LIBCXX)
set(_CONAN_SETTING_COMPILER_LIBCXX ${_LIBCXX})
endif ()
elseif (${CMAKE_${LANGUAGE}_COMPILER_ID} STREQUAL AppleClang)
# using AppleClang
string(REPLACE "." ";" VERSION_LIST ${CMAKE_${LANGUAGE}_COMPILER_VERSION})
list(GET VERSION_LIST 0 MAJOR)
list(GET VERSION_LIST 1 MINOR)
set(_CONAN_SETTING_COMPILER apple-clang)
set(_CONAN_SETTING_COMPILER_VERSION ${MAJOR}.${MINOR})
if (USING_CXX)
conan_cmake_detect_unix_libcxx(_LIBCXX)
set(_CONAN_SETTING_COMPILER_LIBCXX ${_LIBCXX})
endif ()
elseif (${CMAKE_${LANGUAGE}_COMPILER_ID} STREQUAL Clang)
string(REPLACE "." ";" VERSION_LIST ${CMAKE_${LANGUAGE}_COMPILER_VERSION})
list(GET VERSION_LIST 0 MAJOR)
list(GET VERSION_LIST 1 MINOR)
set(_CONAN_SETTING_COMPILER clang)
set(_CONAN_SETTING_COMPILER_VERSION ${MAJOR}.${MINOR})
if(APPLE)
# On Apple, if CMP0025 is not NEW, CMake reports apple-clang as plain
# "Clang"; assume apple-clang in that case.
cmake_policy(GET CMP0025 APPLE_CLANG_POLICY)
if(NOT APPLE_CLANG_POLICY STREQUAL NEW)
message(STATUS "Conan: APPLE and Clang detected. Assuming apple-clang compiler. Set CMP0025 to avoid it")
set(_CONAN_SETTING_COMPILER apple-clang)
endif()
endif()
# Plain clang >= 8 is versioned by its major number only in conan's settings.
if(${_CONAN_SETTING_COMPILER} STREQUAL clang AND ${MAJOR} GREATER 7)
set(_CONAN_SETTING_COMPILER_VERSION ${MAJOR})
endif()
if (USING_CXX)
conan_cmake_detect_unix_libcxx(_LIBCXX)
set(_CONAN_SETTING_COMPILER_LIBCXX ${_LIBCXX})
endif ()
elseif(${CMAKE_${LANGUAGE}_COMPILER_ID} STREQUAL MSVC)
set(_VISUAL "Visual Studio")
# Maps MSVC_VERSION to the Visual Studio IDE version (defined elsewhere
# in this file, above this view).
_get_msvc_ide_version(_VISUAL_VERSION)
if("${_VISUAL_VERSION}" STREQUAL "")
message(FATAL_ERROR "Conan: Visual Studio not recognized")
else()
set(_CONAN_SETTING_COMPILER ${_VISUAL})
set(_CONAN_SETTING_COMPILER_VERSION ${_VISUAL_VERSION})
endif()

# Arch autodetection only runs when no explicit ARCH argument was given.
if(NOT _CONAN_SETTING_ARCH)
if (MSVC_${LANGUAGE}_ARCHITECTURE_ID MATCHES "64")
set(_CONAN_SETTING_ARCH x86_64)
elseif (MSVC_${LANGUAGE}_ARCHITECTURE_ID MATCHES "^ARM")
message(STATUS "Conan: Using default ARM architecture from MSVC")
set(_CONAN_SETTING_ARCH armv6)
elseif (MSVC_${LANGUAGE}_ARCHITECTURE_ID MATCHES "86")
set(_CONAN_SETTING_ARCH x86)
else ()
message(FATAL_ERROR "Conan: Unknown MSVC architecture [${MSVC_${LANGUAGE}_ARCHITECTURE_ID}]")
endif()
endif()

# MD/MT(/d) runtime is derived from the configured compiler flags.
conan_cmake_detect_vs_runtime(_vs_runtime ${ARGV})
message(STATUS "Conan: Detected VS runtime: ${_vs_runtime}")
set(_CONAN_SETTING_COMPILER_RUNTIME ${_vs_runtime})

if (CMAKE_GENERATOR_TOOLSET)
set(_CONAN_SETTING_COMPILER_TOOLSET ${CMAKE_VS_PLATFORM_TOOLSET})
elseif(CMAKE_VS_PLATFORM_TOOLSET AND (CMAKE_GENERATOR STREQUAL "Ninja"))
set(_CONAN_SETTING_COMPILER_TOOLSET ${CMAKE_VS_PLATFORM_TOOLSET})
endif()
else()
message(FATAL_ERROR "Conan: compiler setup not recognized")
endif()

endmacro()
|
||||
|
||||
# Builds the full "-s"/"-pr" command-line settings list for "conan install"
# and returns it in ${result}. Combines, in precedence order:
#   1. build-type-specific or generic profiles (passed as -pr=...),
#   2. build profiles (passed as -pr:b=..., requires conan >= 1.24.0),
#   3. settings autodetected from CMake (arch, build_type, compiler, ...),
#   4. settings passed explicitly via SETTINGS (which also suppress the
#      corresponding autodetected value).
function(conan_cmake_settings result)
#message(STATUS "COMPILER " ${CMAKE_CXX_COMPILER})
#message(STATUS "COMPILER " ${CMAKE_CXX_COMPILER_ID})
#message(STATUS "VERSION " ${CMAKE_CXX_COMPILER_VERSION})
#message(STATUS "FLAGS " ${CMAKE_LANG_FLAGS})
#message(STATUS "LIB ARCH " ${CMAKE_CXX_LIBRARY_ARCHITECTURE})
#message(STATUS "BUILD TYPE " ${CMAKE_BUILD_TYPE})
#message(STATUS "GENERATOR " ${CMAKE_GENERATOR})
#message(STATUS "GENERATOR WIN64 " ${CMAKE_CL_64})

message(STATUS "Conan: Automatic detection of conan settings from cmake")

conan_parse_arguments(${ARGV})

# These are macros, so the _CONAN_SETTING_* variables they set are visible here.
_conan_detect_build_type(${ARGV})

_conan_check_system_name()

_conan_check_language()

_conan_detect_compiler(${ARGV})

# If profile is defined it is used
if(CMAKE_BUILD_TYPE STREQUAL "Debug" AND ARGUMENTS_DEBUG_PROFILE)
set(_APPLIED_PROFILES ${ARGUMENTS_DEBUG_PROFILE})
elseif(CMAKE_BUILD_TYPE STREQUAL "Release" AND ARGUMENTS_RELEASE_PROFILE)
set(_APPLIED_PROFILES ${ARGUMENTS_RELEASE_PROFILE})
elseif(CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo" AND ARGUMENTS_RELWITHDEBINFO_PROFILE)
set(_APPLIED_PROFILES ${ARGUMENTS_RELWITHDEBINFO_PROFILE})
elseif(CMAKE_BUILD_TYPE STREQUAL "MinSizeRel" AND ARGUMENTS_MINSIZEREL_PROFILE)
set(_APPLIED_PROFILES ${ARGUMENTS_MINSIZEREL_PROFILE})
elseif(ARGUMENTS_PROFILE)
set(_APPLIED_PROFILES ${ARGUMENTS_PROFILE})
endif()

foreach(ARG ${_APPLIED_PROFILES})
set(_SETTINGS ${_SETTINGS} -pr=${ARG})
endforeach()
foreach(ARG ${ARGUMENTS_PROFILE_BUILD})
# The -pr:b flag only exists from conan 1.24.0 on; verify before using it.
conan_check(VERSION 1.24.0 REQUIRED DETECT_QUIET)
set(_SETTINGS ${_SETTINGS} -pr:b=${ARG})
endforeach()

# Autodetect everything when no profile was applied, or when the caller
# explicitly asked for full autodetection via PROFILE_AUTO ALL.
if(NOT _SETTINGS OR ARGUMENTS_PROFILE_AUTO STREQUAL "ALL")
set(ARGUMENTS_PROFILE_AUTO arch build_type compiler compiler.version
compiler.runtime compiler.libcxx compiler.toolset)
endif()

# remove any manually specified settings from the autodetected settings
foreach(ARG ${ARGUMENTS_SETTINGS})
# Setting name is everything before the "=" in "name=value".
string(REGEX MATCH "[^=]*" MANUAL_SETTING "${ARG}")
message(STATUS "Conan: ${MANUAL_SETTING} was added as an argument. Not using the autodetected one.")
list(REMOVE_ITEM ARGUMENTS_PROFILE_AUTO "${MANUAL_SETTING}")
endforeach()

# Automatic from CMake
foreach(ARG ${ARGUMENTS_PROFILE_AUTO})
# Map e.g. "compiler.version" to the variable suffix "COMPILER_VERSION".
string(TOUPPER ${ARG} _arg_name)
string(REPLACE "." "_" _arg_name ${_arg_name})
if(_CONAN_SETTING_${_arg_name})
set(_SETTINGS ${_SETTINGS} -s ${ARG}=${_CONAN_SETTING_${_arg_name}})
endif()
endforeach()

foreach(ARG ${ARGUMENTS_SETTINGS})
set(_SETTINGS ${_SETTINGS} -s ${ARG})
endforeach()

message(STATUS "Conan: Settings= ${_SETTINGS}")

set(${result} ${_SETTINGS} PARENT_SCOPE)
endfunction()
|
||||
|
||||
|
||||
# Detects which C++ standard library the configured compiler uses and returns
# the conan compiler.libcxx value ("libstdc++11", "libstdc++" or "libc++")
# in ${result}. Works by reconstructing the real compiler command line
# (wrapper args, --target, sysroots, flags) and preprocessing a one-line
# program with -E -dM to inspect the predefined macros.
function(conan_cmake_detect_unix_libcxx result)
# Take into account any -stdlib in compile options
get_directory_property(compile_options DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} COMPILE_OPTIONS)
# Generator expressions cannot be evaluated here, so strip them.
string(GENEX_STRIP "${compile_options}" compile_options)

# Take into account any _GLIBCXX_USE_CXX11_ABI in compile definitions
get_directory_property(defines DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} COMPILE_DEFINITIONS)
string(GENEX_STRIP "${defines}" defines)

foreach(define ${defines})
if(define MATCHES "_GLIBCXX_USE_CXX11_ABI")
# COMPILE_DEFINITIONS entries may or may not carry a -D prefix; pass
# them to the compiler with exactly one.
if(define MATCHES "^-D")
set(compile_options ${compile_options} "${define}")
else()
set(compile_options ${compile_options} "-D${define}")
endif()
endif()
endforeach()

# add additional compiler options ala cmRulePlaceholderExpander::ExpandRuleVariable
set(EXPAND_CXX_COMPILER ${CMAKE_CXX_COMPILER})
if(CMAKE_CXX_COMPILER_ARG1)
# CMake splits CXX="foo bar baz" into CMAKE_CXX_COMPILER="foo", CMAKE_CXX_COMPILER_ARG1="bar baz"
# without this, ccache, winegcc, or other wrappers might lose all their arguments
separate_arguments(SPLIT_CXX_COMPILER_ARG1 NATIVE_COMMAND ${CMAKE_CXX_COMPILER_ARG1})
list(APPEND EXPAND_CXX_COMPILER ${SPLIT_CXX_COMPILER_ARG1})
endif()

if(CMAKE_CXX_COMPILE_OPTIONS_TARGET AND CMAKE_CXX_COMPILER_TARGET)
# without --target= we may be calling the wrong underlying GCC
list(APPEND EXPAND_CXX_COMPILER "${CMAKE_CXX_COMPILE_OPTIONS_TARGET}${CMAKE_CXX_COMPILER_TARGET}")
endif()

if(CMAKE_CXX_COMPILE_OPTIONS_EXTERNAL_TOOLCHAIN AND CMAKE_CXX_COMPILER_EXTERNAL_TOOLCHAIN)
list(APPEND EXPAND_CXX_COMPILER "${CMAKE_CXX_COMPILE_OPTIONS_EXTERNAL_TOOLCHAIN}${CMAKE_CXX_COMPILER_EXTERNAL_TOOLCHAIN}")
endif()

if(CMAKE_CXX_COMPILE_OPTIONS_SYSROOT)
# without --sysroot= we may find the wrong #include <string>
if(CMAKE_SYSROOT_COMPILE)
list(APPEND EXPAND_CXX_COMPILER "${CMAKE_CXX_COMPILE_OPTIONS_SYSROOT}${CMAKE_SYSROOT_COMPILE}")
elseif(CMAKE_SYSROOT)
list(APPEND EXPAND_CXX_COMPILER "${CMAKE_CXX_COMPILE_OPTIONS_SYSROOT}${CMAKE_SYSROOT}")
endif()
endif()

separate_arguments(SPLIT_CXX_FLAGS NATIVE_COMMAND ${CMAKE_CXX_FLAGS})

if(CMAKE_OSX_SYSROOT)
set(xcode_sysroot_option "--sysroot=${CMAKE_OSX_SYSROOT}")
endif()

# Pipe "#include <string>" through the preprocessor and dump all predefined
# macros (-E -dM); the macros reveal which standard library is in use.
execute_process(
COMMAND ${CMAKE_COMMAND} -E echo "#include <string>"
COMMAND ${EXPAND_CXX_COMPILER} ${SPLIT_CXX_FLAGS} -x c++ ${xcode_sysroot_option} ${compile_options} -E -dM -
OUTPUT_VARIABLE string_defines
)

if(string_defines MATCHES "#define __GLIBCXX__")
# Allow -D_GLIBCXX_USE_CXX11_ABI=ON/OFF as argument to cmake
if(DEFINED _GLIBCXX_USE_CXX11_ABI)
if(_GLIBCXX_USE_CXX11_ABI)
set(${result} libstdc++11 PARENT_SCOPE)
return()
else()
set(${result} libstdc++ PARENT_SCOPE)
return()
endif()
endif()

if(string_defines MATCHES "#define _GLIBCXX_USE_CXX11_ABI 1\n")
set(${result} libstdc++11 PARENT_SCOPE)
else()
# Either the compiler is missing the define because it is old, and so
# it can't use the new abi, or the compiler was configured to use the
# old abi by the user or distro (e.g. devtoolset on RHEL/CentOS)
set(${result} libstdc++ PARENT_SCOPE)
endif()
else()
# No __GLIBCXX__ macro means the compiler is not using GNU libstdc++;
# assume LLVM libc++.
set(${result} libc++ PARENT_SCOPE)
endif()
endfunction()
|
||||
|
||||
# Detects the Visual Studio runtime library (MD, MDd, MT or MTd) that the
# current configuration will compile with and returns it in ${result}.
# The build type comes from the BUILD_TYPE argument or CMAKE_BUILD_TYPE;
# without either, configuration fails hard.
function(conan_cmake_detect_vs_runtime result)

    conan_parse_arguments(${ARGV})
    if(ARGUMENTS_BUILD_TYPE)
        set(_build_type "${ARGUMENTS_BUILD_TYPE}")
    elseif(CMAKE_BUILD_TYPE)
        set(_build_type "${CMAKE_BUILD_TYPE}")
    else()
        message(FATAL_ERROR "Please specify in command line CMAKE_BUILD_TYPE (-DCMAKE_BUILD_TYPE=Release)")
    endif()

    # Uppercase so the per-config flag variable names (e.g.
    # CMAKE_CXX_FLAGS_RELEASE) and the DEBUG comparison below match.
    if(_build_type)
        string(TOUPPER "${_build_type}" _build_type)
    endif()

    # Inspect the flag variables, most specific first, and report the first
    # explicit runtime switch found (with the leading "/" stripped).
    foreach(_flag_var CMAKE_CXX_FLAGS_${_build_type} CMAKE_C_FLAGS_${_build_type} CMAKE_CXX_FLAGS CMAKE_C_FLAGS)
        if(NOT "${${_flag_var}}" STREQUAL "")
            string(REPLACE " " ";" _flag_list "${${_flag_var}}")
            foreach(_flag ${_flag_list})
                if("${_flag}" STREQUAL "/MD" OR "${_flag}" STREQUAL "/MDd" OR "${_flag}" STREQUAL "/MT" OR "${_flag}" STREQUAL "/MTd")
                    string(SUBSTRING "${_flag}" 1 -1 _runtime)
                    set(${result} "${_runtime}" PARENT_SCOPE)
                    return()
                endif()
            endforeach()
        endif()
    endforeach()

    # No explicit switch anywhere: fall back to CMake's defaults,
    # /MDd for Debug and /MD for every other configuration.
    if("${_build_type}" STREQUAL "DEBUG")
        set(${result} "MDd" PARENT_SCOPE)
    else()
        set(${result} "MD" PARENT_SCOPE)
    endif()
endfunction()
|
||||
|
||||
# Gathers every autodetected _CONAN_SETTING_* value into a list of
# "name=value" pairs (e.g. "compiler=gcc;compiler.version=9") and returns
# it in ${result}.
function(_collect_settings result)
    set(_auto_settings arch build_type compiler compiler.version
                       compiler.runtime compiler.libcxx compiler.toolset)
    set(_detected "")
    foreach(_setting ${_auto_settings})
        # Map e.g. "compiler.version" to the variable suffix "COMPILER_VERSION".
        string(TOUPPER ${_setting} _var_suffix)
        string(REPLACE "." "_" _var_suffix ${_var_suffix})
        if(_CONAN_SETTING_${_var_suffix})
            list(APPEND _detected ${_setting}=${_CONAN_SETTING_${_var_suffix}})
        endif()
    endforeach()
    set(${result} ${_detected} PARENT_SCOPE)
endfunction()
|
||||
|
||||
# Runs the full settings autodetection pipeline (build type, OS, language,
# compiler) and returns the detected "name=value" settings list in
# ${detected_settings}. The detection steps are macros, so the
# _CONAN_SETTING_* variables they set are visible to _collect_settings here.
function(conan_cmake_autodetect detected_settings)
    _conan_detect_build_type()
    _conan_check_system_name()
    _conan_check_language()
    _conan_detect_compiler()
    _collect_settings(_autodetected)
    set(${detected_settings} ${_autodetected} PARENT_SCOPE)
endfunction()
|
||||
|
||||
# Parses the argument list shared by the legacy conan_cmake_* entry points
# into ARGUMENTS_* variables (e.g. ARGUMENTS_BUILD_TYPE, ARGUMENTS_REQUIRES).
# Deliberately a macro so the parsed ARGUMENTS_* variables land in the
# caller's scope; callers invoke it as conan_parse_arguments(${ARGV}).
macro(conan_parse_arguments)
set(options BASIC_SETUP CMAKE_TARGETS UPDATE KEEP_RPATHS NO_LOAD NO_OUTPUT_DIRS OUTPUT_QUIET NO_IMPORTS SKIP_STD)
set(oneValueArgs CONANFILE ARCH BUILD_TYPE INSTALL_FOLDER CONAN_COMMAND)
set(multiValueArgs DEBUG_PROFILE RELEASE_PROFILE RELWITHDEBINFO_PROFILE MINSIZEREL_PROFILE
PROFILE REQUIRES OPTIONS IMPORTS SETTINGS BUILD ENV GENERATORS PROFILE_AUTO
INSTALL_ARGS CONFIGURATION_TYPES PROFILE_BUILD BUILD_REQUIRES)
cmake_parse_arguments(ARGUMENTS "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
endmacro()
|
||||
|
||||
# Legacy "conan install" driver used by conan_cmake_run. Assembles the full
# conan command line from the parsed ARGUMENTS_* values (generators, build
# policy, options, install folder, env vars, extra install args) and executes
# it in CMAKE_CURRENT_BINARY_DIR, failing hard on a non-zero exit code.
function(old_conan_cmake_install)
# Calls "conan install"
# Argument BUILD is equivalent to --build={missing, PkgName,...} or
# --build when argument is 'BUILD all' (which builds all packages from source)
# Argument CONAN_COMMAND, to specify the conan path, e.g. in case of running from source
# cmake does not identify conan as command, even if it is +x and it is in the path
conan_parse_arguments(${ARGV})

# Multi-config builds need the cmake_multi generator instead of plain cmake.
if(CONAN_CMAKE_MULTI)
set(ARGUMENTS_GENERATORS ${ARGUMENTS_GENERATORS} cmake_multi)
else()
set(ARGUMENTS_GENERATORS ${ARGUMENTS_GENERATORS} cmake)
endif()

set(CONAN_BUILD_POLICY "")
foreach(ARG ${ARGUMENTS_BUILD})
# "all" means a bare --build (build everything from source) and overrides
# any --build=<pkg> entries collected so far.
if(${ARG} STREQUAL "all")
set(CONAN_BUILD_POLICY ${CONAN_BUILD_POLICY} --build)
break()
else()
set(CONAN_BUILD_POLICY ${CONAN_BUILD_POLICY} --build=${ARG})
endif()
endforeach()
# conan_check finds conan in PATH and sets CONAN_CMD (cache variable).
if(ARGUMENTS_CONAN_COMMAND)
set(CONAN_CMD ${ARGUMENTS_CONAN_COMMAND})
else()
conan_check(REQUIRED)
endif()
set(CONAN_OPTIONS "")
# CONANFILE may be absolute, relative to the source dir, or absent
# (in which case "." refers to the generated conanfile in the build dir).
if(ARGUMENTS_CONANFILE)
if(IS_ABSOLUTE ${ARGUMENTS_CONANFILE})
set(CONANFILE ${ARGUMENTS_CONANFILE})
else()
set(CONANFILE ${CMAKE_CURRENT_SOURCE_DIR}/${ARGUMENTS_CONANFILE})
endif()
else()
set(CONANFILE ".")
endif()
foreach(ARG ${ARGUMENTS_OPTIONS})
set(CONAN_OPTIONS ${CONAN_OPTIONS} -o=${ARG})
endforeach()
if(ARGUMENTS_UPDATE)
set(CONAN_INSTALL_UPDATE --update)
endif()
if(ARGUMENTS_NO_IMPORTS)
set(CONAN_INSTALL_NO_IMPORTS --no-imports)
endif()
set(CONAN_INSTALL_FOLDER "")
if(ARGUMENTS_INSTALL_FOLDER)
set(CONAN_INSTALL_FOLDER -if=${ARGUMENTS_INSTALL_FOLDER})
endif()
foreach(ARG ${ARGUMENTS_GENERATORS})
set(CONAN_GENERATORS ${CONAN_GENERATORS} -g=${ARG})
endforeach()
foreach(ARG ${ARGUMENTS_ENV})
set(CONAN_ENV_VARS ${CONAN_ENV_VARS} -e=${ARG})
endforeach()
# NOTE(review): ${settings} is expanded from the CALLER's scope (set by
# conan_cmake_settings before this function is called) — confirm when
# refactoring; it is not a local of this function.
set(conan_args install ${CONANFILE} ${settings} ${CONAN_ENV_VARS} ${CONAN_GENERATORS} ${CONAN_BUILD_POLICY} ${CONAN_INSTALL_UPDATE} ${CONAN_INSTALL_NO_IMPORTS} ${CONAN_OPTIONS} ${CONAN_INSTALL_FOLDER} ${ARGUMENTS_INSTALL_ARGS})

# Log the command with spaces instead of CMake list separators.
string (REPLACE ";" " " _conan_args "${conan_args}")
message(STATUS "Conan executing: ${CONAN_CMD} ${_conan_args}")

if(ARGUMENTS_OUTPUT_QUIET)
# Capture stdout/stderr so nothing is printed unless it fails.
execute_process(COMMAND ${CONAN_CMD} ${conan_args}
RESULT_VARIABLE return_code
OUTPUT_VARIABLE conan_output
ERROR_VARIABLE conan_output
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
else()
execute_process(COMMAND ${CONAN_CMD} ${conan_args}
RESULT_VARIABLE return_code
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
endif()

if(NOT "${return_code}" STREQUAL "0")
message(FATAL_ERROR "Conan install failed='${return_code}'")
endif()

endfunction()
|
||||
|
||||
# Modern "conan install" driver (for the conan_cmake_configure /
# conan_cmake_autodetect workflow). Maps each parsed argument to its conan
# command-line flag (REMOTE -> --remote, PROFILE_HOST -> --profile:host, ...),
# runs the resulting command in CMAKE_CURRENT_BINARY_DIR and fails hard on
# error unless ERROR_QUIET was given (then only a warning is printed).
function(conan_cmake_install)
if(DEFINED CONAN_COMMAND)
set(CONAN_CMD ${CONAN_COMMAND})
else()
conan_check(REQUIRED)
endif()

set(installOptions UPDATE NO_IMPORTS OUTPUT_QUIET ERROR_QUIET)
set(installOneValueArgs PATH_OR_REFERENCE REFERENCE REMOTE LOCKFILE LOCKFILE_OUT LOCKFILE_NODE_ID INSTALL_FOLDER)
set(installMultiValueArgs GENERATOR BUILD ENV ENV_HOST ENV_BUILD OPTIONS_HOST OPTIONS OPTIONS_BUILD PROFILE
PROFILE_HOST PROFILE_BUILD SETTINGS SETTINGS_HOST SETTINGS_BUILD)
cmake_parse_arguments(ARGS "${installOptions}" "${installOneValueArgs}" "${installMultiValueArgs}" ${ARGN})
# For each given option flag, define a same-named variable; it is later
# translated to the actual command-line switch (see UPDATE/NO_IMPORTS below).
foreach(arg ${installOptions})
if(ARGS_${arg})
set(${arg} ${${arg}} ${ARGS_${arg}})
endif()
endforeach()
# One-value arguments become "<flag> <value>" pairs stored in a variable
# named after the argument itself (e.g. REMOTE holds "--remote;myremote").
foreach(arg ${installOneValueArgs})
if(DEFINED ARGS_${arg})
if("${arg}" STREQUAL "REMOTE")
set(flag "--remote")
elseif("${arg}" STREQUAL "LOCKFILE")
set(flag "--lockfile")
elseif("${arg}" STREQUAL "LOCKFILE_OUT")
set(flag "--lockfile-out")
elseif("${arg}" STREQUAL "LOCKFILE_NODE_ID")
set(flag "--lockfile-node-id")
elseif("${arg}" STREQUAL "INSTALL_FOLDER")
set(flag "--install-folder")
endif()
set(${arg} ${${arg}} ${flag} ${ARGS_${arg}})
endif()
endforeach()
# Multi-value arguments repeat their flag once per item
# (e.g. SETTINGS a b -> "--settings;a;--settings;b").
foreach(arg ${installMultiValueArgs})
if(DEFINED ARGS_${arg})
if("${arg}" STREQUAL "GENERATOR")
set(flag "--generator")
elseif("${arg}" STREQUAL "BUILD")
set(flag "--build")
elseif("${arg}" STREQUAL "ENV")
set(flag "--env")
elseif("${arg}" STREQUAL "ENV_HOST")
set(flag "--env:host")
elseif("${arg}" STREQUAL "ENV_BUILD")
set(flag "--env:build")
elseif("${arg}" STREQUAL "OPTIONS")
set(flag "--options")
elseif("${arg}" STREQUAL "OPTIONS_HOST")
set(flag "--options:host")
elseif("${arg}" STREQUAL "OPTIONS_BUILD")
set(flag "--options:build")
elseif("${arg}" STREQUAL "PROFILE")
set(flag "--profile")
elseif("${arg}" STREQUAL "PROFILE_HOST")
set(flag "--profile:host")
elseif("${arg}" STREQUAL "PROFILE_BUILD")
set(flag "--profile:build")
elseif("${arg}" STREQUAL "SETTINGS")
set(flag "--settings")
elseif("${arg}" STREQUAL "SETTINGS_HOST")
set(flag "--settings:host")
elseif("${arg}" STREQUAL "SETTINGS_BUILD")
set(flag "--settings:build")
endif()
list(LENGTH ARGS_${arg} numargs)
foreach(item ${ARGS_${arg}})
# "BUILD all" collapses to a bare --build (build everything from source).
if(${item} STREQUAL "all" AND ${arg} STREQUAL "BUILD")
set(${arg} "--build")
break()
endif()
set(${arg} ${${arg}} ${flag} ${item})
endforeach()
endif()
endforeach()
if(DEFINED UPDATE)
set(UPDATE --update)
endif()
if(DEFINED NO_IMPORTS)
set(NO_IMPORTS --no-imports)
endif()
set(install_args install ${PATH_OR_REFERENCE} ${REFERENCE} ${UPDATE} ${NO_IMPORTS} ${REMOTE} ${LOCKFILE} ${LOCKFILE_OUT} ${LOCKFILE_NODE_ID} ${INSTALL_FOLDER}
${GENERATOR} ${BUILD} ${ENV} ${ENV_HOST} ${ENV_BUILD} ${OPTIONS} ${OPTIONS_HOST} ${OPTIONS_BUILD}
${PROFILE} ${PROFILE_HOST} ${PROFILE_BUILD} ${SETTINGS} ${SETTINGS_HOST} ${SETTINGS_BUILD})

# Log the command with spaces instead of CMake list separators.
string(REPLACE ";" " " _install_args "${install_args}")
message(STATUS "Conan executing: ${CONAN_CMD} ${_install_args}")

if(ARGS_OUTPUT_QUIET)
set(OUTPUT_OPT OUTPUT_QUIET)
endif()
if(ARGS_ERROR_QUIET)
set(ERROR_OPT ERROR_QUIET)
endif()

execute_process(COMMAND ${CONAN_CMD} ${install_args}
RESULT_VARIABLE return_code
${OUTPUT_OPT}
${ERROR_OPT}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})

if(NOT "${return_code}" STREQUAL "0")
# With ERROR_QUIET a failed install is tolerated (warning only);
# otherwise configuration aborts.
if (ARGS_ERROR_QUIET)
message(WARNING "Conan install failed='${return_code}'")
else()
message(FATAL_ERROR "Conan install failed='${return_code}'")
endif()
endif()

endfunction()
|
||||
|
||||
# Prepares the conanfile used by the install step. If the caller supplied a
# CONANFILE argument, that file is registered as a configure dependency so a
# change to it re-runs cmake; otherwise a conanfile.txt is generated from the
# parsed arguments, with the default "cmake" generator enabled.
function(conan_cmake_setup_conanfile)
    conan_parse_arguments(${ARGV})
    if(NOT ARGUMENTS_CONANFILE)
        # No user conanfile: generate one (ON = include the default generator).
        conan_cmake_generate_conanfile(ON ${ARGV})
        return()
    endif()
    get_filename_component(_conanfile_basename ${ARGUMENTS_CONANFILE} NAME)
    # configure_file makes cmake watch the conanfile for changes; the copy it
    # produces is a throwaway and is removed immediately.
    set(_throwaway_copy ${CMAKE_CURRENT_BINARY_DIR}/${_conanfile_basename}.junk)
    configure_file(${ARGUMENTS_CONANFILE} ${_throwaway_copy} COPYONLY)
    file(REMOVE ${_throwaway_copy})
endfunction()
|
||||
|
||||
# Public entry point of the modern workflow: writes a conanfile.txt in the
# build dir from the given REQUIRES/OPTIONS/GENERATORS/... arguments.
# OFF = do not add the implicit default "cmake" generator.
function(conan_cmake_configure)
conan_cmake_generate_conanfile(OFF ${ARGV})
endfunction()
|
||||
|
||||
# Generate, writing in disk a conanfile.txt with the requires, options, and imports
|
||||
# specified as arguments
|
||||
# This will be considered as temporary file, generated in CMAKE_CURRENT_BINARY_DIR)
|
||||
# Writes ${CMAKE_CURRENT_BINARY_DIR}/conanfile.txt from the parsed arguments,
# emitting one INI-style section per argument group ([requires], [generators],
# [build_requires], [imports], [options]). DEFAULT_GENERATOR controls whether
# the "cmake" generator is always added to [generators].
function(conan_cmake_generate_conanfile DEFAULT_GENERATOR)

conan_parse_arguments(${ARGV})

# Truncate/create the file, then append section by section.
set(_FN "${CMAKE_CURRENT_BINARY_DIR}/conanfile.txt")
file(WRITE ${_FN} "")

if(DEFINED ARGUMENTS_REQUIRES)
file(APPEND ${_FN} "[requires]\n")
foreach(REQUIRE ${ARGUMENTS_REQUIRES})
file(APPEND ${_FN} ${REQUIRE} "\n")
endforeach()
endif()

# [generators] is written when the default is requested and/or explicit
# generators were given.
if (DEFAULT_GENERATOR OR DEFINED ARGUMENTS_GENERATORS)
file(APPEND ${_FN} "[generators]\n")
if (DEFAULT_GENERATOR)
file(APPEND ${_FN} "cmake\n")
endif()
if (DEFINED ARGUMENTS_GENERATORS)
foreach(GENERATOR ${ARGUMENTS_GENERATORS})
file(APPEND ${_FN} ${GENERATOR} "\n")
endforeach()
endif()
endif()

if(DEFINED ARGUMENTS_BUILD_REQUIRES)
file(APPEND ${_FN} "[build_requires]\n")
foreach(BUILD_REQUIRE ${ARGUMENTS_BUILD_REQUIRES})
file(APPEND ${_FN} ${BUILD_REQUIRE} "\n")
endforeach()
endif()

if(DEFINED ARGUMENTS_IMPORTS)
file(APPEND ${_FN} "[imports]\n")
foreach(IMPORTS ${ARGUMENTS_IMPORTS})
file(APPEND ${_FN} ${IMPORTS} "\n")
endforeach()
endif()

if(DEFINED ARGUMENTS_OPTIONS)
file(APPEND ${_FN} "[options]\n")
foreach(OPTION ${ARGUMENTS_OPTIONS})
file(APPEND ${_FN} ${OPTION} "\n")
endforeach()
endif()

endfunction()
|
||||
|
||||
|
||||
# Locates and includes the conanbuildinfo(.multi).cmake file produced by
# "conan install" so the CONAN_* variables it defines land in the caller's
# scope (which is why this must be a macro, not a function). The file is
# looked up in ARGUMENTS_INSTALL_FOLDER when given, otherwise in
# CMAKE_CURRENT_BINARY_DIR; a missing file is a fatal configuration error.
macro(conan_load_buildinfo)
    if(CONAN_CMAKE_MULTI)
        set(_CONANBUILDINFO conanbuildinfo_multi.cmake)
    else()
        set(_CONANBUILDINFO conanbuildinfo.cmake)
    endif()
    if(ARGUMENTS_INSTALL_FOLDER)
        set(_CONANBUILDINFOFOLDER ${ARGUMENTS_INSTALL_FOLDER})
    else()
        set(_CONANBUILDINFOFOLDER ${CMAKE_CURRENT_BINARY_DIR})
    endif()
    # Checks for the existence of conanbuildinfo.cmake, and loads it
    # important that it is macro, so variables defined at parent scope
    if(EXISTS "${_CONANBUILDINFOFOLDER}/${_CONANBUILDINFO}")
        message(STATUS "Conan: Loading ${_CONANBUILDINFO}")
        include(${_CONANBUILDINFOFOLDER}/${_CONANBUILDINFO})
    else()
        # FIX: the error previously reported CMAKE_CURRENT_BINARY_DIR even when
        # the lookup actually happened in ARGUMENTS_INSTALL_FOLDER; report the
        # folder that was really searched.
        message(FATAL_ERROR "${_CONANBUILDINFO} doesn't exist in ${_CONANBUILDINFOFOLDER}")
    endif()
endmacro()
|
||||
|
||||
|
||||
# Legacy all-in-one entry point: prepares/generates a conanfile, computes
# settings, runs "conan install" (once per configuration for multi-config
# generators), loads the resulting conanbuildinfo, and optionally calls
# conan_basic_setup. Deliberately a macro so everything the included
# buildinfo defines is visible in the caller's scope.
macro(conan_cmake_run)
conan_parse_arguments(${ARGV})

# Argument sanity warnings (non-fatal).
if(ARGUMENTS_CONFIGURATION_TYPES AND NOT CMAKE_CONFIGURATION_TYPES)
message(WARNING "CONFIGURATION_TYPES should only be specified for multi-configuration generators")
elseif(ARGUMENTS_CONFIGURATION_TYPES AND ARGUMENTS_BUILD_TYPE)
message(WARNING "CONFIGURATION_TYPES and BUILD_TYPE arguments should not be defined at the same time.")
endif()

# Multi-config mode: a multi-config generator is active, no single build type
# was forced, and we are not being built BY conan (CONAN_EXPORTED).
if(CMAKE_CONFIGURATION_TYPES AND NOT CMAKE_BUILD_TYPE AND NOT CONAN_EXPORTED
AND NOT ARGUMENTS_BUILD_TYPE)
set(CONAN_CMAKE_MULTI ON)
if (NOT ARGUMENTS_CONFIGURATION_TYPES)
set(ARGUMENTS_CONFIGURATION_TYPES "Release;Debug")
endif()
message(STATUS "Conan: Using cmake-multi generator")
else()
set(CONAN_CMAKE_MULTI OFF)
endif()

# When conan itself drives the build (conan create / conan build),
# dependencies were already installed — skip the install step entirely.
if(NOT CONAN_EXPORTED)
conan_cmake_setup_conanfile(${ARGV})
if(CONAN_CMAKE_MULTI)
# Install once per configuration; CMAKE_BUILD_TYPE is the loop variable
# so conan_cmake_settings picks up each configuration in turn.
foreach(CMAKE_BUILD_TYPE ${ARGUMENTS_CONFIGURATION_TYPES})
set(ENV{CONAN_IMPORT_PATH} ${CMAKE_BUILD_TYPE})
conan_cmake_settings(settings ${ARGV})
old_conan_cmake_install(SETTINGS ${settings} ${ARGV})
endforeach()
# Clear the loop variable so it does not leak a stale build type.
set(CMAKE_BUILD_TYPE)
else()
conan_cmake_settings(settings ${ARGV})
old_conan_cmake_install(SETTINGS ${settings} ${ARGV})
endif()
endif()

if (NOT ARGUMENTS_NO_LOAD)
conan_load_buildinfo()
endif()

if(ARGUMENTS_BASIC_SETUP)
# Translate our argument names to conan_basic_setup's option names
# (CMAKE_TARGETS -> TARGETS; the rest match 1:1).
foreach(_option CMAKE_TARGETS KEEP_RPATHS NO_OUTPUT_DIRS SKIP_STD)
if(ARGUMENTS_${_option})
if(${_option} STREQUAL "CMAKE_TARGETS")
list(APPEND _setup_options "TARGETS")
else()
list(APPEND _setup_options ${_option})
endif()
endif()
endforeach()
conan_basic_setup(${_setup_options})
endif()
endmacro()
|
||||
|
||||
# Verifies that the conan executable is available (setting the CONAN_CMD cache
# variable via find_program) and optionally that it is at least VERSION.
# Macro on purpose so CONAN_CMD and the parsed CONAN_* flags are visible in
# the caller's scope.
macro(conan_check)
# Checks conan availability in PATH
# Arguments REQUIRED, DETECT_QUIET and VERSION are optional
# Example usage:
#    conan_check(VERSION 1.0.0 REQUIRED)
set(options REQUIRED DETECT_QUIET)
set(oneValueArgs VERSION)
cmake_parse_arguments(CONAN "${options}" "${oneValueArgs}" "" ${ARGN})
if(NOT CONAN_DETECT_QUIET)
message(STATUS "Conan: checking conan executable")
endif()

find_program(CONAN_CMD conan)
if(NOT CONAN_CMD AND CONAN_REQUIRED)
message(FATAL_ERROR "Conan executable not found! Please install conan.")
endif()
if(NOT CONAN_DETECT_QUIET)
message(STATUS "Conan: Found program ${CONAN_CMD}")
endif()
# NOTE(review): if conan was not found and REQUIRED was not given, CONAN_CMD
# is NOTFOUND here and this execute_process fails, tripping the FATAL_ERROR
# below anyway — confirm whether a graceful return was intended.
execute_process(COMMAND ${CONAN_CMD} --version
RESULT_VARIABLE return_code
OUTPUT_VARIABLE CONAN_VERSION_OUTPUT
ERROR_VARIABLE CONAN_VERSION_OUTPUT)

if(NOT "${return_code}" STREQUAL "0")
message(FATAL_ERROR "Conan --version failed='${return_code}'")
endif()

if(NOT CONAN_DETECT_QUIET)
message(STATUS "Conan: Version found ${CONAN_VERSION_OUTPUT}")
endif()

if(DEFINED CONAN_VERSION)
# Extract "x.y.z" from the "Conan version x.y.z" banner into CMAKE_MATCH_1.
string(REGEX MATCH ".*Conan version ([0-9]+\\.[0-9]+\\.[0-9]+)" FOO
"${CONAN_VERSION_OUTPUT}")
if(${CMAKE_MATCH_1} VERSION_LESS ${CONAN_VERSION})
message(FATAL_ERROR "Conan outdated. Installed: ${CMAKE_MATCH_1}, \
required: ${CONAN_VERSION}. Consider updating via 'pip \
install conan==${CONAN_VERSION}'.")
endif()
endif()
endmacro()
|
||||
|
||||
# Adds (or force-updates, -f) a conan remote repository.
# Arguments URL and NAME are required; INDEX, COMMAND and VERIFY_SSL are
# optional. Example usage:
#   conan_add_remote(NAME bincrafters INDEX 1
#                    URL https://api.bintray.com/conan/bincrafters/public-conan
#                    VERIFY_SSL True)
function(conan_add_remote)
    set(oneValueArgs URL NAME INDEX COMMAND VERIFY_SSL)
    cmake_parse_arguments(CONAN "" "${oneValueArgs}" "" ${ARGN})

    if(DEFINED CONAN_INDEX)
        # FIX: this was previously set to the single quoted string
        # "-i ${CONAN_INDEX}", which execute_process passes to conan as ONE
        # argument containing a space ("-i 1") that the conan CLI cannot
        # parse. Build a two-element list so "-i" and the index are passed
        # as separate arguments.
        set(CONAN_INDEX_ARG -i ${CONAN_INDEX})
    endif()
    if(DEFINED CONAN_COMMAND)
        set(CONAN_CMD ${CONAN_COMMAND})
    else()
        # Locates conan in PATH and sets the CONAN_CMD cache variable.
        conan_check(REQUIRED)
    endif()
    # SSL verification defaults to True unless the caller overrides it.
    set(CONAN_VERIFY_SSL_ARG "True")
    if(DEFINED CONAN_VERIFY_SSL)
        set(CONAN_VERIFY_SSL_ARG ${CONAN_VERIFY_SSL})
    endif()
    message(STATUS "Conan: Adding ${CONAN_NAME} remote repository (${CONAN_URL}) verify ssl (${CONAN_VERIFY_SSL_ARG})")
    execute_process(COMMAND ${CONAN_CMD} remote add ${CONAN_NAME} ${CONAN_INDEX_ARG} -f ${CONAN_URL} ${CONAN_VERIFY_SSL_ARG}
                    RESULT_VARIABLE return_code)
    if(NOT "${return_code}" STREQUAL "0")
        message(FATAL_ERROR "Conan remote failed='${return_code}'")
    endif()
endfunction()
|
||||
|
||||
# Runs "conan config install" to install a full conan configuration from a
# local folder/zip or a remote location (git, http, ...).
macro(conan_config_install)
# install a full configuration from a local or remote zip file
# Argument ITEM is required, arguments TYPE, SOURCE, TARGET and VERIFY_SSL are optional
# Example usage:
#    conan_config_install(ITEM https://github.com/conan-io/cmake-conan.git
#       TYPE git SOURCE source-folder TARGET target-folder VERIFY_SSL false)
set(oneValueArgs ITEM TYPE SOURCE TARGET VERIFY_SSL)
set(multiValueArgs ARGS)
cmake_parse_arguments(CONAN "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})

find_program(CONAN_CMD conan)
# NOTE(review): CONAN_REQUIRED is never parsed by this macro (no options are
# declared above), so this check only fires if a caller in the same scope set
# it earlier — presumably copied from conan_check; confirm intent.
if(NOT CONAN_CMD AND CONAN_REQUIRED)
message(FATAL_ERROR "Conan executable not found!")
endif()

if(DEFINED CONAN_VERIFY_SSL)
set(CONAN_VERIFY_SSL_ARG "--verify-ssl=${CONAN_VERIFY_SSL}")
endif()

if(DEFINED CONAN_TYPE)
set(CONAN_TYPE_ARG "--type=${CONAN_TYPE}")
endif()

if(DEFINED CONAN_ARGS)
# NOTE(review): the escaped quotes become literal characters of the argv
# entry passed by execute_process — verify conan accepts --args="..."
# with embedded quotes.
set(CONAN_ARGS_ARGS "--args=\"${CONAN_ARGS}\"")
endif()

if(DEFINED CONAN_SOURCE)
set(CONAN_SOURCE_ARGS "--source-folder=${CONAN_SOURCE}")
endif()

if(DEFINED CONAN_TARGET)
set(CONAN_TARGET_ARGS "--target-folder=${CONAN_TARGET}")
endif()

# Only the flags that were actually given end up in the final command.
set (CONAN_CONFIG_INSTALL_ARGS ${CONAN_VERIFY_SSL_ARG}
${CONAN_TYPE_ARG}
${CONAN_ARGS_ARGS}
${CONAN_SOURCE_ARGS}
${CONAN_TARGET_ARGS})

message(STATUS "Conan: Installing config from ${CONAN_ITEM}")
execute_process(COMMAND ${CONAN_CMD} config install ${CONAN_ITEM} ${CONAN_CONFIG_INSTALL_ARGS}
RESULT_VARIABLE return_code)
if(NOT "${return_code}" STREQUAL "0")
message(FATAL_ERROR "Conan config failed='${return_code}'")
endif()
endmacro()
|
@ -97,7 +97,6 @@ endfunction(target_enable_style_warnings)
|
||||
# target_add_boost(buildtarget)
|
||||
##################################################
|
||||
function(target_add_boost TARGET)
|
||||
target_link_libraries(${TARGET} PUBLIC CryfsDependencies_boost)
|
||||
target_compile_definitions(${TARGET} PUBLIC BOOST_THREAD_VERSION=4)
|
||||
endfunction(target_add_boost)
|
||||
|
||||
|
46
conanfile.py
46
conanfile.py
@ -1,46 +0,0 @@
|
||||
from conans import ConanFile, CMake
|
||||
|
||||
class CryFSConan(ConanFile):
|
||||
settings = "os", "compiler", "build_type", "arch"
|
||||
requires = [
|
||||
"range-v3/0.11.0",
|
||||
"spdlog/1.8.5",
|
||||
"boost/1.75.0",
|
||||
]
|
||||
generators = "cmake"
|
||||
default_options = {
|
||||
"boost:system_no_deprecated": True,
|
||||
"boost:asio_no_deprecated": True,
|
||||
"boost:filesystem_no_deprecated": True,
|
||||
"boost:without_atomic": False, # needed by boost thread
|
||||
"boost:without_chrono": False, # needed by CryFS
|
||||
"boost:without_container": False, # needed by boost thread
|
||||
"boost:without_context": True,
|
||||
"boost:without_contract": True,
|
||||
"boost:without_coroutine": True,
|
||||
"boost:without_date_time": False, # needed by boost thread
|
||||
"boost:without_exception": False, # needed by boost thread
|
||||
"boost:without_fiber": True,
|
||||
"boost:without_filesystem": False, # needed by CryFS
|
||||
"boost:without_graph": True,
|
||||
"boost:without_graph_parallel": True,
|
||||
"boost:without_iostreams": True,
|
||||
"boost:without_json": True,
|
||||
"boost:without_locale": True,
|
||||
"boost:without_log": True,
|
||||
"boost:without_math": True,
|
||||
"boost:without_mpi": True,
|
||||
"boost:without_nowide": True,
|
||||
"boost:without_program_options": False, # needed by CryFS
|
||||
"boost:without_python": True,
|
||||
"boost:without_random": True,
|
||||
"boost:without_regex": True,
|
||||
"boost:without_serialization": False, # needed by boost date_time
|
||||
"boost:without_stacktrace": True,
|
||||
"boost:without_system": False, # needed by CryFS
|
||||
"boost:without_test": True,
|
||||
"boost:without_thread": False, # needed by CryFS
|
||||
"boost:without_timer": True,
|
||||
"boost:without_type_erasure": True,
|
||||
"boost:without_wave": True,
|
||||
}
|
@ -1,72 +0,0 @@
|
||||
# appends a build number from the APPVEYOR_BUILD_NUMBER environment variable as fourth component to a version number,
|
||||
# i.e. "0.10" becomes "0.10.0.[buildnumber]", "1" becomes "1.0.0.[buildnumber]".
|
||||
function(append_build_number VERSION_NUMBER OUTPUT)
|
||||
string(REPLACE "." ";" VERSION_COMPONENTS ${STRIPPED_VERSION_NUMBER})
|
||||
list(LENGTH VERSION_COMPONENTS NUM_VERSION_COMPONENTS)
|
||||
if (${NUM_VERSION_COMPONENTS} LESS_EQUAL 0)
|
||||
message(FATAL_ERROR "Didn't find any version components")
|
||||
endif()
|
||||
if (${NUM_VERSION_COMPONENTS} LESS_EQUAL 1)
|
||||
string(APPEND STRIPPED_VERSION_NUMBER ".0")
|
||||
endif()
|
||||
if (${NUM_VERSION_COMPONENTS} LESS_EQUAL 2)
|
||||
string(APPEND STRIPPED_VERSION_NUMBER ".0")
|
||||
endif()
|
||||
if (NOT $ENV{APPVEYOR_BUILD_NUMBER} STREQUAL "")
|
||||
string(APPEND STRIPPED_VERSION_NUMBER ".$ENV{APPVEYOR_BUILD_NUMBER}")
|
||||
endif()
|
||||
set(${OUTPUT} "${STRIPPED_VERSION_NUMBER}" PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
if("${CMAKE_VERSION}" VERSION_LESS "3.3")
|
||||
# Earlier cmake versions generate .deb packages for which the package manager says they're bad quality
|
||||
# and asks the user whether they really want to install it. Cmake 3.3 fixes this.
|
||||
message(WARNING "Distribution package generation is only supported for CMake version >= 3.3. You're using ${CMAKE_VERSION}. You will be able to build and install CryFS, but you won't be able to generate .deb packages.")
|
||||
else()
|
||||
# Fix debfiles permissions. Unfortunately, git doesn't store file permissions.
|
||||
# When installing the .deb package and these files have the wrong permissions, the package manager complains.
|
||||
execute_process(COMMAND /bin/bash -c "chmod 0755 ${CMAKE_CURRENT_SOURCE_DIR}/debfiles/*")
|
||||
|
||||
set(CPACK_PACKAGE_NAME "cryfs")
|
||||
set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Encrypt your files and store them in the cloud.")
|
||||
set(CPACK_PACKAGE_DESCRIPTION "CryFS encrypts your files, so you can safely store them anywhere. It works well together with cloud services like Dropbox, iCloud, OneDrive and others.")
|
||||
set(CPACK_PACKAGE_CONTACT "Sebastian Messmer <messmer@cryfs.org>")
|
||||
set(CPACK_PACKAGE_VENDOR "Sebastian Messmer")
|
||||
set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/../LICENSE.txt")
|
||||
|
||||
get_git_version(GITVERSION_VERSION_STRING)
|
||||
|
||||
if(WIN32 AND NOT UNIX)
|
||||
set(CPACK_GENERATOR WIX)
|
||||
|
||||
string(REGEX REPLACE "^([0-9\\.]+)([-+][0-9\\.a-zA-Z+-]+)?$" "\\1" STRIPPED_VERSION_NUMBER "${GITVERSION_VERSION_STRING}")
|
||||
append_build_number(${STRIPPED_VERSION_NUMBER} WIX_VERSION_NUMBER)
|
||||
message(STATUS "WIX package version is ${WIX_VERSION_NUMBER}")
|
||||
set(CPACK_PACKAGE_VERSION "${WIX_VERSION_NUMBER}")
|
||||
|
||||
set(CPACK_WIX_UPGRADE_GUID "8b872ce1-557d-48e6-ac57-9f5e574feabf")
|
||||
set(CPACK_WIX_PRODUCT_GUID "26116061-4f99-4c44-a178-2153fa396308")
|
||||
#set(CPACK_WIX_PRODUCT_ICON "...")
|
||||
set(CPACK_WIX_PROPERTY_ARPURLINFOABOUT "https://www.cryfs.org")
|
||||
set(CPACK_PACKAGE_INSTALL_DIRECTORY "CryFS/${GITVERSION_VERSION_STRING}")
|
||||
set(CPACK_WIX_PATCH_FILE "${CMAKE_CURRENT_SOURCE_DIR}/wix/change_path_env.xml")
|
||||
else()
|
||||
set(CPACK_GENERATOR TGZ DEB RPM)
|
||||
set(CPACK_PACKAGE_VERSION "${GITVERSION_VERSION_STRING}")
|
||||
set(CPACK_STRIP_FILES OFF)
|
||||
set(CPACK_SOURCE_STRIP_FILES OFF)
|
||||
endif()
|
||||
set(CPACK_PACKAGE_EXECUTABLES "cryfs" "CryFS")
|
||||
set(CPACK_DEBIAN_PACKAGE_SECTION "utils")
|
||||
set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS ON)
|
||||
# Needs gnupg2, lsb-release for postinst script
|
||||
set(CPACK_DEBIAN_PACKAGE_DEPENDS "fuse, gnupg2, lsb-release")
|
||||
|
||||
set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://www.cryfs.org")
|
||||
set(CPACK_RPM_PACKAGE_LICENSE "LGPLv3")
|
||||
set(CPACK_RPM_PACKAGE_DESCRIPTION ${CPACK_PACKAGE_DESCRIPTION})
|
||||
set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION "/usr/bin;/usr/share/man;/usr/share/man/man1")
|
||||
set(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA "${CMAKE_CURRENT_SOURCE_DIR}/debfiles/postinst;${CMAKE_CURRENT_SOURCE_DIR}/debfiles/postrm")
|
||||
|
||||
include(CPack)
|
||||
endif()
|
@ -1,124 +0,0 @@
|
||||
#!/bin/bash
|
||||
# This script is called after the cryfs .deb package is installed.
|
||||
# It sets up the package source so the user gets automatic updates for cryfs.
|
||||
|
||||
# DEVELOPER WARNING: There is a lot of redundancy between this file and the install.sh script in the cryfs-web repository. Please port modifications to there!
|
||||
|
||||
set -e
|
||||
|
||||
DEBIAN_REPO_URL="http://apt.cryfs.org/debian"
|
||||
UBUNTU_REPO_URL="http://apt.cryfs.org/ubuntu"
|
||||
|
||||
DISTRIBUTION=`lsb_release -s -i`
|
||||
DISTRIBUTION_VERSION=`lsb_release -s -c`
|
||||
|
||||
containsElement () {
|
||||
local e
|
||||
for e in "${@:2}"; do [[ "$e" == "$1" ]] && return 0; done
|
||||
return 1
|
||||
}
|
||||
|
||||
get_repo_url () {
|
||||
if [[ "$DISTRIBUTION" == "Debian" ]] || [[ "$DISTRIBUTION" == "Devuan" ]]; then
|
||||
echo $DEBIAN_REPO_URL
|
||||
elif [[ "$DISTRIBUTION" == "Ubuntu" ]]; then
|
||||
echo $UBUNTU_REPO_URL
|
||||
else
|
||||
echo Not adding package source because $DISTRIBUTION is not supported. Please keep CryFS manually up to date. 1>&2
|
||||
exit 0
|
||||
fi
|
||||
}
|
||||
|
||||
get_apt_config () {
|
||||
apt-config dump|grep "$1 "|sed -e "s/^$1\ \"\([^\"]*\)\"\;/\1/g"
|
||||
}
|
||||
|
||||
sources_list_dir () {
|
||||
root=$(get_apt_config "Dir")
|
||||
etc=$(get_apt_config "Dir::Etc")
|
||||
sourceparts=$(get_apt_config "Dir::Etc::sourceparts")
|
||||
echo "$root/$etc/$sourceparts"
|
||||
}
|
||||
|
||||
add_repository () {
|
||||
dir=$(sources_list_dir)
|
||||
repo_url=$(get_repo_url)
|
||||
echo "deb $repo_url $DISTRIBUTION_VERSION main" > $dir/cryfs.list
|
||||
}
|
||||
|
||||
install_key () {
|
||||
# Key from http://www.cryfs.org/apt.key
|
||||
apt-key add - > /dev/null <<KEYEND
|
||||
-----BEGIN PGP PUBLIC KEY BLOCK-----
|
||||
Version: GnuPG v1
|
||||
|
||||
mQINBFZxifMBEAC/KdA6/pf+0P7CMFPPxEPi9bVe8blG0xwMiFVeq9QOPYIjGsoO
|
||||
vE/KsvqAN6ig1mMBfD7K9l2CZLX2srzSD3vHSg7btqciRVzKyEVasoNllKmqm8//
|
||||
3UvYpqWL1mZkxtey5aSlcYxUo7+TyUK76hLjEiuT55ZHJeg9WZGWyoBzniRJ1zbl
|
||||
FjYc44+5o1ec7lfEzIClU+IRw0t69j1x3BbClM57JSgmFu2f0cGwGXKYqHVMXIY/
|
||||
qflD3AkY+eokKXWoxg6V8LiyHMy0MArpaMpKTwSQ6gCy6EnPmiH3xuckkSXtWcjm
|
||||
hYCZZZ7LHY9UQymYweaLnJLUX/9TB5P7kN+mH1Gyh/A5HJZ9WKtL18GK/+iHMxSP
|
||||
Ma1g9kniAqZYbsynxgcyM0YuxrQ1wPA4+Z89Z5E9os6Jlo7CcMq8YUQKZd+/3B1o
|
||||
BP0vplSnQRJyWHS8Gfoovmxsfwe29eUIlfo7hbTRo/F7BZRZoTDJH2Agn4HX2Uj+
|
||||
hZUqlu/enB2KGNMBZpzLyLPi6oTBPQTmdyGjgEEvxgry+3QAlvbUYMrDkScyY9HX
|
||||
8VlUeA7qkEQzCqE0ometqO0VpGnvk+fu625G0jVz1z4DdroPBaMLhqKmo4lTVXtI
|
||||
f4U1LZqYS1oQj1Nw/tZefOSNuz4w1+K6xBT2ChVxRZzD41rcTkT28XR6CwARAQAB
|
||||
tB5DcnlGUyBUZWFtIDxtZXNzbWVyQGNyeWZzLm9yZz6JAj4EEwECACgFAlZxifMC
|
||||
GwMFCQeEzgAGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEAF9yTJUnmWycBUP
|
||||
/RE/HeFIXhl974XhdvNk8u5TBIleYYksyNDDJa/Wij1X4x9UbS7KT4dnwr0jY0pT
|
||||
zi2brSsPqyZiO77lNxyOR6r8klL76rQiOg3gCnZyClwOMxpT7/RBTGEUBRWuI4KQ
|
||||
dewSpK+g09dpokuewjuVI0mg62PPI+ddeLfs5NTLT2dN21ZFNGPYsWPtnTcPwkBw
|
||||
rbPkQ5yUpY0NXexilq7UzicEF31vCrar05RW81Oj3XjnjO4cTSqIRqbcRIrkZHSA
|
||||
UC+hltwxh0L9QTrqlEZ2FNC3cy9SsCrU1sq6hh+9tEjlKQLS9EMOLKl9T4Bg/Bjj
|
||||
u2Ksk6W3ZletDBoZNoI1r8dPbKIiYBimoGOcNi/CKG4yeS3LRMceVYraLgAeU6BU
|
||||
aKdTUOMLm5Z84KZ1/WWkKH0VnpnZ+BD8VQ+uriDvFk/KAcLHc1BMf0IgBm3/WBOO
|
||||
YncYH7mECkvD7PpNzh5n6cfotcc2ciZaOrYp5MHcGRGKvFOIfefuUcWf2fakFHSH
|
||||
Hy/HWW/t0HFqvKOrwqrDyxhm3Hh3dwFHuma3nlSsg+L2WhqwnSodAqUsrlawdHwz
|
||||
8FM+4kXQPUxYJQI3Y4jjU3nd/noUhsZk2ZTMM8YgdrqN1bu4G/Q2KKrfaTq0SxDI
|
||||
m7JCCcYzS9cZiGYklYjruPo9OClJJ8FJWdlgDyDkua9SuQINBFZxifMBEADFUvQB
|
||||
FCPLEpQyz/4GMXDRpi/LGnGdkDT8huhvCHOAPAY+FGBgTpE60F8jXRO43woG212L
|
||||
A6+EuLGdvkR3vZNTIrYR2CQPdpHNVrSHAumhE0eRTeb0Cao068Ryc/vfVyOVKuOz
|
||||
UDRi1VdBFBtsM2Zv0Se581t+C7X2x9LMNVTyal+cMKHUM2/2dtXoU/LkJJieop9s
|
||||
Pv3oIBfCclxuK2O+gFi4ktW+nBoYQmSdWhUA5vZGy/g8+v42PZyG+VK9N+XFYm8B
|
||||
NmUu8WsLIS3dkxgIUQ/agUHIo9NRGwpMEMYCh7Ene2/8xpP6JlrSYTFxeUedG4BU
|
||||
X0zngno+l9X9ehYqkrb7QpxZnDVlD9LhTO+aX8pxiAPDbnqr4tqp9Q10TStxuOJA
|
||||
JG4nwjSYlLxIFhAey91fYvS0r8M4/f1SQKBz/4X/DJdZUFJLQZc5sjwmtBXnmLTt
|
||||
wokuX0Ecnif2pnN1Z3DEvUgL9j85M15BQk8F3aLOA35HgoF25W8AVRX9TI61uk9P
|
||||
yDIkyqLI0W7LFIFEtuNL/pJ4aPCJo2H9e8Cvd8DDZpIhtnskncwly9asPmxQANAs
|
||||
rNViEOOKi/DYTXxpOF7qW//gSmp6E930QWo8GS4eobTw177dh3c0f2EkRIHTO/F8
|
||||
7Q2sIh1SiC4mly3s3cmNtoICbc6L9we40rIE+wARAQABiQIlBBgBAgAPBQJWcYnz
|
||||
AhsMBQkHhM4AAAoJEAF9yTJUnmWy4coQAI746UpKJAliqVmdUVwAiTadR3vz0Gyq
|
||||
E/HLqi6Q73pdJcMJ11ml9ZDJn/QOFi9l8V93YLLBxVx5i0RQMHQNtj2dGg1LYgVR
|
||||
VZLzfvBhJkh1/uulaYWh4CYGTvLVf8Oihl/PIAonrweDQdXZCG6VvkxFx46/sCPg
|
||||
oeO8NGKkrNFbA7ztQElqWkTuQs1tMgWvylCng2BrltPGKHqnAiYM1qAp2YdD+k9v
|
||||
kd3Ppd9AfgaCg5T5Mks+rNMeDfprcC6FdiaJBV0QXasfSVCv5ph/YwRbrkM+tKAW
|
||||
H9K51Myi7J9k5sIhsA1h6Bx3cjbNES4cnYUJEC2z6JmITzHP0uBWMwNoy9nykWpK
|
||||
qswsGo4iD0syLOCkDIr2h0NKcCf0D20HPT8javVWz9cBfcKcXlSdO2Ju8+ONCrBe
|
||||
ydEBA91yqM8k89fDUyhXI+e7hueUDZ+KUHC87wys82XyGQ/++AiPec3TTYVL2fiN
|
||||
Q3mgKsOK0D/WziJdtL1i15CDUjhg0fUq4jNeHVnq7LUb/CmG7MB3jPI/jD+/pxGV
|
||||
cGZtlxRJvXCjpaGJL0+AV0wE4rmkrpSAVC/9AeDi3l+tohUYB2WKOkXZEo90ShS2
|
||||
cwOccYvBiIYhNMbgr5qsqPEJhcfdwXUpgJjZgFn9Y8ZwIJgjCEZllU2iEMKtbqwv
|
||||
2oSFOf0c0GiV
|
||||
=l3s3
|
||||
-----END PGP PUBLIC KEY BLOCK-----
|
||||
KEYEND
|
||||
}
|
||||
|
||||
case "$1" in
|
||||
configure)
|
||||
install_key
|
||||
add_repository
|
||||
;;
|
||||
|
||||
abort-upgrade|abort-remove|abort-deconfigure)
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "postinst called with unknown argument '$1'" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
set +e
|
||||
|
||||
exit 0
|
@ -1,45 +0,0 @@
|
||||
#!/bin/bash
|
||||
# This script is called after the cryfs .deb package is uninstalled.
|
||||
# It removes the package source that was used to get automatic updates.
|
||||
|
||||
set -e
|
||||
|
||||
get_apt_config () {
|
||||
apt-config dump|grep "$1 "|sed -e "s/^$1\ \"\([^\"]*\)\"\;/\1/g"
|
||||
}
|
||||
|
||||
sources_list_dir () {
|
||||
root=$(get_apt_config "Dir")
|
||||
etc=$(get_apt_config "Dir::Etc")
|
||||
sourceparts=$(get_apt_config "Dir::Etc::sourceparts")
|
||||
echo $root$etc$sourceparts
|
||||
}
|
||||
|
||||
remove_repository () {
|
||||
dir=$(sources_list_dir)
|
||||
rm -f $dir/cryfs.list
|
||||
}
|
||||
|
||||
remove_key () {
|
||||
# Don't fail if key was already removed
|
||||
apt-key rm 549E65B2 2>&1 > /dev/null || true
|
||||
}
|
||||
|
||||
case "$1" in
|
||||
purge)
|
||||
remove_repository
|
||||
remove_key
|
||||
;;
|
||||
|
||||
abort-install|abort-upgrade|remove|upgrade|failed-upgrade)
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "postrm called with unknown argument '$1'" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
set +e
|
||||
|
||||
exit 0
|
@ -1,7 +0,0 @@
|
||||
<CPackWiXPatch>
|
||||
<CPackWiXFragment Id="CM_CP_bin.cryfs.exe">
|
||||
<Environment Id="MyPath" Action="set" Part="first"
|
||||
Name="PATH" Value="[INSTALL_ROOT]bin"
|
||||
System="yes"/>
|
||||
</CPackWiXFragment>
|
||||
</CPackWiXPatch>
|
@ -1,20 +0,0 @@
|
||||
project (doc)
|
||||
|
||||
IF (WIN32)
|
||||
MESSAGE(STATUS "This is Windows. Will not install man page")
|
||||
ELSE (WIN32)
|
||||
INCLUDE(GNUInstallDirs)
|
||||
|
||||
find_program(GZIP gzip)
|
||||
|
||||
add_custom_command(
|
||||
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/cryfs.1.gz
|
||||
COMMAND ${GZIP} -c ${CMAKE_CURRENT_SOURCE_DIR}/man/cryfs.1 > ${CMAKE_CURRENT_BINARY_DIR}/cryfs.1.gz
|
||||
)
|
||||
add_custom_target(man ALL DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/cryfs.1.gz)
|
||||
|
||||
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/cryfs.1.gz
|
||||
DESTINATION ${CMAKE_INSTALL_MANDIR}/man1
|
||||
CONFIGURATIONS Release
|
||||
)
|
||||
ENDIF(WIN32)
|
288
doc/man/cryfs.1
288
doc/man/cryfs.1
@ -1,288 +0,0 @@
|
||||
.\" cryfs(1) man page
|
||||
.
|
||||
.TH cryfs 1
|
||||
.
|
||||
.
|
||||
.
|
||||
.SH NAME
|
||||
cryfs \- cryptographic filesystem for the cloud
|
||||
.
|
||||
.
|
||||
.
|
||||
.SH SYNOPSIS
|
||||
.\" mount/create syntax
|
||||
.B cryfs
|
||||
[\fB\-c\fR \fIfile\fR]
|
||||
[\fB\-f\fR]
|
||||
[\fIoptions\fR]
|
||||
.I basedir mountpoint
|
||||
.br
|
||||
.\" show-ciphers syntax
|
||||
.B cryfs \-\-help\fR|\fB\-\-version\fR|\fB\-\-show-ciphers
|
||||
.
|
||||
.
|
||||
.
|
||||
.SH DESCRIPTION
|
||||
.
|
||||
.B CryFS
|
||||
encrypts your files, so you can safely store them anywhere.
|
||||
.PP
|
||||
.
|
||||
The goal of CryFS is not only to keep file contents, but also
|
||||
file sizes, metadata and directory structure confidential.
|
||||
CryFS uses
|
||||
.B encrypted same-size blocks
|
||||
to store both the files themselves and the block's relations to another.
|
||||
These blocks are stored as individual files in the base directory,
|
||||
which can then be synchronized with cloud services such as Dropbox.
|
||||
.PP
|
||||
.
|
||||
The blocks are encrypted using a random key, which is stored in a
|
||||
.B configuration file
|
||||
encrypted by the user's passphrase.
|
||||
By default, it will be stored together with the data in the base directory,
|
||||
but you can choose a different location if you do not want it in your cloud
|
||||
or when using a weak passphrase.
|
||||
.
|
||||
.
|
||||
.
|
||||
.SH USING CRYFS
|
||||
.
|
||||
.SS Selecting base and mount directories
|
||||
.
|
||||
While you can access your files through your
|
||||
.B mount directory,
|
||||
CryFS actually places them in your
|
||||
.B base directory
|
||||
after encrypting.
|
||||
CryFS will encrypt and decrypt your files 'on the fly' as they are accessed,
|
||||
so files will never be stored on the disk in unencrypted form.
|
||||
.PP
|
||||
.
|
||||
You can choose any empty directory as your base, but your mount directory
|
||||
should be outside of any cloud storage, as your cloud may try to sync your
|
||||
(temporarily mounted) unencrypted files as well.
|
||||
.
|
||||
.SS Setup and usage of your encrypted directory
|
||||
.
|
||||
.TP
|
||||
Creating and mounting your encrypted storage use the same command-line syntax:
|
||||
.B cryfs
|
||||
.I basedir mountpoint
|
||||
.PP
|
||||
.
|
||||
If CryFS detects an encrypted storage in the given base directory, you will
|
||||
be asked for the passphrase to unlock and mount it. Otherwise, CryFS will
|
||||
help you with creating one, just follow the on-screen instructions.
|
||||
.PP
|
||||
.
|
||||
.TP
|
||||
After you are done working with your encrypted files, unmount your storage \
|
||||
with the command
|
||||
.B cryfs-unmount
|
||||
.I mountpoint
|
||||
.
|
||||
.
|
||||
.SS Changing your passphrase
|
||||
.
|
||||
As the encryption key to your CryFS storage is stored in your configuration
|
||||
file, it would be possible to re-encrypt it using a different passphrase
|
||||
(although this feature has not been implemented yet).
|
||||
.PP
|
||||
.
|
||||
However, this does not change the actual encryption key of your storage, so
|
||||
someone with access to the old passphrase and configuration file (for example
|
||||
through the file history of your cloud or your file system) could still access
|
||||
your files, even those created after the password change.
|
||||
.PP
|
||||
.
|
||||
For this reason, the recommended way to change your passphrase is to create a
|
||||
new CryFS storage with the new passphrase and move your files from the old to
|
||||
the new one.
|
||||
.
|
||||
.
|
||||
.
|
||||
.SH OPTIONS
|
||||
.
|
||||
.SS Getting help
|
||||
.
|
||||
.TP
|
||||
\fB\-h\fR, \fB\-\-help\fR
|
||||
.
|
||||
Show a help message containing short descriptions for all options.
|
||||
.
|
||||
.
|
||||
.TP
|
||||
\fB\-\-show\-ciphers\fR
|
||||
.
|
||||
Show a list of all supported encryption ciphers.
|
||||
.
|
||||
.
|
||||
.TP
|
||||
\fB\-\-version\fR
|
||||
.
|
||||
Show the CryFS version number.
|
||||
.
|
||||
.
|
||||
.SS Encryption parameters
|
||||
.
|
||||
.TP
|
||||
\fB\-\-blocksize\fR \fIarg\fR
|
||||
.
|
||||
Set the block size to \fIarg\fR bytes. Defaults to
|
||||
.BR 32768 .
|
||||
.br
|
||||
\" Intentional space
|
||||
.br
|
||||
A higher block size may help reducing the file count in your base directory
|
||||
(especially when storing large files), but will also waste more space when
|
||||
storing smaller files.
|
||||
.
|
||||
.
|
||||
.TP
|
||||
\fB\-\-cipher\fR \fIarg\fR
|
||||
.
|
||||
Use \fIarg\fR as the cipher for the encryption. Defaults to
|
||||
.BR aes-256-gcm .
|
||||
.
|
||||
.
|
||||
.TP
|
||||
\fB\-c\fR \fIfile\fR, \fB\-\-config\fR \fIfile\fR
|
||||
.
|
||||
Use \fIfile\fR as configuration file for this CryFS storage instead of
|
||||
\fIbasedir\fR/cryfs.config
|
||||
.
|
||||
.
|
||||
.SS General options
|
||||
.
|
||||
.TP
|
||||
\fB\-f\fR, \fB\-\-foreground\fI
|
||||
.
|
||||
Run CryFS in the foreground. Stop using CTRL-C.
|
||||
.
|
||||
.
|
||||
.TP
|
||||
\fB\-\-allow-filesystem-upgrade\fI
|
||||
.
|
||||
Allow upgrading the file system if it was created with an old CryFS version. After the upgrade, older CryFS versions might not be able to use the file system anymore.
|
||||
.
|
||||
.
|
||||
.TP
|
||||
\fB\-\-allow-integrity-violations\fI
|
||||
.
|
||||
By default, CryFS checks for integrity violations, i.e. will notice if an adversary modified or rolled back the file system. Using this flag, you can disable the integrity checks. This can for example be helpful for loading an old snapshot of your file system without CryFS thinking an adversary rolled it back.
|
||||
.
|
||||
.
|
||||
.TP
|
||||
\fB\-\-allow-replaced-filesystem\fI
|
||||
.
|
||||
By default, CryFS remembers file systems it has seen in this base directory and checks that it didn't get replaced by an attacker with an entirely different file system since the last time it was loaded. However, if you do want to replace the file system with an entirely new one, you can pass in this option to disable the check.
|
||||
.
|
||||
.
|
||||
.TP
|
||||
\fB\-\-create-missing-basedir\fI
|
||||
.
|
||||
Creates the base directory even if there is no directory currently there, skipping the normal confirmation message to create it later.
|
||||
.
|
||||
.
|
||||
.TP
|
||||
\fB\-\-create-missing-mountpoint\fI
|
||||
.
|
||||
Creates the mountpoint even if there is no directory currently there, skipping the normal confirmation message to create it later.
|
||||
.
|
||||
.
|
||||
.TP
|
||||
\fB\-\-missing-block-is-integrity-violation\fR=true
|
||||
.
|
||||
When CryFS encounters a missing ciphertext block, it cannot cannot (yet) know if it was deleted by an unauthorized adversary or by a second authorized client. This is one of the restrictions of the integrity checks currently in place. You can enable this flag to treat missing ciphertext blocks as integrity violations, but then your file system will not be usable by multiple clients anymore. By default, this flag is disabled.
|
||||
.
|
||||
.
|
||||
.TP
|
||||
\fB\-\-logfile\fR \fIfile\fR
|
||||
.
|
||||
Write status information to \fIfile\fR. If no logfile is given, CryFS will
|
||||
write them to syslog in background mode, or to stdout in foreground mode.
|
||||
.
|
||||
.
|
||||
.TP
|
||||
\fB\-\-unmount\-idle\fR \fIarg\fR
|
||||
.
|
||||
Unmount automatically after \fIarg\fR minutes of inactivity.
|
||||
.
|
||||
.
|
||||
.
|
||||
.SH FUSE Options
|
||||
.
|
||||
.TP
|
||||
\fB\-o\fR \fIoption\fR, \fB\-\-fuse\-option\fR \fIoption\fR
|
||||
.
|
||||
Pass through options to the FUSE filesystem driver.
|
||||
|
||||
.TP
|
||||
For example:
|
||||
.TP
|
||||
\fB\-o\fR \fIallow_other\fR
|
||||
This option overrides the security measure restricting file
|
||||
access to the filesystem owner, so that all users (including
|
||||
root) can access the files.
|
||||
.TP
|
||||
\fB\-o\fR \fIallow_root\fR
|
||||
This option is similar to allow_other but file access is
|
||||
limited to the filesystem owner and root. This option and
|
||||
allow_other are mutually exclusive.
|
||||
.
|
||||
.
|
||||
.
|
||||
.SH ENVIRONMENT
|
||||
.
|
||||
.TP
|
||||
\fBCRYFS_FRONTEND\fR=noninteractive
|
||||
.
|
||||
With this option set, CryFS will only ask for the encryption passphrase once.
|
||||
Instead of asking the user for parameters not specified on the command line,
|
||||
it will just use the default values. CryFS will also not ask you to confirm
|
||||
your passphrase when creating a new CryFS storage.
|
||||
.br
|
||||
\" Intentional space
|
||||
.br
|
||||
Set this environment variable when automating CryFS using external tools or
|
||||
shell scripts.
|
||||
.
|
||||
.
|
||||
.TP
|
||||
\fBCRYFS_NO_UPDATE_CHECK\fR=true
|
||||
.
|
||||
By default, CryFS connects to the internet to check for known security
|
||||
vulnerabilities and new versions. This option disables this.
|
||||
.
|
||||
.
|
||||
.TP
|
||||
\fBCRYFS_LOCAL_STATE_DIR\fR=[path]
|
||||
.
|
||||
Sets the directory cryfs uses to store local state. This local state
|
||||
is used to recognize known file systems and run integrity checks
|
||||
(i.e. check that they haven't been modified by an attacker.
|
||||
Default value: ${HOME}/.cryfs
|
||||
.
|
||||
.
|
||||
.
|
||||
.SH SEE ALSO
|
||||
.
|
||||
.BR mount.fuse (1),
|
||||
.BR fusermount (1)
|
||||
.PP
|
||||
.
|
||||
For more information about the design of CryFS, visit
|
||||
.B https://www.cryfs.org
|
||||
.PP
|
||||
.
|
||||
Visit the development repository at
|
||||
.B https://github.com/cryfs/cryfs
|
||||
for the source code and the full list of contributors to CryFS.
|
||||
.
|
||||
.
|
||||
.
|
||||
.SH AUTHORS
|
||||
.
|
||||
CryFS was created by Sebastian Messmer and contributors.
|
||||
This man page was written by Maximilian Wende.
|
@ -1,26 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Note: Call this from a cmake build directory (e.g. cmake/) for out-of-source builds
|
||||
# Examples:
|
||||
# mkdir cmake && cd cmake && ../run-clang-tidy.sh
|
||||
# mkdir cmake && cd cmake && ../run-clang-tidy.sh -fix
|
||||
# mkdir cmake && cd cmake && ../run-clang-tidy.sh -export-fixes fixes.yaml
|
||||
|
||||
set -e
|
||||
|
||||
CXX=clang++-11
|
||||
CC=clang-11
|
||||
SCRIPT=run-clang-tidy-11.py
|
||||
|
||||
export NUMCORES=`nproc` && if [ ! -n "$NUMCORES" ]; then export NUMCORES=`sysctl -n hw.ncpu`; fi
|
||||
echo Using ${NUMCORES} cores
|
||||
|
||||
# Run cmake in current working directory, but on source that is in the same directory as this script file
|
||||
cmake -DBUILD_TESTING=on -DCMAKE_CXX_COMPILER=${CXX} -DCMAKE_C_COMPILER=${CC} -DCMAKE_EXPORT_COMPILE_COMMANDS=ON "${0%/*}"
|
||||
|
||||
# Filter all third party code from the compilation database
|
||||
cat compile_commands.json|jq "map(select(.file | test(\"^$(realpath ${0%/*})/(src|test)/.*$\")))" > compile_commands2.json
|
||||
rm compile_commands.json
|
||||
mv compile_commands2.json compile_commands.json
|
||||
|
||||
${SCRIPT} -j${NUMCORES} -quiet -header-filter "$(realpath ${0%/*})/(src|test)/.*" $@
|
33
run-iwyu.sh
33
run-iwyu.sh
@ -1,33 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Note: Call this from a cmake build directory (e.g. cmake/) for out-of-source builds
|
||||
# Examples:
|
||||
# mkdir cmake && cd cmake && ../run-iwqu.sh
|
||||
# mkdir cmake && cd cmake && ../run-iwqu.sh -fix
|
||||
|
||||
set -e
|
||||
|
||||
export NUMCORES=`nproc` && if [ ! -n "$NUMCORES" ]; then export NUMCORES=`sysctl -n hw.ncpu`; fi
|
||||
echo Using ${NUMCORES} cores
|
||||
|
||||
# Run cmake in current working directory, but on source that is in the same directory as this script file
|
||||
cmake -DBUILD_TESTING=on -DCMAKE_EXPORT_COMPILE_COMMANDS=ON "${0%/*}"
|
||||
|
||||
# Filter all third party code from the compilation database
|
||||
cat compile_commands.json|jq "map(select(.file | test(\"^$(realpath ${0%/*})/(src|test)/.*$\")))" > compile_commands2.json
|
||||
rm compile_commands.json
|
||||
mv compile_commands2.json compile_commands.json
|
||||
|
||||
if [ "$1" = "-fix" ]; then
|
||||
TMPFILE=/tmp/iwyu.`cat /dev/urandom | tr -cd 'a-f0-9' | head -c 8`.out
|
||||
|
||||
function cleanup {
|
||||
rm ${TMPFILE}
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
iwyu_tool -j${NUMCORES} -p. ${@:2} | tee ${TMPFILE}
|
||||
fix_include < ${TMPFILE}
|
||||
else
|
||||
iwyu_tool -j${NUMCORES} -p. $@
|
||||
fi
|
@ -3,10 +3,9 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR})
|
||||
add_subdirectory(gitversion)
|
||||
add_subdirectory(cpp-utils)
|
||||
add_subdirectory(fspp)
|
||||
add_subdirectory(jni)
|
||||
add_subdirectory(parallelaccessstore)
|
||||
add_subdirectory(blockstore)
|
||||
add_subdirectory(blobstore)
|
||||
add_subdirectory(cryfs)
|
||||
add_subdirectory(cryfs-cli)
|
||||
add_subdirectory(cryfs-unmount)
|
||||
add_subdirectory(stats)
|
||||
|
@ -3,7 +3,6 @@
|
||||
#include "KnownBlockVersions.h"
|
||||
#include <cpp-utils/data/SerializationHelper.h>
|
||||
#include <cpp-utils/process/SignalCatcher.h>
|
||||
#include <cpp-utils/io/ProgressBar.h>
|
||||
|
||||
using cpputils::Data;
|
||||
using cpputils::unique_ref;
|
||||
@ -203,14 +202,11 @@ void IntegrityBlockStore2::migrateFromBlockstoreWithoutVersionNumbers(BlockStore
|
||||
SignalCatcher signalCatcher;
|
||||
|
||||
KnownBlockVersions knownBlockVersions(integrityFilePath, myClientId);
|
||||
uint64_t numProcessedBlocks = 0;
|
||||
cpputils::ProgressBar progressbar("Migrating file system for integrity features. This can take a while...", baseBlockStore->numBlocks());
|
||||
baseBlockStore->forEachBlock([&] (const BlockId &blockId) {
|
||||
if (signalCatcher.signal_occurred()) {
|
||||
throw std::runtime_error("Caught signal");
|
||||
}
|
||||
migrateBlockFromBlockstoreWithoutVersionNumbers(baseBlockStore, blockId, &knownBlockVersions);
|
||||
progressbar.update(++numProcessedBlocks);
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -16,15 +16,11 @@ set(SOURCES
|
||||
tempfile/TempFile.cpp
|
||||
tempfile/TempDir.cpp
|
||||
network/HttpClient.cpp
|
||||
network/CurlHttpClient.cpp
|
||||
network/WinHttpClient.cpp
|
||||
network/FakeHttpClient.cpp
|
||||
io/Console.cpp
|
||||
io/DontEchoStdinToStdoutRAII.cpp
|
||||
io/IOStreamConsole.cpp
|
||||
io/NoninteractiveConsole.cpp
|
||||
io/pipestream.cpp
|
||||
io/ProgressBar.cpp
|
||||
thread/LoopThread.cpp
|
||||
thread/ThreadSystem.cpp
|
||||
thread/debugging_nonwindows.cpp
|
||||
@ -76,20 +72,13 @@ else()
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if (NOT MSVC)
|
||||
find_package(CURL REQUIRED)
|
||||
target_include_directories(${PROJECT_NAME} PUBLIC ${CURL_INCLUDE_DIRS})
|
||||
target_link_libraries(${PROJECT_NAME} PUBLIC ${CURL_LIBRARIES})
|
||||
else()
|
||||
target_link_libraries(${PROJECT_NAME} PUBLIC WinHttp)
|
||||
endif()
|
||||
|
||||
find_package(Threads REQUIRED)
|
||||
target_link_libraries(${PROJECT_NAME} PUBLIC ${CMAKE_THREAD_LIBS_INIT})
|
||||
|
||||
target_link_libraries(${PROJECT_NAME} PUBLIC ${CMAKE_DL_LIBS})
|
||||
|
||||
target_link_libraries(${PROJECT_NAME} PUBLIC CryfsDependencies_spdlog cryptopp CryfsDependencies_range-v3)
|
||||
target_link_libraries(${PROJECT_NAME} PUBLIC cryptopp boost spdlog)
|
||||
|
||||
target_add_boost(${PROJECT_NAME})
|
||||
target_enable_style_warnings(${PROJECT_NAME})
|
||||
|
@ -1,113 +0,0 @@
|
||||
#include "IOStreamConsole.h"
|
||||
#include <boost/algorithm/string/trim.hpp>
|
||||
#include "DontEchoStdinToStdoutRAII.h"
|
||||
#include <cpp-utils/assert/assert.h>
|
||||
|
||||
using std::ostream;
|
||||
using std::istream;
|
||||
using std::string;
|
||||
using std::vector;
|
||||
using std::flush;
|
||||
using std::function;
|
||||
using boost::optional;
|
||||
using boost::none;
|
||||
|
||||
namespace cpputils {
|
||||
|
||||
IOStreamConsole::IOStreamConsole(): IOStreamConsole(std::cout, std::cin) {
|
||||
}
|
||||
|
||||
IOStreamConsole::IOStreamConsole(ostream &output, istream &input): _output(output), _input(input) {
|
||||
}
|
||||
|
||||
optional<int> IOStreamConsole::_parseInt(const string &str) {
|
||||
try {
|
||||
string trimmed = str;
|
||||
boost::algorithm::trim(trimmed);
|
||||
int parsed = std::stoi(str);
|
||||
if (std::to_string(parsed) != trimmed) {
|
||||
return none;
|
||||
}
|
||||
return parsed;
|
||||
} catch (const std::invalid_argument &e) {
|
||||
return none;
|
||||
} catch (const std::out_of_range &e) {
|
||||
return none;
|
||||
}
|
||||
}
|
||||
|
||||
function<optional<unsigned int>(const string &input)> IOStreamConsole::_parseUIntWithMinMax(unsigned int min, unsigned int max) {
|
||||
return [min, max] (const string &input) {
|
||||
optional<int> parsed = _parseInt(input);
|
||||
if (parsed == none) {
|
||||
return optional<unsigned int>(none);
|
||||
}
|
||||
unsigned int value = static_cast<unsigned int>(*parsed);
|
||||
if (value < min || value > max) {
|
||||
return optional<unsigned int>(none);
|
||||
}
|
||||
return optional<unsigned int>(value);
|
||||
};
|
||||
}
|
||||
|
||||
template<typename Return>
|
||||
Return IOStreamConsole::_askForChoice(const string &question, function<optional<Return> (const string&)> parse) {
|
||||
optional<Return> choice = none;
|
||||
do {
|
||||
_output << question << flush;
|
||||
string choiceStr;
|
||||
getline(_input, choiceStr);
|
||||
choice = parse(choiceStr);
|
||||
} while(choice == none);
|
||||
return *choice;
|
||||
}
|
||||
|
||||
unsigned int IOStreamConsole::ask(const string &question, const vector<string> &options) {
|
||||
if(options.size() == 0) {
|
||||
throw std::invalid_argument("options should have at least one entry");
|
||||
}
|
||||
_output << question << "\n";
|
||||
for (size_t i = 0; i < options.size(); ++i) {
|
||||
_output << " [" << (i+1) << "] " << options[i] << "\n";
|
||||
}
|
||||
int choice = _askForChoice("Your choice [1-" + std::to_string(options.size()) + "]: ", _parseUIntWithMinMax(1, options.size()));
|
||||
return choice-1;
|
||||
}
|
||||
|
||||
function<optional<bool>(const string &input)> IOStreamConsole::_parseYesNo() {
|
||||
return [] (const string &input) {
|
||||
string trimmed = input;
|
||||
boost::algorithm::trim(trimmed);
|
||||
if(trimmed == "Y" || trimmed == "y" || trimmed == "Yes" || trimmed == "yes") {
|
||||
return optional<bool>(true);
|
||||
} else if (trimmed == "N" || trimmed == "n" || trimmed == "No" || trimmed == "no") {
|
||||
return optional<bool>(false);
|
||||
} else {
|
||||
return optional<bool>(none);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
bool IOStreamConsole::askYesNo(const string &question, bool /*defaultValue*/) {
|
||||
_output << question << "\n";
|
||||
return _askForChoice("Your choice [y/n]: ", _parseYesNo());
|
||||
}
|
||||
|
||||
void IOStreamConsole::print(const string &output) {
|
||||
_output << output << std::flush;
|
||||
}
|
||||
|
||||
string IOStreamConsole::askPassword(const string &question) {
|
||||
DontEchoStdinToStdoutRAII _stdin_input_is_hidden_as_long_as_this_is_in_scope;
|
||||
|
||||
_output << question << std::flush;
|
||||
string result;
|
||||
std::getline(_input, result);
|
||||
_output << std::endl;
|
||||
|
||||
ASSERT(result.size() == 0 || result[result.size() - 1] != '\n', "Unexpected std::getline() behavior");
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
}
|
@ -1,31 +0,0 @@
|
||||
#pragma once
|
||||
#ifndef MESSMER_CPPUTILS_IO_IOSTREAMCONSOLE_H
|
||||
#define MESSMER_CPPUTILS_IO_IOSTREAMCONSOLE_H
|
||||
|
||||
#include "Console.h"
|
||||
|
||||
namespace cpputils {
|
||||
|
||||
class IOStreamConsole final: public Console {
|
||||
public:
|
||||
IOStreamConsole();
|
||||
IOStreamConsole(std::ostream &output, std::istream &input);
|
||||
unsigned int ask(const std::string &question, const std::vector<std::string> &options) override;
|
||||
bool askYesNo(const std::string &question, bool defaultValue) override;
|
||||
void print(const std::string &output) override;
|
||||
std::string askPassword(const std::string &question) override;
|
||||
private:
|
||||
template<typename Return>
|
||||
Return _askForChoice(const std::string &question, std::function<boost::optional<Return> (const std::string&)> parse);
|
||||
static std::function<boost::optional<bool>(const std::string &input)> _parseYesNo();
|
||||
static std::function<boost::optional<unsigned int>(const std::string &input)> _parseUIntWithMinMax(unsigned int min, unsigned int max);
|
||||
static boost::optional<int> _parseInt(const std::string &str);
|
||||
|
||||
std::ostream &_output;
|
||||
std::istream &_input;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(IOStreamConsole);
|
||||
};
|
||||
|
||||
}
|
||||
#endif
|
@ -1,35 +0,0 @@
|
||||
#include "ProgressBar.h"
|
||||
#include <iostream>
|
||||
#include <limits>
|
||||
#include <mutex>
|
||||
#include "IOStreamConsole.h"
|
||||
|
||||
using std::string;
|
||||
|
||||
namespace cpputils {
|
||||
|
||||
ProgressBar::ProgressBar(const char* preamble, uint64_t max_value)
|
||||
: ProgressBar(std::make_shared<IOStreamConsole>(), preamble, max_value) {}
|
||||
|
||||
ProgressBar::ProgressBar(std::shared_ptr<Console> console, const char* preamble, uint64_t max_value)
|
||||
: _console(std::move(console))
|
||||
, _preamble(string("\r") + preamble + " ")
|
||||
, _max_value(max_value)
|
||||
, _lastPercentage(std::numeric_limits<decltype(_lastPercentage)>::max()) {
|
||||
ASSERT(_max_value > 0, "Progress bar can't handle max_value of 0");
|
||||
|
||||
_console->print("\n");
|
||||
|
||||
// show progress bar. _lastPercentage is different to zero, so it shows.
|
||||
update(0);
|
||||
}
|
||||
|
||||
void ProgressBar::update(uint64_t value) {
|
||||
const size_t percentage = (100 * value) / _max_value;
|
||||
if (percentage != _lastPercentage) {
|
||||
_console->print(_preamble + std::to_string(percentage) + "%");
|
||||
_lastPercentage = percentage;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
@ -1,31 +0,0 @@
|
||||
#pragma once
|
||||
#ifndef MESSMER_CPPUTILS_IO_PROGRESSBAR_H
|
||||
#define MESSMER_CPPUTILS_IO_PROGRESSBAR_H
|
||||
|
||||
#include <cpp-utils/macros.h>
|
||||
#include <string>
|
||||
#include <memory>
|
||||
#include "Console.h"
|
||||
|
||||
namespace cpputils {
|
||||
|
||||
class ProgressBar final {
|
||||
public:
|
||||
explicit ProgressBar(std::shared_ptr<Console> console, const char* preamble, uint64_t max_value);
|
||||
explicit ProgressBar(const char* preamble, uint64_t max_value);
|
||||
|
||||
void update(uint64_t value);
|
||||
|
||||
private:
|
||||
|
||||
std::shared_ptr<Console> _console;
|
||||
std::string _preamble;
|
||||
uint64_t _max_value;
|
||||
size_t _lastPercentage;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(ProgressBar);
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif
|
@ -3,10 +3,9 @@
|
||||
#define MESSMER_CPPUTILS_LOGGING_LOGGER_H
|
||||
|
||||
#include <spdlog/spdlog.h>
|
||||
#include <spdlog/sinks/android_sink.h>
|
||||
#include "../macros.h"
|
||||
|
||||
#include <spdlog/sinks/stdout_sinks.h>
|
||||
|
||||
namespace cpputils {
|
||||
namespace logging {
|
||||
class Logger final {
|
||||
@ -33,7 +32,7 @@ namespace logging {
|
||||
private:
|
||||
|
||||
static std::shared_ptr<spdlog::logger> _defaultLogger() {
|
||||
static auto singleton = spdlog::stderr_logger_mt("Log");
|
||||
static auto singleton = spdlog::android_logger_mt("libcryfs");
|
||||
return singleton;
|
||||
}
|
||||
|
||||
|
@ -5,13 +5,6 @@
|
||||
#include "Logger.h"
|
||||
#include <stdexcept>
|
||||
#include <spdlog/fmt/ostr.h>
|
||||
#include <spdlog/sinks/basic_file_sink.h>
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
#include <spdlog/sinks/msvc_sink.h>
|
||||
#else
|
||||
#include <spdlog/sinks/syslog_sink.h>
|
||||
#endif
|
||||
|
||||
namespace cpputils {
|
||||
namespace logging {
|
||||
@ -77,14 +70,6 @@ namespace cpputils {
|
||||
inline void LOG(DEBUG_TYPE, const char* fmt, const Args&... args) {
|
||||
logger()->debug(fmt, args...);
|
||||
}
|
||||
|
||||
inline std::shared_ptr<spdlog::logger> system_logger(const std::string& name) {
|
||||
#if defined(_MSC_VER)
|
||||
return spdlog::create<spdlog::sinks::msvc_sink_mt>(name);
|
||||
#else
|
||||
return spdlog::syslog_logger_mt(name, name, LOG_PID);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,73 +0,0 @@
|
||||
// Base version taken from https://techoverflow.net/blog/2013/03/15/c-simple-http-download-using-libcurl-easy-api/
|
||||
|
||||
#if !defined(_MSC_VER)
|
||||
|
||||
#include "CurlHttpClient.h"
|
||||
#include <sstream>
|
||||
#include <iostream>
|
||||
|
||||
using boost::none;
|
||||
using boost::optional;
|
||||
using std::string;
|
||||
using std::ostringstream;
|
||||
using std::mutex;
|
||||
using std::unique_lock;
|
||||
|
||||
namespace cpputils {
|
||||
|
||||
mutex CurlHttpClient::CurlInitializerRAII::_mutex;
|
||||
uint32_t CurlHttpClient::CurlInitializerRAII::_refcount = 0;
|
||||
|
||||
CurlHttpClient::CurlInitializerRAII::CurlInitializerRAII() {
|
||||
unique_lock<mutex> lock(_mutex);
|
||||
if (0 == _refcount) {
|
||||
curl_global_init(CURL_GLOBAL_ALL);
|
||||
}
|
||||
_refcount += 1;
|
||||
}
|
||||
|
||||
CurlHttpClient::CurlInitializerRAII::~CurlInitializerRAII() {
|
||||
unique_lock<mutex> lock(_mutex);
|
||||
_refcount -= 1;
|
||||
if (0 == _refcount) {
|
||||
curl_global_cleanup();
|
||||
}
|
||||
}
|
||||
|
||||
size_t CurlHttpClient::write_data(void *ptr, size_t size, size_t nmemb, ostringstream *stream) {
|
||||
stream->write(static_cast<const char *>(ptr), size * nmemb);
|
||||
return size * nmemb;
|
||||
}
|
||||
|
||||
CurlHttpClient::CurlHttpClient(): curlInitializer(), curl() {
|
||||
curl = curl_easy_init();
|
||||
}
|
||||
|
||||
CurlHttpClient::~CurlHttpClient() {
|
||||
curl_easy_cleanup(curl);
|
||||
}
|
||||
|
||||
string CurlHttpClient::get(const string &url, optional<long> timeoutMsec) {
|
||||
curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
|
||||
// example.com is redirected, so we tell libcurl to follow redirection
|
||||
curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
|
||||
curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1); //Prevent "longjmp causes uninitialized stack frame" bug
|
||||
curl_easy_setopt(curl, CURLOPT_ENCODING, "deflate");
|
||||
ostringstream out;
|
||||
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, &CurlHttpClient::write_data);
|
||||
curl_easy_setopt(curl, CURLOPT_WRITEDATA, &out);
|
||||
if (timeoutMsec != none) {
|
||||
curl_easy_setopt(curl, CURLOPT_TIMEOUT_MS, *timeoutMsec);
|
||||
}
|
||||
// Perform the request, res will get the return code
|
||||
CURLcode res = curl_easy_perform(curl);
|
||||
// Check for errors
|
||||
if (res != CURLE_OK) {
|
||||
throw std::runtime_error("Curl Error " + std::to_string(res) + ": " + curl_easy_strerror(res));
|
||||
}
|
||||
return out.str();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#endif
|
@ -1,48 +0,0 @@
|
||||
#pragma once
|
||||
#ifndef MESSMER_CPPUTILS_NETWORK_CURLHTTPCLIENT_HPP
|
||||
#define MESSMER_CPPUTILS_NETWORK_CURLHTTPCLIENT_HPP
|
||||
|
||||
#if !defined(_MSC_VER)
|
||||
|
||||
#include "HttpClient.h"
|
||||
#include "../macros.h"
|
||||
#include <mutex>
|
||||
#include <curl/curl.h>
|
||||
|
||||
namespace cpputils {
|
||||
|
||||
class CurlHttpClient final : public HttpClient {
|
||||
public:
|
||||
CurlHttpClient();
|
||||
|
||||
~CurlHttpClient();
|
||||
|
||||
std::string get(const std::string &url, boost::optional<long> timeoutMsec = boost::none) override;
|
||||
|
||||
private:
|
||||
// When the first object of this class is created, it will initialize curl using curl_global_init().
|
||||
// When the last object is destroyed, it will deinitialize curl using curl_global_cleanup().
|
||||
class CurlInitializerRAII final {
|
||||
public:
|
||||
CurlInitializerRAII();
|
||||
~CurlInitializerRAII();
|
||||
private:
|
||||
static std::mutex _mutex;
|
||||
static uint32_t _refcount;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(CurlInitializerRAII);
|
||||
};
|
||||
|
||||
CurlInitializerRAII curlInitializer;
|
||||
CURL *curl;
|
||||
|
||||
static size_t write_data(void *ptr, size_t size, size_t nmemb, std::ostringstream *stream);
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(CurlHttpClient);
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
@ -1,260 +0,0 @@
|
||||
#if defined(_MSC_VER)
|
||||
|
||||
#include "WinHttpClient.h"
|
||||
#include <sstream>
|
||||
#include <iostream>
|
||||
#include <cpp-utils/assert/assert.h>
|
||||
#include <cpp-utils/data/Data.h>
|
||||
#include <codecvt>
|
||||
#include <Windows.h>
|
||||
#include <Winhttp.h>
|
||||
#include <VersionHelpers.h>
|
||||
|
||||
using boost::none;
|
||||
using boost::optional;
|
||||
using std::string;
|
||||
using std::wstring;
|
||||
using std::wstring_convert;
|
||||
using std::ostringstream;
|
||||
|
||||
namespace cpputils {
|
||||
|
||||
namespace {
|
||||
struct HttpHandleRAII final {
|
||||
HINTERNET handle;
|
||||
|
||||
HttpHandleRAII(HINTERNET handle_) : handle(handle_) {}
|
||||
|
||||
HttpHandleRAII(HttpHandleRAII&& rhs) : handle(rhs.handle) {
|
||||
rhs.handle = nullptr;
|
||||
}
|
||||
|
||||
~HttpHandleRAII() {
|
||||
if (nullptr != handle) {
|
||||
BOOL success = WinHttpCloseHandle(handle);
|
||||
if (!success) {
|
||||
throw std::runtime_error("Error calling WinHttpCloseHandle. Error code: " + std::to_string(GetLastError()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(HttpHandleRAII);
|
||||
};
|
||||
|
||||
URL_COMPONENTS parse_url(const wstring &url) {
|
||||
URL_COMPONENTS result;
|
||||
result.dwStructSize = sizeof(result);
|
||||
// Declare fields we want. Setting a field to nullptr and the length to non-zero means the field will be returned.
|
||||
result.lpszScheme = nullptr;
|
||||
result.dwSchemeLength = 1;
|
||||
result.lpszHostName = nullptr;
|
||||
result.dwHostNameLength = 1;
|
||||
result.lpszUserName = nullptr;
|
||||
result.dwUserNameLength = 1;
|
||||
result.lpszPassword = nullptr;
|
||||
result.dwPasswordLength = 1;
|
||||
result.lpszUrlPath = nullptr;
|
||||
result.dwUrlPathLength = 1;
|
||||
result.lpszExtraInfo = nullptr;
|
||||
result.dwExtraInfoLength = 1;
|
||||
|
||||
BOOL success = WinHttpCrackUrl(url.c_str(), url.size(), ICU_REJECT_USERPWD, &result);
|
||||
if (!success) {
|
||||
throw std::runtime_error("Error parsing url '" + wstring_convert<std::codecvt_utf8_utf16<wchar_t>>().to_bytes(url) + "'. Error code: " + std::to_string(GetLastError()));
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
INTERNET_PORT get_port_from_url(const URL_COMPONENTS& parsedUrl) {
|
||||
wstring scheme_str(parsedUrl.lpszScheme, parsedUrl.dwSchemeLength);
|
||||
string s_(wstring_convert < std::codecvt_utf8_utf16<wchar_t>>().to_bytes(scheme_str));
|
||||
if (parsedUrl.nScheme == INTERNET_SCHEME_HTTP) {
|
||||
ASSERT(scheme_str == L"http", "Scheme mismatch");
|
||||
if (parsedUrl.nPort != 80) {
|
||||
throw std::runtime_error("We don't support non-default ports");
|
||||
}
|
||||
return INTERNET_DEFAULT_HTTP_PORT;
|
||||
}
|
||||
else if (parsedUrl.nScheme == INTERNET_SCHEME_HTTPS) {
|
||||
ASSERT(scheme_str == L"https", "Scheme mismatch");
|
||||
if (parsedUrl.nPort != 443) {
|
||||
throw std::runtime_error("We don't support non-default ports");
|
||||
}
|
||||
return INTERNET_DEFAULT_HTTPS_PORT;
|
||||
}
|
||||
else {
|
||||
throw std::runtime_error("Unsupported scheme: " + wstring_convert<std::codecvt_utf8_utf16<wchar_t>>().to_bytes(scheme_str));
|
||||
}
|
||||
}
|
||||
|
||||
class Request final {
|
||||
public:
|
||||
Request(HttpHandleRAII request) : request_(std::move(request)) {}
|
||||
|
||||
void set_redirect_policy(DWORD redirectPolicy) {
|
||||
BOOL success = WinHttpSetOption(request_.handle, WINHTTP_OPTION_REDIRECT_POLICY, &redirectPolicy, sizeof(redirectPolicy));
|
||||
if (!success) {
|
||||
throw std::runtime_error("Error calling WinHttpSetOption. Error code: " + std::to_string(GetLastError()));
|
||||
}
|
||||
}
|
||||
|
||||
void set_timeouts(long timeoutMsec) {
|
||||
// TODO Timeout should be a total timeout, not per step as we're doing it here.
|
||||
BOOL success = WinHttpSetTimeouts(request_.handle, timeoutMsec, timeoutMsec, timeoutMsec, timeoutMsec);
|
||||
if (!success) {
|
||||
throw std::runtime_error("Error calling WinHttpSetTimeouts. Error code: " + std::to_string(GetLastError()));
|
||||
}
|
||||
}
|
||||
|
||||
void send() {
|
||||
BOOL success = WinHttpSendRequest(request_.handle, WINHTTP_NO_ADDITIONAL_HEADERS, 0, WINHTTP_NO_REQUEST_DATA, 0, 0, 0);
|
||||
if (!success) {
|
||||
throw std::runtime_error("Error calling WinHttpSendRequest. Error code: " + std::to_string(GetLastError()));
|
||||
}
|
||||
}
|
||||
|
||||
void wait_for_response() {
|
||||
BOOL success = WinHttpReceiveResponse(request_.handle, nullptr);
|
||||
if (!success) {
|
||||
throw std::runtime_error("Error calling WinHttpReceiveResponse. Error code: " + std::to_string(GetLastError()));
|
||||
}
|
||||
}
|
||||
|
||||
DWORD get_status_code() {
|
||||
DWORD statusCode;
|
||||
DWORD statusCodeSize = sizeof(statusCode);
|
||||
BOOL success = WinHttpQueryHeaders(request_.handle, WINHTTP_QUERY_STATUS_CODE | WINHTTP_QUERY_FLAG_NUMBER, WINHTTP_HEADER_NAME_BY_INDEX, &statusCode, &statusCodeSize, WINHTTP_NO_HEADER_INDEX);
|
||||
if (!success) {
|
||||
throw std::runtime_error("Eror calling WinHttpQueryHeaders. Error code: " + std::to_string(GetLastError()));
|
||||
}
|
||||
return statusCode;
|
||||
}
|
||||
|
||||
string read_response() {
|
||||
ostringstream result;
|
||||
|
||||
while (true) {
|
||||
DWORD size = num_bytes_readable();
|
||||
if (size == 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
cpputils::Data buffer(size + 1);
|
||||
buffer.FillWithZeroes();
|
||||
|
||||
DWORD num_read;
|
||||
BOOL success = WinHttpReadData(request_.handle, buffer.data(), buffer.size(), &num_read);
|
||||
if (!success) {
|
||||
throw std::runtime_error("Error calling WinHttpReadData. Error code: " + std::to_string(GetLastError()));
|
||||
}
|
||||
ASSERT(0 != num_read, "Weird behavior of WinHttpReadData.It should never read zero bytes since WinHttpQueryDataAvailable said there are bytes readable.");
|
||||
|
||||
result.write(reinterpret_cast<char*>(buffer.data()), num_read);
|
||||
ASSERT(result.good(), "Error writing to ostringstream");
|
||||
}
|
||||
|
||||
return result.str();
|
||||
}
|
||||
|
||||
private:
|
||||
DWORD num_bytes_readable() {
|
||||
DWORD result;
|
||||
BOOL success = WinHttpQueryDataAvailable(request_.handle, &result);
|
||||
if (!success) {
|
||||
throw std::runtime_error("Error calling WinHttpQueryDataAvailable. Error code: " + std::to_string(GetLastError()));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
HttpHandleRAII request_;
|
||||
};
|
||||
|
||||
struct Connection final {
|
||||
public:
|
||||
Connection(HttpHandleRAII connection) : connection_(std::move(connection)) {}
|
||||
|
||||
Request create_request(const URL_COMPONENTS& parsedUrl) {
|
||||
const INTERNET_PORT port = get_port_from_url(parsedUrl);
|
||||
const wstring path = wstring(parsedUrl.lpszUrlPath, parsedUrl.dwUrlPathLength) + wstring(parsedUrl.lpszExtraInfo, parsedUrl.dwExtraInfoLength);
|
||||
const DWORD flags = (port == INTERNET_DEFAULT_HTTPS_PORT) ? WINHTTP_FLAG_SECURE : 0;
|
||||
|
||||
HttpHandleRAII request_handle(WinHttpOpenRequest(connection_.handle, L"GET", path.c_str(), nullptr, WINHTTP_NO_REFERER, WINHTTP_DEFAULT_ACCEPT_TYPES, flags));
|
||||
if (nullptr == request_handle.handle) {
|
||||
throw std::runtime_error("Error calling WinHttpOpenRequest. Error code: " + std::to_string(GetLastError()));
|
||||
}
|
||||
return Request(std::move(request_handle));
|
||||
}
|
||||
|
||||
private:
|
||||
HttpHandleRAII connection_;
|
||||
};
|
||||
}
|
||||
|
||||
struct WinHttpSession final {
|
||||
public:
|
||||
WinHttpSession(HttpHandleRAII session) : session_(std::move(session)) {}
|
||||
|
||||
Connection create_connection(const URL_COMPONENTS& parsedUrl) {
|
||||
const INTERNET_PORT port = get_port_from_url(parsedUrl);
|
||||
const wstring host(parsedUrl.lpszHostName, parsedUrl.dwHostNameLength);
|
||||
|
||||
HttpHandleRAII connection_handle = WinHttpConnect(session_.handle, host.c_str(), port, 0);
|
||||
if (nullptr == connection_handle.handle) {
|
||||
throw std::runtime_error("Error calling WinHttpConnect. Error code: " + std::to_string(GetLastError()));
|
||||
}
|
||||
|
||||
return Connection(std::move(connection_handle));
|
||||
}
|
||||
|
||||
private:
|
||||
HttpHandleRAII session_;
|
||||
};
|
||||
|
||||
namespace {
|
||||
cpputils::unique_ref<WinHttpSession> create_session() {
|
||||
const DWORD dwAccessType = IsWindows8Point1OrGreater() ? WINHTTP_ACCESS_TYPE_AUTOMATIC_PROXY : WINHTTP_ACCESS_TYPE_DEFAULT_PROXY;
|
||||
HttpHandleRAII session_handle = WinHttpOpen(L"cpputils::HttpClient", dwAccessType, WINHTTP_NO_PROXY_NAME, WINHTTP_NO_PROXY_BYPASS, 0);
|
||||
if(nullptr == session_handle.handle) {
|
||||
throw std::runtime_error("Error calling WinHttpOpen. Error code: " + std::to_string(GetLastError()));
|
||||
}
|
||||
|
||||
return cpputils::make_unique_ref<WinHttpSession>(std::move(session_handle));
|
||||
}
|
||||
}
|
||||
|
||||
WinHttpClient::WinHttpClient() : session_(create_session()) {}
|
||||
|
||||
WinHttpClient::~WinHttpClient() {}
|
||||
|
||||
string WinHttpClient::get(const string &url, optional<long> timeoutMsec) {
|
||||
wstring wurl = wstring_convert<std::codecvt_utf8_utf16<wchar_t>>().from_bytes(url);
|
||||
const URL_COMPONENTS parsedUrl = parse_url(wurl);
|
||||
|
||||
ASSERT(parsedUrl.dwUserNameLength == 0, "Authentication not supported");
|
||||
ASSERT(parsedUrl.dwPasswordLength == 0, "Authentication not supported");
|
||||
|
||||
Connection connection = session_->create_connection(parsedUrl);
|
||||
Request request = connection.create_request(parsedUrl);
|
||||
|
||||
// allow redirects but not from https to http
|
||||
request.set_redirect_policy(WINHTTP_OPTION_REDIRECT_POLICY_DISALLOW_HTTPS_TO_HTTP);
|
||||
|
||||
if (timeoutMsec != none) {
|
||||
request.set_timeouts(*timeoutMsec);
|
||||
}
|
||||
|
||||
request.send();
|
||||
request.wait_for_response();
|
||||
|
||||
DWORD statusCode = request.get_status_code();
|
||||
if (statusCode != HTTP_STATUS_OK) {
|
||||
throw std::runtime_error("HTTP Server returned unsupported status code: " + std::to_string(statusCode));
|
||||
}
|
||||
|
||||
return request.read_response();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#endif
|
@ -1,31 +0,0 @@
|
||||
#pragma once
|
||||
#ifndef MESSMER_CPPUTILS_NETWORK_WINHTTPCLIENT_HPP
|
||||
#define MESSMER_CPPUTILS_NETWORK_WINHTTPCLIENT_HPP
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
|
||||
#include "HttpClient.h"
|
||||
#include "../macros.h"
|
||||
#include "../pointer/unique_ref.h"
|
||||
|
||||
namespace cpputils {
|
||||
|
||||
class WinHttpSession;
|
||||
|
||||
class WinHttpClient final : public HttpClient {
|
||||
public:
|
||||
WinHttpClient();
|
||||
~WinHttpClient();
|
||||
|
||||
std::string get(const std::string &url, boost::optional<long> timeoutMsec = boost::none) override;
|
||||
|
||||
private:
|
||||
unique_ref<WinHttpSession> session_;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(WinHttpClient);
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
#endif
|
@ -14,6 +14,8 @@
|
||||
#include <sys/types.h>
|
||||
#endif
|
||||
|
||||
#include <array>
|
||||
|
||||
namespace cpputils {
|
||||
|
||||
namespace {
|
||||
|
@ -8,7 +8,6 @@ set(SOURCES
|
||||
Environment.cpp
|
||||
program_options/utils.cpp
|
||||
program_options/ProgramOptions.cpp
|
||||
program_options/Parser.cpp
|
||||
)
|
||||
|
||||
add_library(${PROJECT_NAME} ${SOURCES})
|
||||
@ -19,13 +18,3 @@ target_activate_cpp14(${PROJECT_NAME})
|
||||
if(NOT CRYFS_UPDATE_CHECKS)
|
||||
target_compile_definitions(${PROJECT_NAME} PRIVATE -DCRYFS_NO_UPDATE_CHECKS)
|
||||
endif(NOT CRYFS_UPDATE_CHECKS)
|
||||
|
||||
add_executable(${PROJECT_NAME}_bin main.cpp)
|
||||
set_target_properties(${PROJECT_NAME}_bin PROPERTIES OUTPUT_NAME cryfs)
|
||||
target_link_libraries(${PROJECT_NAME}_bin PUBLIC ${PROJECT_NAME})
|
||||
target_enable_style_warnings(${PROJECT_NAME}_bin)
|
||||
target_activate_cpp14(${PROJECT_NAME}_bin)
|
||||
|
||||
install(TARGETS ${PROJECT_NAME}_bin
|
||||
DESTINATION ${CMAKE_INSTALL_BINDIR}
|
||||
)
|
||||
|
@ -6,14 +6,12 @@
|
||||
#include <cstdlib>
|
||||
#include <cpp-utils/assert/backtrace.h>
|
||||
|
||||
#include <fspp/fuse/Fuse.h>
|
||||
#include <fspp/impl/FilesystemImpl.h>
|
||||
#include <cpp-utils/process/subprocess.h>
|
||||
#include <cpp-utils/io/DontEchoStdinToStdoutRAII.h>
|
||||
#include <cryfs/impl/filesystem/CryDevice.h>
|
||||
#include <cryfs/impl/config/CryConfigLoader.h>
|
||||
#include <cryfs/impl/config/CryPasswordBasedKeyProvider.h>
|
||||
#include "program_options/Parser.h"
|
||||
#include <cryfs/impl/config/CryPresetPasswordBasedKeyProvider.h>
|
||||
#include <boost/filesystem.hpp>
|
||||
|
||||
#include <cryfs/impl/filesystem/CryDir.h>
|
||||
@ -49,7 +47,6 @@ using cpputils::either;
|
||||
using cpputils::SCryptSettings;
|
||||
using cpputils::Console;
|
||||
using cpputils::HttpClient;
|
||||
using std::cout;
|
||||
using std::string;
|
||||
using std::endl;
|
||||
using std::shared_ptr;
|
||||
@ -71,114 +68,7 @@ using gitversion::VersionCompare;
|
||||
|
||||
namespace cryfs_cli {
|
||||
|
||||
Cli::Cli(RandomGenerator &keyGenerator, const SCryptSettings &scryptSettings, shared_ptr<Console> console):
|
||||
_keyGenerator(keyGenerator), _scryptSettings(scryptSettings), _console(), _noninteractive(false), _idleUnmounter(none), _device(none) {
|
||||
_noninteractive = Environment::isNoninteractive();
|
||||
if (_noninteractive) {
|
||||
_console = make_shared<NoninteractiveConsole>(console);
|
||||
} else {
|
||||
_console = console;
|
||||
}
|
||||
}
|
||||
|
||||
void Cli::_showVersion(unique_ref<HttpClient> httpClient) {
|
||||
cout << "CryFS Version " << gitversion::VersionString() << endl;
|
||||
if (gitversion::IsDevVersion()) {
|
||||
cout << "WARNING! This is a development version based on git commit " << gitversion::GitCommitId() <<
|
||||
". Please do not use in production!" << endl;
|
||||
} else if (!gitversion::IsStableVersion()) {
|
||||
cout << "WARNING! This is an experimental version. Please backup your data frequently!" << endl;
|
||||
}
|
||||
#ifndef NDEBUG
|
||||
cout << "WARNING! This is a debug build. Performance might be slow." << endl;
|
||||
#endif
|
||||
#ifndef CRYFS_NO_UPDATE_CHECKS
|
||||
if (Environment::noUpdateCheck()) {
|
||||
cout << "Automatic checking for security vulnerabilities and updates is disabled." << endl;
|
||||
} else if (Environment::isNoninteractive()) {
|
||||
cout << "Automatic checking for security vulnerabilities and updates is disabled in noninteractive mode." << endl;
|
||||
} else {
|
||||
_checkForUpdates(std::move(httpClient));
|
||||
}
|
||||
#else
|
||||
# warning Update checks are disabled. The resulting executable will not go online to check for newer versions or known security vulnerabilities.
|
||||
UNUSED(httpClient);
|
||||
#endif
|
||||
cout << endl;
|
||||
}
|
||||
|
||||
void Cli::_checkForUpdates(unique_ref<HttpClient> httpClient) {
|
||||
VersionChecker versionChecker(httpClient.get());
|
||||
optional<string> newestVersion = versionChecker.newestVersion();
|
||||
if (newestVersion == none) {
|
||||
cout << "Could not check for updates." << endl;
|
||||
} else if (VersionCompare::isOlderThan(gitversion::VersionString(), *newestVersion)) {
|
||||
cout << "CryFS " << *newestVersion << " is released. Please update." << endl;
|
||||
}
|
||||
optional<string> securityWarning = versionChecker.securityWarningFor(gitversion::VersionString());
|
||||
if (securityWarning != none) {
|
||||
cout << *securityWarning << endl;
|
||||
}
|
||||
}
|
||||
|
||||
bool Cli::_checkPassword(const string &password) {
|
||||
if (password == "") {
|
||||
std::cerr << "Empty password not allowed. Please try again." << std::endl;
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
function<string()> Cli::_askPasswordForExistingFilesystem(std::shared_ptr<cpputils::Console> console) {
|
||||
return [console] () {
|
||||
string password = console->askPassword("Password: ");
|
||||
while (!_checkPassword(password)) {
|
||||
password = console->askPassword("Password: ");
|
||||
}
|
||||
return password;
|
||||
};
|
||||
};
|
||||
|
||||
function<string()> Cli::_askPasswordForNewFilesystem(std::shared_ptr<cpputils::Console> console) {
|
||||
//TODO Ask confirmation if using insecure password (<8 characters)
|
||||
return [console] () {
|
||||
string password;
|
||||
bool again = false;
|
||||
do {
|
||||
password = console->askPassword("Password: ");
|
||||
if (!_checkPassword(password)) {
|
||||
again = true;
|
||||
continue;
|
||||
}
|
||||
if (!_confirmPassword(console.get(), password)) {
|
||||
again = true;
|
||||
continue;
|
||||
}
|
||||
again = false;
|
||||
} while (again);
|
||||
return password;
|
||||
};
|
||||
}
|
||||
|
||||
bool Cli::_confirmPassword(cpputils::Console* console, const string &password) {
|
||||
string confirmPassword = console->askPassword("Confirm Password: ");
|
||||
if (password != confirmPassword) {
|
||||
std::cout << "Passwords don't match" << std::endl;
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
function<string()> Cli::_askPasswordNoninteractive(std::shared_ptr<cpputils::Console> console) {
|
||||
//TODO Test
|
||||
return [console] () {
|
||||
string password = console->askPassword("Password: ");
|
||||
if (!_checkPassword(password)) {
|
||||
throw CryfsException("Invalid password. Password cannot be empty.", ErrorCode::EmptyPassword);
|
||||
}
|
||||
return password;
|
||||
};
|
||||
}
|
||||
Cli::Cli(RandomGenerator &keyGenerator, const SCryptSettings &scryptSettings): _keyGenerator(keyGenerator), _scryptSettings(scryptSettings), _idleUnmounter(none), _device(none) {}
|
||||
|
||||
bf::path Cli::_determineConfigFile(const ProgramOptions &options) {
|
||||
auto configFile = options.configFile();
|
||||
@ -191,19 +81,17 @@ namespace cryfs_cli {
|
||||
void Cli::_checkConfigIntegrity(const bf::path& basedir, const LocalStateDir& localStateDir, const CryConfigFile& config, bool allowReplacedFilesystem) {
|
||||
auto basedirMetadata = BasedirMetadata::load(localStateDir);
|
||||
if (!allowReplacedFilesystem && !basedirMetadata.filesystemIdForBasedirIsCorrect(basedir, config.config()->FilesystemId())) {
|
||||
if (!_console->askYesNo("The filesystem id in the config file is different to the last time we loaded a filesystem from this basedir. This can be genuine if you replaced the filesystem with a different one. If you didn't do that, it is possible that an attacker did. Do you want to continue loading the file system?", false)) {
|
||||
throw CryfsException(
|
||||
"The filesystem id in the config file is different to the last time we loaded a filesystem from this basedir.", ErrorCode::FilesystemIdChanged);
|
||||
}
|
||||
throw CryfsException(
|
||||
"The filesystem id in the config file is different to the last time we loaded a filesystem from this basedir.", ErrorCode::FilesystemIdChanged);
|
||||
}
|
||||
// Update local state (or create it if it didn't exist yet)
|
||||
basedirMetadata.updateFilesystemIdForBasedir(basedir, config.config()->FilesystemId());
|
||||
basedirMetadata.save();
|
||||
}
|
||||
|
||||
CryConfigLoader::ConfigLoadResult Cli::_loadOrCreateConfig(const ProgramOptions &options, const LocalStateDir& localStateDir) {
|
||||
CryConfigLoader::ConfigLoadResult Cli::_loadOrCreateConfig(const ProgramOptions &options, const LocalStateDir& localStateDir, unique_ptr<string> password) {
|
||||
auto configFile = _determineConfigFile(options);
|
||||
auto config = _loadOrCreateConfigFile(std::move(configFile), localStateDir, options.cipher(), options.blocksizeBytes(), options.allowFilesystemUpgrade(), options.missingBlockIsIntegrityViolation(), options.allowReplacedFilesystem());
|
||||
auto config = _loadOrCreateConfigFile(std::move(configFile), localStateDir, std::move(password), options.cipher(), options.blocksizeBytes(), options.allowFilesystemUpgrade(), options.missingBlockIsIntegrityViolation(), options.allowReplacedFilesystem());
|
||||
if (config.is_left()) {
|
||||
switch(config.left()) {
|
||||
case CryConfigFile::LoadError::DecryptionFailed:
|
||||
@ -216,60 +104,30 @@ namespace cryfs_cli {
|
||||
return std::move(config.right());
|
||||
}
|
||||
|
||||
either<CryConfigFile::LoadError, CryConfigLoader::ConfigLoadResult> Cli::_loadOrCreateConfigFile(bf::path configFilePath, LocalStateDir localStateDir, const optional<string> &cipher, const optional<uint32_t> &blocksizeBytes, bool allowFilesystemUpgrade, const optional<bool> &missingBlockIsIntegrityViolation, bool allowReplacedFilesystem) {
|
||||
either<CryConfigFile::LoadError, CryConfigLoader::ConfigLoadResult> Cli::_loadOrCreateConfigFile(bf::path configFilePath, LocalStateDir localStateDir, unique_ptr<string> password, const optional<string> &cipher, const optional<uint32_t> &blocksizeBytes, bool allowFilesystemUpgrade, const optional<bool> &missingBlockIsIntegrityViolation, bool allowReplacedFilesystem) {
|
||||
// TODO Instead of passing in _askPasswordXXX functions to KeyProvider, only pass in console and move logic to the key provider,
|
||||
// for example by having a separate CryPasswordBasedKeyProvider / CryNoninteractivePasswordBasedKeyProvider.
|
||||
auto keyProvider = make_unique_ref<CryPasswordBasedKeyProvider>(
|
||||
_console,
|
||||
_noninteractive ? Cli::_askPasswordNoninteractive(_console) : Cli::_askPasswordForExistingFilesystem(_console),
|
||||
_noninteractive ? Cli::_askPasswordNoninteractive(_console) : Cli::_askPasswordForNewFilesystem(_console),
|
||||
auto keyProvider = make_unique_ref<CryPresetPasswordBasedKeyProvider>(
|
||||
*password.get(),
|
||||
make_unique_ref<SCrypt>(_scryptSettings)
|
||||
);
|
||||
return CryConfigLoader(_console, _keyGenerator, std::move(keyProvider), std::move(localStateDir),
|
||||
cipher, blocksizeBytes, missingBlockIsIntegrityViolation).loadOrCreate(std::move(configFilePath), allowFilesystemUpgrade, allowReplacedFilesystem);
|
||||
return CryConfigLoader(_keyGenerator, std::move(keyProvider), std::move(localStateDir), cipher, blocksizeBytes, missingBlockIsIntegrityViolation).loadOrCreate(std::move(configFilePath), allowFilesystemUpgrade, allowReplacedFilesystem);
|
||||
}
|
||||
|
||||
namespace {
|
||||
void printConfig(const CryConfig& oldConfig, const CryConfig& updatedConfig) {
|
||||
auto printValue = [&] (const char* prefix, const char* suffix, auto member) {
|
||||
std::cout << prefix;
|
||||
auto oldConfigValue = member(oldConfig);
|
||||
auto updatedConfigValue = member(updatedConfig);
|
||||
if (oldConfigValue == updatedConfigValue) {
|
||||
std::cout << oldConfigValue;
|
||||
} else {
|
||||
std::cout << oldConfigValue << " -> " << updatedConfigValue;
|
||||
}
|
||||
std::cout << suffix;
|
||||
};
|
||||
std::cout
|
||||
<< "\n----------------------------------------------------"
|
||||
<< "\nFilesystem configuration:"
|
||||
<< "\n----------------------------------------------------";
|
||||
printValue("\n- Filesystem format version: ", "", [] (const CryConfig& config) {return config.Version(); });
|
||||
printValue("\n- Created with: CryFS ", "", [] (const CryConfig& config) { return config.CreatedWithVersion(); });
|
||||
printValue("\n- Last opened with: CryFS ", "", [] (const CryConfig& config) { return config.LastOpenedWithVersion(); });
|
||||
printValue("\n- Cipher: ", "", [] (const CryConfig& config) { return config.Cipher(); });
|
||||
printValue("\n- Blocksize: ", " bytes", [] (const CryConfig& config) { return config.BlocksizeBytes(); });
|
||||
printValue("\n- Filesystem Id: ", "", [] (const CryConfig& config) { return config.FilesystemId().ToString(); });
|
||||
std::cout << "\n----------------------------------------------------\n";
|
||||
}
|
||||
}
|
||||
|
||||
void Cli::_runFilesystem(const ProgramOptions &options, std::function<void()> onMounted) {
|
||||
fspp::fuse::Fuse* Cli::initFilesystem(const ProgramOptions &options, unique_ptr<string> password) {
|
||||
cpputils::showBacktraceOnCrash();
|
||||
cpputils::set_thread_name("cryfs");
|
||||
try {
|
||||
LocalStateDir localStateDir(Environment::localStateDir());
|
||||
_sanityChecks(options);
|
||||
LocalStateDir localStateDir(options.localStateDir());
|
||||
auto blockStore = make_unique_ref<OnDiskBlockStore2>(options.baseDir());
|
||||
auto config = _loadOrCreateConfig(options, localStateDir);
|
||||
printConfig(config.oldConfig, *config.configFile->config());
|
||||
unique_ptr<fspp::fuse::Fuse> fuse = nullptr;
|
||||
bool stoppedBecauseOfIntegrityViolation = false;
|
||||
auto config = _loadOrCreateConfig(options, localStateDir, std::move(password));
|
||||
fspp::fuse::Fuse* fuse = nullptr;
|
||||
|
||||
auto onIntegrityViolation = [&fuse, &stoppedBecauseOfIntegrityViolation] () {
|
||||
if (fuse.get() != nullptr) {
|
||||
auto onIntegrityViolation = [&fuse] () {
|
||||
if (fuse != nullptr) {
|
||||
LOG(ERR, "Integrity violation detected. Unmounting.");
|
||||
stoppedBecauseOfIntegrityViolation = true;
|
||||
fuse->stop();
|
||||
fuse->destroy();
|
||||
} else {
|
||||
// Usually on an integrity violation, the file system is unmounted.
|
||||
// Here, the file system isn't initialized yet, i.e. we failed in the initial steps when
|
||||
@ -282,45 +140,26 @@ namespace cryfs_cli {
|
||||
_device = optional<unique_ref<CryDevice>>(make_unique_ref<CryDevice>(std::move(config.configFile), std::move(blockStore), std::move(localStateDir), config.myClientId, options.allowIntegrityViolations(), missingBlockIsIntegrityViolation, std::move(onIntegrityViolation)));
|
||||
_sanityCheckFilesystem(_device->get());
|
||||
|
||||
auto initFilesystem = [&] (fspp::fuse::Fuse *fs){
|
||||
auto initFilesystem = [&] (){
|
||||
ASSERT(_device != none, "File system not ready to be initialized. Was it already initialized before?");
|
||||
|
||||
//TODO Test auto unmounting after idle timeout
|
||||
const boost::optional<double> idle_minutes = options.unmountAfterIdleMinutes();
|
||||
_idleUnmounter = _createIdleCallback(idle_minutes, [fs, idle_minutes] {
|
||||
LOG(INFO, "Unmounting because file system was idle for {} minutes", *idle_minutes);
|
||||
fs->stop();
|
||||
});
|
||||
if (_idleUnmounter != none) {
|
||||
(*_device)->onFsAction(std::bind(&CallAfterTimeout::resetTimer, _idleUnmounter->get()));
|
||||
}
|
||||
|
||||
return make_shared<fspp::FilesystemImpl>(std::move(*_device));
|
||||
};
|
||||
|
||||
fuse = make_unique<fspp::fuse::Fuse>(initFilesystem, std::move(onMounted), "cryfs", "cryfs@" + options.baseDir().string());
|
||||
fuse = new fspp::fuse::Fuse(initFilesystem, "cryfs", "cryfs@" + options.baseDir().string());
|
||||
|
||||
_initLogfile(options);
|
||||
|
||||
std::cout << "\nMounting filesystem. To unmount, call:\n$ cryfs-unmount " << options.mountDir() << "\n"
|
||||
<< std::endl;
|
||||
|
||||
if (options.foreground()) {
|
||||
fuse->runInForeground(options.mountDir(), options.fuseOptions());
|
||||
} else {
|
||||
fuse->runInBackground(options.mountDir(), options.fuseOptions());
|
||||
}
|
||||
|
||||
if (stoppedBecauseOfIntegrityViolation) {
|
||||
throw CryfsException("Integrity violation detected. Unmounting.", ErrorCode::IntegrityViolation);
|
||||
}
|
||||
fuse->init();
|
||||
return fuse;
|
||||
} catch (const CryfsException &e) {
|
||||
throw; // CryfsException is only thrown if setup goes wrong. Throw it through so that we get the correct process exit code.
|
||||
if (e.what() != string()) {
|
||||
LOG(ERR, "Error {}: {}", static_cast<int>(e.errorCode()), e.what());
|
||||
}
|
||||
} catch (const std::exception &e) {
|
||||
LOG(ERR, "Crashed: {}", e.what());
|
||||
} catch (...) {
|
||||
LOG(ERR, "Crashed");
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
void Cli::_sanityCheckFilesystem(CryDevice *device) {
|
||||
@ -336,49 +175,14 @@ namespace cryfs_cli {
|
||||
(*rootDir)->children(); // Load children
|
||||
}
|
||||
|
||||
optional<unique_ref<CallAfterTimeout>> Cli::_createIdleCallback(optional<double> minutes, function<void()> callback) {
|
||||
if (minutes == none) {
|
||||
return none;
|
||||
}
|
||||
uint64_t millis = std::llround(60000 * (*minutes));
|
||||
return make_unique_ref<CallAfterTimeout>(milliseconds(millis), callback, "idlecallback");
|
||||
}
|
||||
|
||||
void Cli::_initLogfile(const ProgramOptions &options) {
|
||||
spdlog::drop("cryfs");
|
||||
//TODO Test that --logfile parameter works. Should be: file if specified, otherwise stderr if foreground, else syslog.
|
||||
if (options.logFile() != none) {
|
||||
cpputils::logging::setLogger(
|
||||
spdlog::create<spdlog::sinks::basic_file_sink_mt>("cryfs", options.logFile()->string()));
|
||||
} else if (options.foreground()) {
|
||||
cpputils::logging::setLogger(spdlog::stderr_logger_mt("cryfs"));
|
||||
} else {
|
||||
cpputils::logging::setLogger(cpputils::logging::system_logger("cryfs"));
|
||||
}
|
||||
}
|
||||
|
||||
void Cli::_sanityChecks(const ProgramOptions &options) {
|
||||
_checkDirAccessible(bf::absolute(options.baseDir()), "base directory", options.createMissingBasedir(), ErrorCode::InaccessibleBaseDir);
|
||||
|
||||
if (!options.mountDirIsDriveLetter()) {
|
||||
_checkDirAccessible(options.mountDir(), "mount directory", options.createMissingMountpoint(), ErrorCode::InaccessibleMountDir);
|
||||
_checkMountdirDoesntContainBasedir(options);
|
||||
} else {
|
||||
if (bf::exists(options.mountDir())) {
|
||||
throw CryfsException("Drive " + options.mountDir().string() + " already exists.", ErrorCode::InaccessibleMountDir);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void Cli::_checkDirAccessible(const bf::path &dir, const std::string &name, bool createMissingDir, ErrorCode errorCode) {
|
||||
if (!bf::exists(dir)) {
|
||||
bool create = createMissingDir;
|
||||
if (create) {
|
||||
if (createMissingDir) {
|
||||
LOG(INFO, "Automatically creating {}", name);
|
||||
} else {
|
||||
create = _console->askYesNo("Could not find " + name + ". Do you want to create it?", false);
|
||||
}
|
||||
if (create) {
|
||||
if (!bf::create_directory(dir)) {
|
||||
throw CryfsException("Error creating "+name, errorCode);
|
||||
}
|
||||
@ -390,77 +194,5 @@ namespace cryfs_cli {
|
||||
if (!bf::is_directory(dir)) {
|
||||
throw CryfsException(name+" is not a directory.", errorCode);
|
||||
}
|
||||
auto file = _checkDirWriteable(dir, name, errorCode);
|
||||
_checkDirReadable(dir, file, name, errorCode);
|
||||
}
|
||||
|
||||
shared_ptr<TempFile> Cli::_checkDirWriteable(const bf::path &dir, const std::string &name, ErrorCode errorCode) {
|
||||
auto path = dir / "tempfile";
|
||||
try {
|
||||
return make_shared<TempFile>(path);
|
||||
} catch (const std::runtime_error &e) {
|
||||
throw CryfsException("Could not write to "+name+".", errorCode);
|
||||
}
|
||||
}
|
||||
|
||||
void Cli::_checkDirReadable(const bf::path &dir, shared_ptr<TempFile> tempfile, const std::string &name, ErrorCode errorCode) {
|
||||
ASSERT(bf::equivalent(dir, tempfile->path().parent_path()), "This function should be called with a file inside the directory");
|
||||
try {
|
||||
bool found = false;
|
||||
bf::directory_iterator end;
|
||||
for (auto iter = bf::directory_iterator(dir); iter != end; ++iter) {
|
||||
if (bf::equivalent(*iter, tempfile->path())) {
|
||||
found = true;
|
||||
}
|
||||
}
|
||||
if (!found) {
|
||||
//This should not happen. Can only happen if the written temp file got deleted inbetween or maybe was not written at all.
|
||||
throw std::runtime_error("Error accessing "+name+".");
|
||||
}
|
||||
} catch (const boost::filesystem::filesystem_error &e) {
|
||||
throw CryfsException("Could not read from "+name+".", errorCode);
|
||||
}
|
||||
}
|
||||
|
||||
void Cli::_checkMountdirDoesntContainBasedir(const ProgramOptions &options) {
|
||||
if (_pathContains(options.mountDir(), options.baseDir())) {
|
||||
throw CryfsException("base directory can't be inside the mount directory.", ErrorCode::BaseDirInsideMountDir);
|
||||
}
|
||||
}
|
||||
|
||||
bool Cli::_pathContains(const bf::path &parent, const bf::path &child) {
|
||||
bf::path absParent = bf::canonical(parent);
|
||||
bf::path current = bf::canonical(child);
|
||||
if (absParent.empty() && current.empty()) {
|
||||
return true;
|
||||
}
|
||||
while(!current.empty()) {
|
||||
if (bf::equivalent(current, absParent)) {
|
||||
return true;
|
||||
}
|
||||
current = current.parent_path();
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
int Cli::main(int argc, const char **argv, unique_ref<HttpClient> httpClient, std::function<void()> onMounted) {
|
||||
cpputils::showBacktraceOnCrash();
|
||||
cpputils::set_thread_name("cryfs");
|
||||
|
||||
try {
|
||||
_showVersion(std::move(httpClient));
|
||||
ProgramOptions options = program_options::Parser(argc, argv).parse(CryCiphers::supportedCipherNames());
|
||||
_sanityChecks(options);
|
||||
_runFilesystem(options, std::move(onMounted));
|
||||
} catch (const CryfsException &e) {
|
||||
if (e.what() != string()) {
|
||||
std::cerr << "Error " << static_cast<int>(e.errorCode()) << ": " << e.what() << std::endl;
|
||||
}
|
||||
return exitCode(e.errorCode());
|
||||
} catch (const std::runtime_error &e) {
|
||||
std::cerr << "Error: " << e.what() << std::endl;
|
||||
return exitCode(ErrorCode::UnspecifiedError);
|
||||
}
|
||||
return exitCode(ErrorCode::Success);
|
||||
}
|
||||
}
|
||||
|
@ -2,6 +2,7 @@
|
||||
#ifndef MESSMER_CRYFSCLI_CLI_H
|
||||
#define MESSMER_CRYFSCLI_CLI_H
|
||||
|
||||
#include <fspp/fuse/Fuse.h>
|
||||
#include "program_options/ProgramOptions.h"
|
||||
#include <cryfs/impl/config/CryConfigFile.h>
|
||||
#include <boost/filesystem/path.hpp>
|
||||
@ -17,37 +18,23 @@
|
||||
namespace cryfs_cli {
|
||||
class Cli final {
|
||||
public:
|
||||
Cli(cpputils::RandomGenerator &keyGenerator, const cpputils::SCryptSettings& scryptSettings, std::shared_ptr<cpputils::Console> console);
|
||||
int main(int argc, const char **argv, cpputils::unique_ref<cpputils::HttpClient> httpClient, std::function<void()> onMounted);
|
||||
Cli(cpputils::RandomGenerator &keyGenerator, const cpputils::SCryptSettings& scryptSettings);
|
||||
fspp::fuse::Fuse* initFilesystem(const program_options::ProgramOptions &options, std::unique_ptr<string> password);
|
||||
|
||||
private:
|
||||
void _checkForUpdates(cpputils::unique_ref<cpputils::HttpClient> httpClient);
|
||||
void _runFilesystem(const program_options::ProgramOptions &options, std::function<void()> onMounted);
|
||||
cryfs::CryConfigLoader::ConfigLoadResult _loadOrCreateConfig(const program_options::ProgramOptions &options, const cryfs::LocalStateDir& localStateDir);
|
||||
cryfs::CryConfigLoader::ConfigLoadResult _loadOrCreateConfig(const program_options::ProgramOptions &options, const cryfs::LocalStateDir& localStateDir, std::unique_ptr<string> password);
|
||||
void _checkConfigIntegrity(const boost::filesystem::path& basedir, const cryfs::LocalStateDir& localStateDir, const cryfs::CryConfigFile& config, bool allowReplacedFilesystem);
|
||||
cpputils::either<cryfs::CryConfigFile::LoadError, cryfs::CryConfigLoader::ConfigLoadResult> _loadOrCreateConfigFile(boost::filesystem::path configFilePath, cryfs::LocalStateDir localStateDir, const boost::optional<std::string> &cipher, const boost::optional<uint32_t> &blocksizeBytes, bool allowFilesystemUpgrade, const boost::optional<bool> &missingBlockIsIntegrityViolation, bool allowReplacedFilesystem);
|
||||
cpputils::either<cryfs::CryConfigFile::LoadError, cryfs::CryConfigLoader::ConfigLoadResult> _loadOrCreateConfigFile(boost::filesystem::path configFilePath, cryfs::LocalStateDir localStateDir, std::unique_ptr<string> password, const boost::optional<std::string> &cipher, const boost::optional<uint32_t> &blocksizeBytes, bool allowFilesystemUpgrade, const boost::optional<bool> &missingBlockIsIntegrityViolation, bool allowReplacedFilesystem);
|
||||
boost::filesystem::path _determineConfigFile(const program_options::ProgramOptions &options);
|
||||
static std::function<std::string()> _askPasswordForExistingFilesystem(std::shared_ptr<cpputils::Console> console);
|
||||
static std::function<std::string()> _askPasswordForNewFilesystem(std::shared_ptr<cpputils::Console> console);
|
||||
static std::function<std::string()> _askPasswordNoninteractive(std::shared_ptr<cpputils::Console> console);
|
||||
static bool _confirmPassword(cpputils::Console* console, const std::string &password);
|
||||
static bool _checkPassword(const std::string &password);
|
||||
void _showVersion(cpputils::unique_ref<cpputils::HttpClient> httpClient);
|
||||
void _initLogfile(const program_options::ProgramOptions &options);
|
||||
void _initLogfile();
|
||||
void _sanityChecks(const program_options::ProgramOptions &options);
|
||||
void _checkMountdirDoesntContainBasedir(const program_options::ProgramOptions &options);
|
||||
bool _pathContains(const boost::filesystem::path &parent, const boost::filesystem::path &child);
|
||||
void _checkDirAccessible(const boost::filesystem::path &dir, const std::string &name, bool createMissingDir, cryfs::ErrorCode errorCode);
|
||||
std::shared_ptr<cpputils::TempFile> _checkDirWriteable(const boost::filesystem::path &dir, const std::string &name, cryfs::ErrorCode errorCode);
|
||||
void _checkDirReadable(const boost::filesystem::path &dir, std::shared_ptr<cpputils::TempFile> tempfile, const std::string &name, cryfs::ErrorCode errorCode);
|
||||
boost::optional<cpputils::unique_ref<CallAfterTimeout>> _createIdleCallback(boost::optional<double> minutes, std::function<void()> callback);
|
||||
void _sanityCheckFilesystem(cryfs::CryDevice *device);
|
||||
|
||||
|
||||
cpputils::RandomGenerator &_keyGenerator;
|
||||
cpputils::SCryptSettings _scryptSettings;
|
||||
std::shared_ptr<cpputils::Console> _console;
|
||||
bool _noninteractive;
|
||||
boost::optional<cpputils::unique_ref<CallAfterTimeout>> _idleUnmounter;
|
||||
boost::optional<cpputils::unique_ref<cryfs::CryDevice>> _device;
|
||||
|
||||
|
@ -1,6 +1,5 @@
|
||||
#include "VersionChecker.h"
|
||||
#include <sstream>
|
||||
#include <cpp-utils/network/CurlHttpClient.h>
|
||||
#include <boost/property_tree/json_parser.hpp>
|
||||
#include <cpp-utils/logging/logging.h>
|
||||
#include <boost/foreach.hpp>
|
||||
|
@ -1,47 +0,0 @@
|
||||
#include "Cli.h"
|
||||
#include <cpp-utils/random/Random.h>
|
||||
#include <cpp-utils/io/IOStreamConsole.h>
|
||||
#include <cryfs/impl/CryfsException.h>
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
#include <cpp-utils/network/WinHttpClient.h>
|
||||
#include <VersionHelpers.h>
|
||||
#else
|
||||
#include <cpp-utils/network/CurlHttpClient.h>
|
||||
#endif
|
||||
|
||||
using namespace cryfs_cli;
|
||||
using cpputils::Random;
|
||||
using cpputils::SCrypt;
|
||||
using cpputils::IOStreamConsole;
|
||||
using cpputils::make_unique_ref;
|
||||
using std::make_shared;
|
||||
using std::cerr;
|
||||
|
||||
int main(int argc, const char *argv[]) {
|
||||
#if defined(_MSC_VER)
|
||||
if (!IsWindows7SP1OrGreater()) {
|
||||
std::cerr << "CryFS is currently only supported on Windows 7 SP1 (or later)." << std::endl;
|
||||
exit(1);
|
||||
}
|
||||
#endif
|
||||
|
||||
try {
|
||||
auto &keyGenerator = Random::OSRandom();
|
||||
#if defined(_MSC_VER)
|
||||
auto httpClient = make_unique_ref<cpputils::WinHttpClient>();
|
||||
#else
|
||||
auto httpClient = make_unique_ref<cpputils::CurlHttpClient>();
|
||||
#endif
|
||||
return Cli(keyGenerator, SCrypt::DefaultSettings, make_shared<IOStreamConsole>())
|
||||
.main(argc, argv, std::move(httpClient), []{});
|
||||
} catch (const cryfs::CryfsException &e) {
|
||||
if (e.what() != string()) {
|
||||
std::cerr << "Error: " << e.what() << std::endl;
|
||||
}
|
||||
return exitCode(e.errorCode());
|
||||
} catch (const std::exception &e) {
|
||||
cerr << "Error: " << e.what();
|
||||
return exitCode(cryfs::ErrorCode::UnspecifiedError);
|
||||
}
|
||||
}
|
@ -1,228 +0,0 @@
|
||||
#include "Parser.h"
|
||||
#include "utils.h"
|
||||
#include <iostream>
|
||||
#include <boost/optional.hpp>
|
||||
#include <cryfs/impl/config/CryConfigConsole.h>
|
||||
#include <cryfs/impl/CryfsException.h>
|
||||
#include <cryfs-cli/Environment.h>
|
||||
|
||||
namespace po = boost::program_options;
|
||||
namespace bf = boost::filesystem;
|
||||
using namespace cryfs_cli::program_options;
|
||||
using cryfs::CryConfigConsole;
|
||||
using cryfs::CryfsException;
|
||||
using cryfs::ErrorCode;
|
||||
using std::vector;
|
||||
using std::cerr;
|
||||
using std::endl;
|
||||
using std::string;
|
||||
using boost::optional;
|
||||
using boost::none;
|
||||
using namespace cpputils::logging;
|
||||
|
||||
Parser::Parser(int argc, const char **argv)
|
||||
:_options(_argsToVector(argc, argv)) {
|
||||
}
|
||||
|
||||
vector<string> Parser::_argsToVector(int argc, const char **argv) {
|
||||
vector<string> result;
|
||||
for(int i = 0; i < argc; ++i) {
|
||||
result.push_back(argv[i]);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
ProgramOptions Parser::parse(const vector<string> &supportedCiphers) const {
|
||||
vector<string> cryfsOptions;
|
||||
vector<string> fuseOptions;
|
||||
std::tie(cryfsOptions, fuseOptions) = splitAtDoubleDash(_options);
|
||||
|
||||
if (fuseOptions.size() != 0) {
|
||||
LOG(WARN, "Passing fuse mount options after a double dash '--' is deprecated. Please pass them directly (e.g. 'cryfs basedir mountdir -o allow_other'");
|
||||
}
|
||||
|
||||
po::variables_map vm = _parseOptionsOrShowHelp(cryfsOptions, supportedCiphers);
|
||||
|
||||
if (!vm.count("base-dir")) {
|
||||
_showHelpAndExit("Please specify a base directory.", ErrorCode::InvalidArguments);
|
||||
}
|
||||
if (!vm.count("mount-dir")) {
|
||||
_showHelpAndExit("Please specify a mount directory.", ErrorCode::InvalidArguments);
|
||||
}
|
||||
bf::path baseDir = vm["base-dir"].as<string>();
|
||||
bf::path mountDir = vm["mount-dir"].as<string>();
|
||||
optional<bf::path> configfile = none;
|
||||
if (vm.count("config")) {
|
||||
configfile = bf::absolute(vm["config"].as<string>());
|
||||
}
|
||||
bool foreground = vm.count("foreground");
|
||||
bool allowFilesystemUpgrade = vm.count("allow-filesystem-upgrade");
|
||||
bool allowReplacedFilesystem = vm.count("allow-replaced-filesystem");
|
||||
bool createMissingBasedir = vm.count("create-missing-basedir");
|
||||
bool createMissingMountpoint = vm.count("create-missing-mountpoint");
|
||||
optional<double> unmountAfterIdleMinutes = 0.0; // first setting to 0 and then to none is somehow needed to silence a GCC warning from -Wmaybe-uninitialized
|
||||
unmountAfterIdleMinutes = none;
|
||||
if (vm.count("unmount-idle")) {
|
||||
unmountAfterIdleMinutes = vm["unmount-idle"].as<double>();
|
||||
}
|
||||
optional<bf::path> logfile = none;
|
||||
if (vm.count("logfile")) {
|
||||
logfile = bf::absolute(vm["logfile"].as<string>());
|
||||
}
|
||||
optional<string> cipher = none;
|
||||
if (vm.count("cipher")) {
|
||||
cipher = vm["cipher"].as<string>();
|
||||
_checkValidCipher(*cipher, supportedCiphers);
|
||||
}
|
||||
optional<uint32_t> blocksizeBytes = none;
|
||||
if (vm.count("blocksize")) {
|
||||
blocksizeBytes = vm["blocksize"].as<uint32_t>();
|
||||
}
|
||||
bool allowIntegrityViolations = vm.count("allow-integrity-violations");
|
||||
optional<bool> missingBlockIsIntegrityViolation = none;
|
||||
if (vm.count("missing-block-is-integrity-violation")) {
|
||||
missingBlockIsIntegrityViolation = vm["missing-block-is-integrity-violation"].as<bool>();
|
||||
}
|
||||
|
||||
if (vm.count("fuse-option")) {
|
||||
auto options = vm["fuse-option"].as<vector<string>>();
|
||||
for (const auto& option: options) {
|
||||
fuseOptions.push_back("-o");
|
||||
fuseOptions.push_back(option);
|
||||
}
|
||||
}
|
||||
|
||||
return ProgramOptions(std::move(baseDir), std::move(mountDir), std::move(configfile), foreground, allowFilesystemUpgrade, allowReplacedFilesystem, createMissingBasedir, createMissingMountpoint, std::move(unmountAfterIdleMinutes), std::move(logfile), std::move(cipher), blocksizeBytes, allowIntegrityViolations, std::move(missingBlockIsIntegrityViolation), std::move(fuseOptions));
|
||||
}
|
||||
|
||||
void Parser::_checkValidCipher(const string &cipher, const vector<string> &supportedCiphers) {
|
||||
if (std::find(supportedCiphers.begin(), supportedCiphers.end(), cipher) == supportedCiphers.end()) {
|
||||
throw CryfsException("Invalid cipher: " + cipher, ErrorCode::InvalidArguments);
|
||||
}
|
||||
}
|
||||
|
||||
po::variables_map Parser::_parseOptionsOrShowHelp(const vector<string> &options, const vector<string> &supportedCiphers) {
|
||||
try {
|
||||
return _parseOptions(options, supportedCiphers);
|
||||
} catch (const CryfsException& e) {
|
||||
// If CryfsException is thrown, we already know what's wrong.
|
||||
// Show usage information and pass through the exception, don't catch it.
|
||||
if (e.errorCode() != ErrorCode::Success) {
|
||||
_showHelp();
|
||||
}
|
||||
throw;
|
||||
} catch(const std::exception &e) {
|
||||
std::cerr << e.what() << std::endl;
|
||||
_showHelpAndExit("Invalid arguments", ErrorCode::InvalidArguments);
|
||||
}
|
||||
}
|
||||
|
||||
po::variables_map Parser::_parseOptions(const vector<string> &options, const vector<string> &supportedCiphers) {
|
||||
po::options_description desc;
|
||||
po::positional_options_description positional_desc;
|
||||
_addAllowedOptions(&desc);
|
||||
_addPositionalOptionForBaseDir(&desc, &positional_desc);
|
||||
|
||||
po::variables_map vm;
|
||||
vector<const char*> _options = _to_const_char_vector(options);
|
||||
po::store(po::command_line_parser(_options.size(), _options.data())
|
||||
.options(desc).positional(positional_desc).run(), vm);
|
||||
if (vm.count("help")) {
|
||||
_showHelpAndExit("", ErrorCode::Success);
|
||||
}
|
||||
if (vm.count("show-ciphers")) {
|
||||
_showCiphersAndExit(supportedCiphers);
|
||||
}
|
||||
if (vm.count("version")) {
|
||||
_showVersionAndExit();
|
||||
}
|
||||
po::notify(vm);
|
||||
|
||||
return vm;
|
||||
}
|
||||
|
||||
vector<const char*> Parser::_to_const_char_vector(const vector<string> &options) {
|
||||
vector<const char*> result;
|
||||
result.reserve(options.size());
|
||||
for (const string &option : options) {
|
||||
result.push_back(option.c_str());
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
void Parser::_addAllowedOptions(po::options_description *desc) {
|
||||
po::options_description options("Allowed options");
|
||||
string cipher_description = "Cipher to use for encryption. See possible values by calling cryfs with --show-ciphers. Default: ";
|
||||
cipher_description += CryConfigConsole::DEFAULT_CIPHER;
|
||||
string blocksize_description = "The block size used when storing ciphertext blocks (in bytes). Default: ";
|
||||
blocksize_description += std::to_string(CryConfigConsole::DEFAULT_BLOCKSIZE_BYTES);
|
||||
options.add_options()
|
||||
("help,h", "show help message")
|
||||
("config,c", po::value<string>(), "Configuration file")
|
||||
("foreground,f", "Run CryFS in foreground.")
|
||||
("fuse-option,o", po::value<vector<string>>(), "Add a fuse mount option. Example: atime or noatime.")
|
||||
("cipher", po::value<string>(), cipher_description.c_str())
|
||||
("blocksize", po::value<uint32_t>(), blocksize_description.c_str())
|
||||
("missing-block-is-integrity-violation", po::value<bool>(), "Whether to treat a missing block as an integrity violation. This makes sure you notice if an attacker deleted some of your files, but only works in single-client mode. You will not be able to use the file system on other devices.")
|
||||
("allow-integrity-violations", "Disable integrity checks. Integrity checks ensure that your file system was not manipulated or rolled back to an earlier version. Disabling them is needed if you want to load an old snapshot of your file system.")
|
||||
("allow-filesystem-upgrade", "Allow upgrading the file system if it was created with an old CryFS version. After the upgrade, older CryFS versions might not be able to use the file system anymore.")
|
||||
("allow-replaced-filesystem", "By default, CryFS remembers file systems it has seen in this base directory and checks that it didn't get replaced by an attacker with an entirely different file system since the last time it was loaded. However, if you do want to replace the file system with an entirely new one, you can pass in this option to disable the check.")
|
||||
("create-missing-basedir", "Creates the base directory even if there is no directory currently there, skipping the normal confirmation message to create it later.")
|
||||
("create-missing-mountpoint", "Creates the mountpoint even if there is no directory currently there, skipping the normal confirmation message to create it later.")
|
||||
("show-ciphers", "Show list of supported ciphers.")
|
||||
("unmount-idle", po::value<double>(), "Automatically unmount after specified number of idle minutes.")
|
||||
("logfile", po::value<string>(), "Specify the file to write log messages to. If this is not specified, log messages will go to stdout, or syslog if CryFS is running in the background.")
|
||||
("version", "Show CryFS version number")
|
||||
;
|
||||
desc->add(options);
|
||||
}
|
||||
|
||||
void Parser::_addPositionalOptionForBaseDir(po::options_description *desc, po::positional_options_description *positional) {
|
||||
positional->add("base-dir", 1);
|
||||
positional->add("mount-dir", 1);
|
||||
po::options_description hidden("Hidden options");
|
||||
hidden.add_options()
|
||||
("base-dir", po::value<string>(), "Base directory")
|
||||
("mount-dir", po::value<string>(), "Mount directory")
|
||||
;
|
||||
desc->add(hidden);
|
||||
}
|
||||
|
||||
[[noreturn]] void Parser::_showCiphersAndExit(const vector<string> &supportedCiphers) {
|
||||
for (const auto &cipher : supportedCiphers) {
|
||||
std::cerr << cipher << "\n";
|
||||
}
|
||||
throw CryfsException("", ErrorCode::Success);
|
||||
}
|
||||
|
||||
void Parser::_showHelp() {
|
||||
cerr << "Usage: cryfs [options] baseDir mountPoint [-- [FUSE Mount Options]]\n";
|
||||
po::options_description desc;
|
||||
_addAllowedOptions(&desc);
|
||||
cerr << desc << endl;
|
||||
cerr << "Environment variables:\n"
|
||||
<< " " << Environment::FRONTEND_KEY << "=" << Environment::FRONTEND_NONINTERACTIVE << "\n"
|
||||
<< "\tWork better together with tools.\n"
|
||||
<< "\tWith this option set, CryFS won't ask anything, but use default values\n"
|
||||
<< "\tfor options you didn't specify on command line. Furthermore, it won't\n"
|
||||
<< "\task you to enter a new password a second time (password confirmation).\n"
|
||||
<< " " << Environment::NOUPDATECHECK_KEY << "=true\n"
|
||||
<< "\tBy default, CryFS connects to the internet to check for known\n"
|
||||
<< "\tsecurity vulnerabilities and new versions. This option disables this.\n"
|
||||
<< " " << Environment::LOCALSTATEDIR_KEY << "=[path]\n"
|
||||
<< "\tSets the directory cryfs uses to store local state. This local state\n"
|
||||
<< "\tis used to recognize known file systems and run integrity checks,\n"
|
||||
<< "\ti.e. check that they haven't been modified by an attacker.\n"
|
||||
<< "\tDefault value: " << Environment::defaultLocalStateDir().string() << "\n"
|
||||
<< endl;
|
||||
}
|
||||
|
||||
[[noreturn]] void Parser::_showHelpAndExit(const std::string& message, ErrorCode errorCode) {
|
||||
_showHelp();
|
||||
throw CryfsException(message, errorCode);
|
||||
}
|
||||
|
||||
[[noreturn]] void Parser::_showVersionAndExit() {
|
||||
// no need to show version because it was already shown in the CryFS header before parsing program options
|
||||
throw CryfsException("", ErrorCode::Success);
|
||||
}
|
@ -1,38 +0,0 @@
|
||||
#pragma once
|
||||
#ifndef MESSMER_CRYFSCLI_PROGRAMOPTIONS_PARSER_H
|
||||
#define MESSMER_CRYFSCLI_PROGRAMOPTIONS_PARSER_H
|
||||
|
||||
#include "ProgramOptions.h"
|
||||
#include <boost/program_options.hpp>
|
||||
#include <cryfs/impl/ErrorCodes.h>
|
||||
|
||||
namespace cryfs_cli {
|
||||
namespace program_options {
|
||||
class Parser final {
|
||||
public:
|
||||
Parser(int argc, const char **argv);
|
||||
|
||||
ProgramOptions parse(const std::vector<std::string> &supportedCiphers) const;
|
||||
|
||||
private:
|
||||
static std::vector<std::string> _argsToVector(int argc, const char **argv);
|
||||
static std::vector<const char*> _to_const_char_vector(const std::vector<std::string> &options);
|
||||
static void _addAllowedOptions(boost::program_options::options_description *desc);
|
||||
static void _addPositionalOptionForBaseDir(boost::program_options::options_description *desc,
|
||||
boost::program_options::positional_options_description *positional);
|
||||
static void _showHelp();
|
||||
[[noreturn]] static void _showHelpAndExit(const std::string& message, cryfs::ErrorCode errorCode);
|
||||
[[noreturn]] static void _showCiphersAndExit(const std::vector<std::string> &supportedCiphers);
|
||||
[[noreturn]] static void _showVersionAndExit();
|
||||
static boost::program_options::variables_map _parseOptionsOrShowHelp(const std::vector<std::string> &options, const std::vector<std::string> &supportedCiphers);
|
||||
static boost::program_options::variables_map _parseOptions(const std::vector<std::string> &options, const std::vector<std::string> &supportedCiphers);
|
||||
static void _checkValidCipher(const std::string &cipher, const std::vector<std::string> &supportedCiphers);
|
||||
|
||||
std::vector<std::string> _options;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(Parser);
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
@ -9,48 +9,33 @@ using std::vector;
|
||||
using boost::optional;
|
||||
namespace bf = boost::filesystem;
|
||||
|
||||
ProgramOptions::ProgramOptions(bf::path baseDir, bf::path mountDir, optional<bf::path> configFile,
|
||||
bool foreground, bool allowFilesystemUpgrade, bool allowReplacedFilesystem,
|
||||
bool createMissingBasedir, bool createMissingMountpoint,
|
||||
optional<double> unmountAfterIdleMinutes,
|
||||
optional<bf::path> logFile, optional<string> cipher,
|
||||
ProgramOptions::ProgramOptions(bf::path baseDir, optional<bf::path> configFile,
|
||||
boost::filesystem::path localStateDir,
|
||||
bool allowFilesystemUpgrade, bool allowReplacedFilesystem,
|
||||
bool createMissingBasedir,
|
||||
optional<string> cipher,
|
||||
optional<uint32_t> blocksizeBytes,
|
||||
bool allowIntegrityViolations,
|
||||
boost::optional<bool> missingBlockIsIntegrityViolation,
|
||||
vector<string> fuseOptions)
|
||||
: _baseDir(bf::absolute(std::move(baseDir))), _mountDir(std::move(mountDir)), _configFile(std::move(configFile)),
|
||||
_foreground(foreground),
|
||||
boost::optional<bool> missingBlockIsIntegrityViolation)
|
||||
: _baseDir(bf::absolute(std::move(baseDir))), _configFile(std::move(configFile)),
|
||||
_localStateDir(std::move(localStateDir)),
|
||||
_allowFilesystemUpgrade(allowFilesystemUpgrade), _allowReplacedFilesystem(allowReplacedFilesystem),
|
||||
_createMissingBasedir(createMissingBasedir), _createMissingMountpoint(createMissingMountpoint),
|
||||
_unmountAfterIdleMinutes(std::move(unmountAfterIdleMinutes)), _logFile(std::move(logFile)),
|
||||
_createMissingBasedir(createMissingBasedir),
|
||||
_cipher(std::move(cipher)), _blocksizeBytes(std::move(blocksizeBytes)),
|
||||
_allowIntegrityViolations(allowIntegrityViolations),
|
||||
_missingBlockIsIntegrityViolation(std::move(missingBlockIsIntegrityViolation)),
|
||||
_fuseOptions(std::move(fuseOptions)),
|
||||
_mountDirIsDriveLetter(cpputils::path_is_just_drive_letter(_mountDir)) {
|
||||
if (!_mountDirIsDriveLetter) {
|
||||
_mountDir = bf::absolute(std::move(_mountDir));
|
||||
}
|
||||
_missingBlockIsIntegrityViolation(std::move(missingBlockIsIntegrityViolation)) {
|
||||
}
|
||||
|
||||
const bf::path &ProgramOptions::baseDir() const {
|
||||
return _baseDir;
|
||||
}
|
||||
|
||||
const bf::path &ProgramOptions::mountDir() const {
|
||||
return _mountDir;
|
||||
}
|
||||
|
||||
bool ProgramOptions::mountDirIsDriveLetter() const {
|
||||
return _mountDirIsDriveLetter;
|
||||
}
|
||||
|
||||
const optional<bf::path> &ProgramOptions::configFile() const {
|
||||
return _configFile;
|
||||
}
|
||||
|
||||
bool ProgramOptions::foreground() const {
|
||||
return _foreground;
|
||||
const bf::path &ProgramOptions::localStateDir() const {
|
||||
return _localStateDir;
|
||||
}
|
||||
|
||||
bool ProgramOptions::allowFilesystemUpgrade() const {
|
||||
@ -61,18 +46,6 @@ bool ProgramOptions::createMissingBasedir() const {
|
||||
return _createMissingBasedir;
|
||||
}
|
||||
|
||||
bool ProgramOptions::createMissingMountpoint() const {
|
||||
return _createMissingMountpoint;
|
||||
}
|
||||
|
||||
const optional<double> &ProgramOptions::unmountAfterIdleMinutes() const {
|
||||
return _unmountAfterIdleMinutes;
|
||||
}
|
||||
|
||||
const optional<bf::path> &ProgramOptions::logFile() const {
|
||||
return _logFile;
|
||||
}
|
||||
|
||||
const optional<string> &ProgramOptions::cipher() const {
|
||||
return _cipher;
|
||||
}
|
||||
@ -92,7 +65,3 @@ bool ProgramOptions::allowReplacedFilesystem() const {
|
||||
const optional<bool> &ProgramOptions::missingBlockIsIntegrityViolation() const {
|
||||
return _missingBlockIsIntegrityViolation;
|
||||
}
|
||||
|
||||
const vector<string> &ProgramOptions::fuseOptions() const {
|
||||
return _fuseOptions;
|
||||
}
|
||||
|
@ -12,53 +12,39 @@ namespace cryfs_cli {
|
||||
namespace program_options {
|
||||
class ProgramOptions final {
|
||||
public:
|
||||
ProgramOptions(boost::filesystem::path baseDir, boost::filesystem::path mountDir,
|
||||
ProgramOptions(boost::filesystem::path baseDir,
|
||||
boost::optional<boost::filesystem::path> configFile,
|
||||
bool foreground, bool allowFilesystemUpgrade, bool allowReplacedFilesystem,
|
||||
bool createMissingBasedir, bool createMissingMountpoint,
|
||||
boost::optional<double> unmountAfterIdleMinutes,
|
||||
boost::optional<boost::filesystem::path> logFile,
|
||||
boost::filesystem::path localStateDir,
|
||||
bool allowFilesystemUpgrade, bool allowReplacedFilesystem,
|
||||
bool createMissingBasedir,
|
||||
boost::optional<std::string> cipher,
|
||||
boost::optional<uint32_t> blocksizeBytes,
|
||||
bool allowIntegrityViolations,
|
||||
boost::optional<bool> missingBlockIsIntegrityViolation,
|
||||
std::vector<std::string> fuseOptions);
|
||||
boost::optional<bool> missingBlockIsIntegrityViolation);
|
||||
ProgramOptions(ProgramOptions &&rhs) = default;
|
||||
|
||||
const boost::filesystem::path &baseDir() const;
|
||||
const boost::filesystem::path &mountDir() const;
|
||||
const boost::optional<boost::filesystem::path> &configFile() const;
|
||||
bool foreground() const;
|
||||
const boost::filesystem::path &localStateDir() const;
|
||||
bool allowFilesystemUpgrade() const;
|
||||
bool allowReplacedFilesystem() const;
|
||||
bool createMissingBasedir() const;
|
||||
bool createMissingMountpoint() const;
|
||||
const boost::optional<double> &unmountAfterIdleMinutes() const;
|
||||
const boost::optional<boost::filesystem::path> &logFile() const;
|
||||
const boost::optional<std::string> &cipher() const;
|
||||
const boost::optional<uint32_t> &blocksizeBytes() const;
|
||||
bool allowIntegrityViolations() const;
|
||||
const boost::optional<bool> &missingBlockIsIntegrityViolation() const;
|
||||
const std::vector<std::string> &fuseOptions() const;
|
||||
bool mountDirIsDriveLetter() const;
|
||||
|
||||
private:
|
||||
boost::filesystem::path _baseDir; // this is always absolute
|
||||
boost::filesystem::path _mountDir; // this is absolute iff !_mountDirIsDriveLetter
|
||||
boost::optional<boost::filesystem::path> _configFile;
|
||||
bool _foreground;
|
||||
boost::filesystem::path _localStateDir;
|
||||
bool _allowFilesystemUpgrade;
|
||||
bool _allowReplacedFilesystem;
|
||||
bool _createMissingBasedir;
|
||||
bool _createMissingMountpoint;
|
||||
boost::optional<double> _unmountAfterIdleMinutes;
|
||||
boost::optional<boost::filesystem::path> _logFile;
|
||||
boost::optional<std::string> _cipher;
|
||||
boost::optional<uint32_t> _blocksizeBytes;
|
||||
bool _allowIntegrityViolations;
|
||||
boost::optional<bool> _missingBlockIsIntegrityViolation;
|
||||
std::vector<std::string> _fuseOptions;
|
||||
bool _mountDirIsDriveLetter;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(ProgramOptions);
|
||||
};
|
||||
|
@ -1,24 +0,0 @@
|
||||
project (cryfs-unmount)
|
||||
INCLUDE(GNUInstallDirs)
|
||||
|
||||
set(SOURCES
|
||||
program_options/ProgramOptions.cpp
|
||||
program_options/Parser.cpp
|
||||
Cli.cpp
|
||||
)
|
||||
|
||||
add_library(${PROJECT_NAME} ${SOURCES})
|
||||
target_link_libraries(${PROJECT_NAME} PUBLIC cpp-utils cryfs fspp-fuse)
|
||||
target_enable_style_warnings(${PROJECT_NAME})
|
||||
target_activate_cpp14(${PROJECT_NAME})
|
||||
target_add_boost(${PROJECT_NAME})
|
||||
|
||||
add_executable(${PROJECT_NAME}_bin main_unmount.cpp)
|
||||
set_target_properties(${PROJECT_NAME}_bin PROPERTIES OUTPUT_NAME cryfs-unmount)
|
||||
target_link_libraries(${PROJECT_NAME}_bin PUBLIC ${PROJECT_NAME})
|
||||
target_enable_style_warnings(${PROJECT_NAME}_bin)
|
||||
target_activate_cpp14(${PROJECT_NAME}_bin)
|
||||
|
||||
install(TARGETS ${PROJECT_NAME}_bin
|
||||
DESTINATION ${CMAKE_INSTALL_BINDIR}
|
||||
)
|
@ -1,57 +0,0 @@
|
||||
#include "Cli.h"
|
||||
#include <fspp/fuse/Fuse.h>
|
||||
#include <cryfs-unmount/program_options/Parser.h>
|
||||
#include <gitversion/gitversion.h>
|
||||
#include <cryfs/impl/CryfsException.h>
|
||||
|
||||
#include <iostream>
|
||||
|
||||
using fspp::fuse::Fuse;
|
||||
using cryfs_unmount::program_options::Parser;
|
||||
using cryfs_unmount::program_options::ProgramOptions;
|
||||
|
||||
namespace cryfs_unmount {
|
||||
|
||||
namespace {
|
||||
void _showVersion() {
|
||||
std::cout << "CryFS Version " << gitversion::VersionString() << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
void Cli::main(int argc, const char **argv) {
|
||||
_showVersion();
|
||||
ProgramOptions options = Parser(argc, argv).parse();
|
||||
|
||||
if (!boost::filesystem::exists(options.mountDir())) {
|
||||
throw cryfs::CryfsException("Given mountdir doesn't exist", cryfs::ErrorCode::InaccessibleMountDir);
|
||||
}
|
||||
|
||||
bool immediate = options.immediate();
|
||||
#if defined(__APPLE__)
|
||||
if (options.immediate()) {
|
||||
std::cerr << "Warning: OSX doesn't support the --immediate flag. Ignoring it.";
|
||||
immediate = false;
|
||||
}
|
||||
#elif defined(_MSC_VER)
|
||||
if (options.immediate()) {
|
||||
std::cerr << "Warning: Windows doesn't support the --immediate flag. Ignoring it.";
|
||||
immediate = false;
|
||||
}
|
||||
#endif
|
||||
|
||||
// TODO This doesn't seem to work with relative paths
|
||||
std::cout << "Unmounting CryFS filesystem at " << options.mountDir() << "." << std::endl;
|
||||
if (immediate) {
|
||||
Fuse::unmount(options.mountDir(), true);
|
||||
|
||||
// TODO Wait until it is actually unmounted and then show a better success message?
|
||||
std::cout << "Filesystem is unmounting." << std::endl;
|
||||
} else {
|
||||
Fuse::unmount(options.mountDir(), false);
|
||||
|
||||
// TODO Wait until it is actually unmounted and then show a better success message?
|
||||
std::cout << "Filesystem will unmount as soon as nothing is accessing it anymore." << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
@ -1,14 +0,0 @@
|
||||
#pragma once
|
||||
#ifndef MESSMER_CRYFSUNMOUNT_CLI_H
|
||||
#define MESSMER_CRYFSUNMOUNT_CLI_H
|
||||
|
||||
namespace cryfs_unmount {
|
||||
|
||||
class Cli final {
|
||||
public:
|
||||
void main(int argc, const char **argv);
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif
|
@ -1,38 +0,0 @@
|
||||
#if defined(_MSC_VER)
|
||||
#include <Windows.h>
|
||||
#include <VersionHelpers.h>
|
||||
#endif
|
||||
|
||||
#include <iostream>
|
||||
#include <cryfs/impl/CryfsException.h>
|
||||
#include <cpp-utils/assert/backtrace.h>
|
||||
#include "Cli.h"
|
||||
|
||||
using std::cerr;
|
||||
using cryfs::ErrorCode;
|
||||
|
||||
int main(int argc, const char *argv[]) {
|
||||
#if defined(_MSC_VER)
|
||||
if (!IsWindows7SP1OrGreater()) {
|
||||
std::cerr << "CryFS is currently only supported on Windows 7 SP1 (or later)." << std::endl;
|
||||
exit(1);
|
||||
}
|
||||
#endif
|
||||
|
||||
cpputils::showBacktraceOnCrash();
|
||||
|
||||
try {
|
||||
cryfs_unmount::Cli().main(argc, argv);
|
||||
}
|
||||
catch (const cryfs::CryfsException &e) {
|
||||
if (e.what() != std::string()) {
|
||||
std::cerr << "Error " << static_cast<int>(e.errorCode()) << ": " << e.what() << std::endl;
|
||||
}
|
||||
return exitCode(e.errorCode());
|
||||
}
|
||||
catch (const std::runtime_error &e) {
|
||||
std::cerr << "Error: " << e.what() << std::endl;
|
||||
return exitCode(ErrorCode::UnspecifiedError);
|
||||
}
|
||||
return exitCode(ErrorCode::Success);
|
||||
}
|
@ -1,130 +0,0 @@
|
||||
#include "Parser.h"
|
||||
#include <iostream>
|
||||
#include <boost/optional.hpp>
|
||||
#include <cryfs/impl/config/CryConfigConsole.h>
|
||||
#include <cryfs/impl/CryfsException.h>
|
||||
#include <cryfs-cli/Environment.h>
|
||||
|
||||
namespace po = boost::program_options;
|
||||
namespace bf = boost::filesystem;
|
||||
using namespace cryfs_unmount::program_options;
|
||||
using cryfs::CryConfigConsole;
|
||||
using cryfs::CryfsException;
|
||||
using cryfs::ErrorCode;
|
||||
using std::vector;
|
||||
using std::cerr;
|
||||
using std::endl;
|
||||
using std::string;
|
||||
using namespace cpputils::logging;
|
||||
|
||||
Parser::Parser(int argc, const char **argv)
|
||||
:_options(_argsToVector(argc, argv)) {
|
||||
}
|
||||
|
||||
vector<string> Parser::_argsToVector(int argc, const char **argv) {
|
||||
vector<string> result;
|
||||
for (int i = 0; i < argc; ++i) {
|
||||
result.push_back(argv[i]);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
ProgramOptions Parser::parse() const {
|
||||
po::variables_map vm = _parseOptionsOrShowHelp(_options);
|
||||
|
||||
if (!vm.count("mount-dir")) {
|
||||
_showHelpAndExit("Please specify a mount directory.", ErrorCode::InvalidArguments);
|
||||
}
|
||||
bf::path mountDir = vm["mount-dir"].as<string>();
|
||||
bool immediate = vm.count("immediate");
|
||||
|
||||
return ProgramOptions(std::move(mountDir), immediate);
|
||||
}
|
||||
|
||||
po::variables_map Parser::_parseOptionsOrShowHelp(const vector<string> &options) {
|
||||
try {
|
||||
return _parseOptions(options);
|
||||
}
|
||||
catch (const CryfsException& e) {
|
||||
// If CryfsException is thrown, we already know what's wrong.
|
||||
// Show usage information and pass through the exception, don't catch it.
|
||||
if (e.errorCode() != ErrorCode::Success) {
|
||||
_showHelp();
|
||||
}
|
||||
throw;
|
||||
}
|
||||
catch (const std::exception &e) {
|
||||
std::cerr << e.what() << std::endl;
|
||||
_showHelpAndExit("Invalid arguments", ErrorCode::InvalidArguments);
|
||||
}
|
||||
}
|
||||
|
||||
po::variables_map Parser::_parseOptions(const vector<string> &options) {
|
||||
po::options_description desc;
|
||||
po::positional_options_description positional_desc;
|
||||
_addAllowedOptions(&desc);
|
||||
_addPositionalOptionForBaseDir(&desc, &positional_desc);
|
||||
|
||||
po::variables_map vm;
|
||||
vector<const char*> _options = _to_const_char_vector(options);
|
||||
po::store(po::command_line_parser(_options.size(), _options.data())
|
||||
.options(desc).positional(positional_desc).run(), vm);
|
||||
if (vm.count("help")) {
|
||||
_showHelpAndExit("", ErrorCode::Success);
|
||||
}
|
||||
if (vm.count("version")) {
|
||||
_showVersionAndExit();
|
||||
}
|
||||
po::notify(vm);
|
||||
|
||||
return vm;
|
||||
}
|
||||
|
||||
vector<const char*> Parser::_to_const_char_vector(const vector<string> &options) {
|
||||
vector<const char*> result;
|
||||
result.reserve(options.size());
|
||||
for (const string &option : options) {
|
||||
result.push_back(option.c_str());
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
void Parser::_addAllowedOptions(po::options_description *desc) {
|
||||
po::options_description options("Allowed options");
|
||||
string cipher_description = "Cipher to use for encryption. See possible values by calling cryfs with --show-ciphers. Default: ";
|
||||
cipher_description += CryConfigConsole::DEFAULT_CIPHER;
|
||||
string blocksize_description = "The block size used when storing ciphertext blocks (in bytes). Default: ";
|
||||
blocksize_description += std::to_string(CryConfigConsole::DEFAULT_BLOCKSIZE_BYTES);
|
||||
options.add_options()
|
||||
("immediate", "unmount immediately without waiting for processes that currently access the file system to finish their file system operations. With this flag, unmounting can fail if there's processes having a lock on the file system.")
|
||||
("help,h", "show help message")
|
||||
("version", "show CryFS version number")
|
||||
;
|
||||
desc->add(options);
|
||||
}
|
||||
|
||||
void Parser::_addPositionalOptionForBaseDir(po::options_description *desc, po::positional_options_description *positional) {
|
||||
positional->add("mount-dir", 1);
|
||||
po::options_description hidden("Hidden options");
|
||||
hidden.add_options()
|
||||
("mount-dir", po::value<string>(), "Mount directory")
|
||||
;
|
||||
desc->add(hidden);
|
||||
}
|
||||
|
||||
void Parser::_showHelp() {
|
||||
cerr << "Usage: cryfs-unmount [mountPoint]\n";
|
||||
po::options_description desc;
|
||||
_addAllowedOptions(&desc);
|
||||
cerr << desc << endl;
|
||||
}
|
||||
|
||||
[[noreturn]] void Parser::_showHelpAndExit(const std::string& message, ErrorCode errorCode) {
|
||||
_showHelp();
|
||||
throw CryfsException(message, errorCode);
|
||||
}
|
||||
|
||||
[[noreturn]] void Parser::_showVersionAndExit() {
|
||||
// no need to show version because it was already shown in the CryFS header before parsing program options
|
||||
throw CryfsException("", ErrorCode::Success);
|
||||
}
|
@ -1,37 +0,0 @@
|
||||
#pragma once
|
||||
#ifndef MESSMER_CRYFSUNMOUNT_PROGRAMOPTIONS_PARSER_H
|
||||
#define MESSMER_CRYFSUNMOUNT_PROGRAMOPTIONS_PARSER_H
|
||||
|
||||
#include "ProgramOptions.h"
|
||||
#include <boost/program_options.hpp>
|
||||
#include <cryfs/impl/ErrorCodes.h>
|
||||
|
||||
namespace cryfs_unmount {
|
||||
namespace program_options {
|
||||
class Parser final {
|
||||
public:
|
||||
Parser(int argc, const char **argv);
|
||||
|
||||
ProgramOptions parse() const;
|
||||
|
||||
private:
|
||||
static std::vector<std::string> _argsToVector(int argc, const char **argv);
|
||||
static std::vector<const char*> _to_const_char_vector(const std::vector<std::string> &options);
|
||||
static void _addAllowedOptions(boost::program_options::options_description *desc);
|
||||
static void _addPositionalOptionForBaseDir(boost::program_options::options_description *desc,
|
||||
boost::program_options::positional_options_description *positional);
|
||||
static void _showHelp();
|
||||
[[noreturn]] static void _showHelpAndExit(const std::string& message, cryfs::ErrorCode errorCode);
|
||||
[[noreturn]] static void _showCiphersAndExit(const std::vector<std::string> &supportedCiphers);
|
||||
[[noreturn]] static void _showVersionAndExit();
|
||||
static boost::program_options::variables_map _parseOptionsOrShowHelp(const std::vector<std::string> &options);
|
||||
static boost::program_options::variables_map _parseOptions(const std::vector<std::string> &options);
|
||||
|
||||
std::vector<std::string> _options;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(Parser);
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
@ -1,34 +0,0 @@
|
||||
#include "ProgramOptions.h"
|
||||
#include <cstring>
|
||||
#include <cpp-utils/assert/assert.h>
|
||||
#include <cpp-utils/system/path.h>
|
||||
|
||||
using namespace cryfs_unmount::program_options;
|
||||
using std::string;
|
||||
namespace bf = boost::filesystem;
|
||||
|
||||
ProgramOptions::ProgramOptions(bf::path mountDir, bool immediate)
|
||||
: _mountDir(std::move(mountDir)),
|
||||
_mountDirIsDriveLetter(cpputils::path_is_just_drive_letter(_mountDir)),
|
||||
_immediate(immediate)
|
||||
{
|
||||
if (!_mountDirIsDriveLetter)
|
||||
{
|
||||
_mountDir = bf::absolute(std::move(_mountDir));
|
||||
}
|
||||
}
|
||||
|
||||
const bf::path &ProgramOptions::mountDir() const
|
||||
{
|
||||
return _mountDir;
|
||||
}
|
||||
|
||||
bool ProgramOptions::mountDirIsDriveLetter() const
|
||||
{
|
||||
return _mountDirIsDriveLetter;
|
||||
}
|
||||
|
||||
bool ProgramOptions::immediate() const
|
||||
{
|
||||
return _immediate;
|
||||
}
|
@ -1,35 +0,0 @@
|
||||
#pragma once
|
||||
#ifndef MESSMER_CRYFSUNMOUNT_PROGRAMOPTIONS_PROGRAMOPTIONS_H
|
||||
#define MESSMER_CRYFSUNMOUNT_PROGRAMOPTIONS_PROGRAMOPTIONS_H
|
||||
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <boost/optional.hpp>
|
||||
#include <cpp-utils/macros.h>
|
||||
#include <boost/filesystem.hpp>
|
||||
|
||||
namespace cryfs_unmount
|
||||
{
|
||||
namespace program_options
|
||||
{
|
||||
class ProgramOptions final
|
||||
{
|
||||
public:
|
||||
ProgramOptions(boost::filesystem::path mountDir, bool immediate);
|
||||
ProgramOptions(ProgramOptions &&rhs) = default;
|
||||
|
||||
const boost::filesystem::path &mountDir() const;
|
||||
bool mountDirIsDriveLetter() const;
|
||||
bool immediate() const;
|
||||
|
||||
private:
|
||||
boost::filesystem::path _mountDir;
|
||||
bool _mountDirIsDriveLetter;
|
||||
bool _immediate;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(ProgramOptions);
|
||||
};
|
||||
} // namespace program_options
|
||||
} // namespace cryfs_unmount
|
||||
|
||||
#endif
|
@ -11,79 +11,23 @@ namespace cryfs {
|
||||
constexpr const char *CryConfigConsole::DEFAULT_CIPHER;
|
||||
constexpr uint32_t CryConfigConsole::DEFAULT_BLOCKSIZE_BYTES;
|
||||
|
||||
CryConfigConsole::CryConfigConsole(shared_ptr<Console> console)
|
||||
: _console(std::move(console)), _useDefaultSettings(none) {
|
||||
CryConfigConsole::CryConfigConsole()
|
||||
: _useDefaultSettings(none) {
|
||||
}
|
||||
|
||||
string CryConfigConsole::askCipher() {
|
||||
if (_checkUseDefaultSettings()) {
|
||||
return DEFAULT_CIPHER;
|
||||
} else {
|
||||
return _askCipher();
|
||||
}
|
||||
}
|
||||
|
||||
string CryConfigConsole::_askCipher() const {
|
||||
vector<string> ciphers = CryCiphers::supportedCipherNames();
|
||||
string cipherName = "";
|
||||
bool askAgain = true;
|
||||
while(askAgain) {
|
||||
_console->print("\n");
|
||||
unsigned int cipherIndex = _console->ask("Which block cipher do you want to use?", ciphers);
|
||||
cipherName = ciphers[cipherIndex];
|
||||
askAgain = !_showWarningForCipherAndReturnIfOk(cipherName);
|
||||
};
|
||||
return cipherName;
|
||||
}
|
||||
|
||||
bool CryConfigConsole::_showWarningForCipherAndReturnIfOk(const string &cipherName) const {
|
||||
auto warning = CryCiphers::find(cipherName).warning();
|
||||
if (warning == none) {
|
||||
return true;
|
||||
}
|
||||
return _console->askYesNo(string() + (*warning) + " Do you want to take this cipher nevertheless?", true);
|
||||
return DEFAULT_CIPHER;
|
||||
}
|
||||
|
||||
uint32_t CryConfigConsole::askBlocksizeBytes() {
|
||||
if (_checkUseDefaultSettings()) {
|
||||
return DEFAULT_BLOCKSIZE_BYTES;
|
||||
} else {
|
||||
return _askBlocksizeBytes();
|
||||
}
|
||||
}
|
||||
|
||||
uint32_t CryConfigConsole::_askBlocksizeBytes() const {
|
||||
vector<string> sizes = {"4KB", "8KB", "16KB", "32KB", "64KB", "512KB", "1MB", "4MB"};
|
||||
unsigned int index = _console->ask("Which block size do you want to use?", sizes);
|
||||
switch(index) {
|
||||
case 0: return 4*1024;
|
||||
case 1: return 8*1024;
|
||||
case 2: return 16*1024;
|
||||
case 3: return 32*1024;
|
||||
case 4: return 64*1024;
|
||||
case 5: return 512*1024;
|
||||
case 6: return 1024*1024;
|
||||
case 7: return 4*1024*1024;
|
||||
default: ASSERT(false, "Unhandled case");
|
||||
}
|
||||
return DEFAULT_BLOCKSIZE_BYTES;
|
||||
}
|
||||
|
||||
bool CryConfigConsole::askMissingBlockIsIntegrityViolation() {
|
||||
if (_checkUseDefaultSettings()) {
|
||||
return DEFAULT_MISSINGBLOCKISINTEGRITYVIOLATION;
|
||||
} else {
|
||||
return _askMissingBlockIsIntegrityViolation();
|
||||
}
|
||||
}
|
||||
|
||||
bool CryConfigConsole::_askMissingBlockIsIntegrityViolation() const {
|
||||
return _console->askYesNo("\nMost integrity checks are enabled by default. However, by default CryFS does not treat missing blocks as integrity violations.\nThat is, if CryFS finds a block missing, it will assume that this is due to a synchronization delay and not because an attacker deleted the block.\nIf you are in a single-client setting, you can let it treat missing blocks as integrity violations, which will ensure that you notice if an attacker deletes one of your files.\nHowever, in this case, you will not be able to use the file system with other devices anymore.\nDo you want to treat missing blocks as integrity violations?", false);
|
||||
return DEFAULT_MISSINGBLOCKISINTEGRITYVIOLATION;
|
||||
}
|
||||
|
||||
bool CryConfigConsole::_checkUseDefaultSettings() {
|
||||
if (_useDefaultSettings == none) {
|
||||
_useDefaultSettings = _console->askYesNo("Use default settings?", true);
|
||||
}
|
||||
return *_useDefaultSettings;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
@ -9,7 +9,7 @@
|
||||
namespace cryfs {
|
||||
class CryConfigConsole final {
|
||||
public:
|
||||
CryConfigConsole(std::shared_ptr<cpputils::Console> console);
|
||||
CryConfigConsole();
|
||||
CryConfigConsole(CryConfigConsole &&rhs) = default;
|
||||
|
||||
std::string askCipher();
|
||||
@ -24,12 +24,6 @@ namespace cryfs {
|
||||
|
||||
bool _checkUseDefaultSettings();
|
||||
|
||||
std::string _askCipher() const;
|
||||
bool _showWarningForCipherAndReturnIfOk(const std::string &cipherName) const;
|
||||
uint32_t _askBlocksizeBytes() const;
|
||||
bool _askMissingBlockIsIntegrityViolation() const;
|
||||
|
||||
std::shared_ptr<cpputils::Console> _console;
|
||||
boost::optional<bool> _useDefaultSettings;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(CryConfigConsole);
|
||||
|
@ -15,8 +15,8 @@ using boost::none;
|
||||
|
||||
namespace cryfs {
|
||||
|
||||
CryConfigCreator::CryConfigCreator(shared_ptr<Console> console, RandomGenerator &encryptionKeyGenerator, LocalStateDir localStateDir)
|
||||
:_console(console), _configConsole(console), _encryptionKeyGenerator(encryptionKeyGenerator), _localStateDir(std::move(localStateDir)) {
|
||||
CryConfigCreator::CryConfigCreator(RandomGenerator &encryptionKeyGenerator, LocalStateDir localStateDir)
|
||||
:_configConsole(), _encryptionKeyGenerator(encryptionKeyGenerator), _localStateDir(std::move(localStateDir)) {
|
||||
}
|
||||
|
||||
CryConfigCreator::ConfigCreateResult CryConfigCreator::create(const optional<string> &cipherFromCommandLine, const optional<uint32_t> &blocksizeBytesFromCommandLine, const optional<bool> &missingBlockIsIntegrityViolationFromCommandLine, bool allowReplacedFilesystem) {
|
||||
@ -73,9 +73,7 @@ namespace cryfs {
|
||||
}
|
||||
|
||||
string CryConfigCreator::_generateEncKey(const std::string &cipher) {
|
||||
_console->print("\nGenerating secure encryption key. This can take some time...");
|
||||
auto key = CryCiphers::find(cipher).createKey(_encryptionKeyGenerator);
|
||||
_console->print("done\n");
|
||||
return key;
|
||||
}
|
||||
|
||||
|
@ -12,7 +12,7 @@
|
||||
namespace cryfs {
|
||||
class CryConfigCreator final {
|
||||
public:
|
||||
CryConfigCreator(std::shared_ptr<cpputils::Console> console, cpputils::RandomGenerator &encryptionKeyGenerator, LocalStateDir localStateDir);
|
||||
CryConfigCreator(cpputils::RandomGenerator &encryptionKeyGenerator, LocalStateDir localStateDir);
|
||||
CryConfigCreator(CryConfigCreator &&rhs) = default;
|
||||
|
||||
struct ConfigCreateResult {
|
||||
@ -30,7 +30,6 @@ namespace cryfs {
|
||||
boost::optional<uint32_t> _generateExclusiveClientId(const boost::optional<bool> &missingBlockIsIntegrityViolationFromCommandLine, uint32_t myClientId);
|
||||
bool _generateMissingBlockIsIntegrityViolation(const boost::optional<bool> &missingBlockIsIntegrityViolationFromCommandLine);
|
||||
|
||||
std::shared_ptr<cpputils::Console> _console;
|
||||
CryConfigConsole _configConsole;
|
||||
cpputils::RandomGenerator &_encryptionKeyGenerator;
|
||||
LocalStateDir _localStateDir;
|
||||
|
@ -11,7 +11,6 @@
|
||||
#include "cryfs/impl/CryfsException.h"
|
||||
|
||||
namespace bf = boost::filesystem;
|
||||
using cpputils::Console;
|
||||
using cpputils::RandomGenerator;
|
||||
using cpputils::unique_ref;
|
||||
using cpputils::either;
|
||||
@ -25,8 +24,8 @@ using namespace cpputils::logging;
|
||||
|
||||
namespace cryfs {
|
||||
|
||||
CryConfigLoader::CryConfigLoader(shared_ptr<Console> console, RandomGenerator &keyGenerator, unique_ref<CryKeyProvider> keyProvider, LocalStateDir localStateDir, const optional<string> &cipherFromCommandLine, const boost::optional<uint32_t> &blocksizeBytesFromCommandLine, const boost::optional<bool> &missingBlockIsIntegrityViolationFromCommandLine)
|
||||
: _console(console), _creator(std::move(console), keyGenerator, localStateDir), _keyProvider(std::move(keyProvider)),
|
||||
CryConfigLoader::CryConfigLoader(RandomGenerator &keyGenerator, unique_ref<CryKeyProvider> keyProvider, LocalStateDir localStateDir, const optional<string> &cipherFromCommandLine, const boost::optional<uint32_t> &blocksizeBytesFromCommandLine, const boost::optional<bool> &missingBlockIsIntegrityViolationFromCommandLine)
|
||||
: _creator(keyGenerator, localStateDir), _keyProvider(std::move(keyProvider)),
|
||||
_cipherFromCommandLine(cipherFromCommandLine), _blocksizeBytesFromCommandLine(blocksizeBytesFromCommandLine),
|
||||
_missingBlockIsIntegrityViolationFromCommandLine(missingBlockIsIntegrityViolationFromCommandLine),
|
||||
_localStateDir(std::move(localStateDir)) {
|
||||
@ -69,14 +68,10 @@ void CryConfigLoader::_checkVersion(const CryConfig &config, bool allowFilesyste
|
||||
throw CryfsException("This filesystem is for CryFS " + config.Version() + ". This format is not supported anymore. Please migrate the file system to a supported version first by opening it with CryFS 0.9.x (x>=4).", ErrorCode::TooOldFilesystemFormat);
|
||||
}
|
||||
if (gitversion::VersionCompare::isOlderThan(CryConfig::FilesystemFormatVersion, config.Version())) {
|
||||
if (!_console->askYesNo("This filesystem is for CryFS " + config.Version() + " or later and should not be opened with older versions. It is strongly recommended to update your CryFS version. However, if you have backed up your base directory and know what you're doing, you can continue trying to load it. Do you want to continue?", false)) {
|
||||
throw CryfsException("This filesystem is for CryFS " + config.Version() + " or later. Please update your CryFS version.", ErrorCode::TooNewFilesystemFormat);
|
||||
}
|
||||
throw CryfsException("This filesystem is for CryFS " + config.Version() + " or later. Please update your CryFS version.", ErrorCode::TooNewFilesystemFormat);
|
||||
}
|
||||
if (!allowFilesystemUpgrade && gitversion::VersionCompare::isOlderThan(config.Version(), CryConfig::FilesystemFormatVersion)) {
|
||||
if (!_console->askYesNo("This filesystem is for CryFS " + config.Version() + " (or a later version with the same storage format). You're running a CryFS version using storage format " + CryConfig::FilesystemFormatVersion + ". It is recommended to create a new filesystem with CryFS 0.10 and copy your files into it. If you don't want to do that, we can also attempt to migrate the existing filesystem, but that can take a long time, you won't be getting some of the performance advantages of the 0.10 release series, and if the migration fails, your data may be lost. If you decide to continue, please make sure you have a backup of your data. Do you want to attempt a migration now?", false)) {
|
||||
throw CryfsException("This filesystem is for CryFS " + config.Version() + " (or a later version with the same storage format). It has to be migrated.", ErrorCode::TooOldFilesystemFormat);
|
||||
}
|
||||
throw CryfsException("This filesystem is for CryFS " + config.Version() + " (or a later version with the same storage format). It has to be migrated.", ErrorCode::TooOldFilesystemFormat);
|
||||
}
|
||||
}
|
||||
|
||||
@ -97,9 +92,7 @@ void CryConfigLoader::_checkMissingBlocksAreIntegrityViolations(CryConfigFile *c
|
||||
// If the file system is set up to treat missing blocks as integrity violations, but we're accessing from a different client, ask whether they want to disable the feature.
|
||||
auto exclusiveClientId = configFile->config()->ExclusiveClientId();
|
||||
if (exclusiveClientId != none && *exclusiveClientId != myClientId) {
|
||||
if (!_console->askYesNo("\nThis filesystem is setup to treat missing blocks as integrity violations and therefore only works in single-client mode. You are trying to access it from a different client.\nDo you want to disable this integrity feature and stop treating missing blocks as integrity violations?\nChoosing yes will not affect the confidentiality of your data, but in future you might not notice if an attacker deletes one of your files.", false)) {
|
||||
throw CryfsException("File system is in single-client mode and can only be used from the client that created it.", ErrorCode::SingleClientFileSystem);
|
||||
}
|
||||
throw CryfsException("File system is in single-client mode and can only be used from the client that created it.", ErrorCode::SingleClientFileSystem);
|
||||
configFile->config()->SetExclusiveClientId(none);
|
||||
configFile->save();
|
||||
}
|
||||
|
@ -15,7 +15,7 @@ namespace cryfs {
|
||||
class CryConfigLoader final {
|
||||
public:
|
||||
// note: keyGenerator generates the inner (i.e. file system) key. keyProvider asks for the password and generates the outer (i.e. config file) key.
|
||||
CryConfigLoader(std::shared_ptr<cpputils::Console> console, cpputils::RandomGenerator &keyGenerator, cpputils::unique_ref<CryKeyProvider> keyProvider, LocalStateDir localStateDir, const boost::optional<std::string> &cipherFromCommandLine, const boost::optional<uint32_t> &blocksizeBytesFromCommandLine, const boost::optional<bool> &missingBlockIsIntegrityViolationFromCommandLine);
|
||||
CryConfigLoader(cpputils::RandomGenerator &keyGenerator, cpputils::unique_ref<CryKeyProvider> keyProvider, LocalStateDir localStateDir, const boost::optional<std::string> &cipherFromCommandLine, const boost::optional<uint32_t> &blocksizeBytesFromCommandLine, const boost::optional<bool> &missingBlockIsIntegrityViolationFromCommandLine);
|
||||
CryConfigLoader(CryConfigLoader &&rhs) = default;
|
||||
|
||||
struct ConfigLoadResult {
|
||||
@ -34,7 +34,6 @@ private:
|
||||
void _checkCipher(const CryConfig &config) const;
|
||||
void _checkMissingBlocksAreIntegrityViolations(CryConfigFile *configFile, uint32_t myClientId);
|
||||
|
||||
std::shared_ptr<cpputils::Console> _console;
|
||||
CryConfigCreator _creator;
|
||||
cpputils::unique_ref<CryKeyProvider> _keyProvider;
|
||||
boost::optional<std::string> _cipherFromCommandLine;
|
||||
|
@ -3,7 +3,6 @@
|
||||
#include "DirBlob.h"
|
||||
#include "SymlinkBlob.h"
|
||||
#include <cryfs/impl/config/CryConfigFile.h>
|
||||
#include <cpp-utils/io/ProgressBar.h>
|
||||
#include <cpp-utils/process/SignalCatcher.h>
|
||||
|
||||
using cpputils::unique_ref;
|
||||
@ -46,10 +45,8 @@ boost::optional<unique_ref<FsBlob>> FsBlobStore::load(const blockstore::BlockId
|
||||
auto fsBlobStore = make_unique_ref<FsBlobStore>(std::move(blobStore));
|
||||
|
||||
uint64_t migratedBlocks = 0;
|
||||
cpputils::ProgressBar progressbar("Migrating file system for conflict resolution features. This can take a while...", fsBlobStore->numBlocks());
|
||||
fsBlobStore->_migrate(std::move(*rootBlob), blockstore::BlockId::Null(), &signalCatcher, [&] (uint32_t numNodes) {
|
||||
migratedBlocks += numNodes;
|
||||
progressbar.update(migratedBlocks);
|
||||
});
|
||||
|
||||
return fsBlobStore;
|
||||
|
@ -8,8 +8,8 @@ set(SOURCES
|
||||
|
||||
add_library(${PROJECT_NAME} STATIC ${SOURCES})
|
||||
|
||||
target_compile_definitions(${PROJECT_NAME} PUBLIC _FILE_OFFSET_BITS=64)
|
||||
target_link_libraries(${PROJECT_NAME} PUBLIC cpp-utils fspp-interface)
|
||||
#target_compile_definitions(${PROJECT_NAME} PUBLIC _FILE_OFFSET_BITS=64)
|
||||
target_link_libraries(${PROJECT_NAME} PUBLIC cpp-utils fspp-interface cryfs-cli)
|
||||
|
||||
target_add_boost(${PROJECT_NAME})
|
||||
target_enable_style_warnings(${PROJECT_NAME})
|
||||
@ -34,11 +34,6 @@ if(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
|
||||
install(FILES "${DOKAN_LIB_PATH}/dokan1.dll" "${DOKAN_LIB_PATH}/dokanfuse1.dll"
|
||||
DESTINATION "${CMAKE_INSTALL_BINDIR}"
|
||||
)
|
||||
|
||||
else() # Linux and macOS
|
||||
find_package(PkgConfig REQUIRED)
|
||||
pkg_check_modules(Fuse REQUIRED IMPORTED_TARGET fuse)
|
||||
target_link_libraries(${PROJECT_NAME} PUBLIC PkgConfig::Fuse)
|
||||
endif()
|
||||
|
||||
if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
|
||||
|
@ -19,11 +19,6 @@
|
||||
#include <codecvt>
|
||||
#include <boost/algorithm/string/replace.hpp>
|
||||
|
||||
#include <range/v3/view/split.hpp>
|
||||
#include <range/v3/view/join.hpp>
|
||||
#include <range/v3/view/filter.hpp>
|
||||
#include <range/v3/range/conversion.hpp>
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
#include <dokan/dokan.h>
|
||||
#endif
|
||||
@ -60,199 +55,9 @@ public:
|
||||
};
|
||||
}
|
||||
|
||||
#define FUSE_OBJ (static_cast<Fuse *>(fuse_get_context()->private_data))
|
||||
|
||||
// Remove the following line, if you don't want to output each fuse operation on the console
|
||||
//#define FSPP_LOG 1
|
||||
|
||||
namespace {
|
||||
int fusepp_getattr(const char *path, fspp::fuse::STAT *stbuf) {
|
||||
int rs = FUSE_OBJ->getattr(bf::path(path), stbuf);
|
||||
return rs;
|
||||
}
|
||||
|
||||
int fusepp_fgetattr(const char *path, fspp::fuse::STAT *stbuf, fuse_file_info *fileinfo) {
|
||||
return FUSE_OBJ->fgetattr(bf::path(path), stbuf, fileinfo);
|
||||
}
|
||||
|
||||
int fusepp_readlink(const char *path, char *buf, size_t size) {
|
||||
return FUSE_OBJ->readlink(bf::path(path), buf, size);
|
||||
}
|
||||
|
||||
int fusepp_mknod(const char *path, ::mode_t mode, dev_t rdev) {
|
||||
return FUSE_OBJ->mknod(bf::path(path), mode, rdev);
|
||||
}
|
||||
|
||||
int fusepp_mkdir(const char *path, ::mode_t mode) {
|
||||
return FUSE_OBJ->mkdir(bf::path(path), mode);
|
||||
}
|
||||
|
||||
int fusepp_unlink(const char *path) {
|
||||
return FUSE_OBJ->unlink(bf::path(path));
|
||||
}
|
||||
|
||||
int fusepp_rmdir(const char *path) {
|
||||
return FUSE_OBJ->rmdir(bf::path(path));
|
||||
}
|
||||
|
||||
int fusepp_symlink(const char *to, const char *from) {
|
||||
return FUSE_OBJ->symlink(bf::path(to), bf::path(from));
|
||||
}
|
||||
|
||||
int fusepp_rename(const char *from, const char *to) {
|
||||
return FUSE_OBJ->rename(bf::path(from), bf::path(to));
|
||||
}
|
||||
|
||||
int fusepp_link(const char *from, const char *to) {
|
||||
return FUSE_OBJ->link(bf::path(from), bf::path(to));
|
||||
}
|
||||
|
||||
int fusepp_chmod(const char *path, ::mode_t mode) {
|
||||
return FUSE_OBJ->chmod(bf::path(path), mode);
|
||||
}
|
||||
|
||||
int fusepp_chown(const char *path, ::uid_t uid, ::gid_t gid) {
|
||||
return FUSE_OBJ->chown(bf::path(path), uid, gid);
|
||||
}
|
||||
|
||||
int fusepp_truncate(const char *path, int64_t size) {
|
||||
return FUSE_OBJ->truncate(bf::path(path), size);
|
||||
}
|
||||
|
||||
int fusepp_ftruncate(const char *path, int64_t size, fuse_file_info *fileinfo) {
|
||||
return FUSE_OBJ->ftruncate(bf::path(path), size, fileinfo);
|
||||
}
|
||||
|
||||
int fusepp_utimens(const char *path, const timespec times[2]) { // NOLINT(cppcoreguidelines-avoid-c-arrays)
|
||||
return FUSE_OBJ->utimens(bf::path(path), {times[0], times[1]});
|
||||
}
|
||||
|
||||
int fusepp_open(const char *path, fuse_file_info *fileinfo) {
|
||||
return FUSE_OBJ->open(bf::path(path), fileinfo);
|
||||
}
|
||||
|
||||
int fusepp_release(const char *path, fuse_file_info *fileinfo) {
|
||||
return FUSE_OBJ->release(bf::path(path), fileinfo);
|
||||
}
|
||||
|
||||
int fusepp_read(const char *path, char *buf, size_t size, int64_t offset, fuse_file_info *fileinfo) {
|
||||
return FUSE_OBJ->read(bf::path(path), buf, size, offset, fileinfo);
|
||||
}
|
||||
|
||||
int fusepp_write(const char *path, const char *buf, size_t size, int64_t offset, fuse_file_info *fileinfo) {
|
||||
return FUSE_OBJ->write(bf::path(path), buf, size, offset, fileinfo);
|
||||
}
|
||||
|
||||
int fusepp_statfs(const char *path, struct statvfs *fsstat) {
|
||||
return FUSE_OBJ->statfs(bf::path(path), fsstat);
|
||||
}
|
||||
|
||||
int fusepp_flush(const char *path, fuse_file_info *fileinfo) {
|
||||
return FUSE_OBJ->flush(bf::path(path), fileinfo);
|
||||
}
|
||||
|
||||
int fusepp_fsync(const char *path, int datasync, fuse_file_info *fileinfo) {
|
||||
return FUSE_OBJ->fsync(bf::path(path), datasync, fileinfo);
|
||||
}
|
||||
|
||||
//int fusepp_setxattr(const char*, const char*, const char*, size_t, int)
|
||||
//int fusepp_getxattr(const char*, const char*, char*, size_t)
|
||||
//int fusepp_listxattr(const char*, char*, size_t)
|
||||
//int fusepp_removexattr(const char*, const char*)
|
||||
|
||||
int fusepp_opendir(const char *path, fuse_file_info *fileinfo) {
|
||||
return FUSE_OBJ->opendir(bf::path(path), fileinfo);
|
||||
}
|
||||
|
||||
int fusepp_readdir(const char *path, void *buf, fuse_fill_dir_t filler, int64_t offset, fuse_file_info *fileinfo) {
|
||||
return FUSE_OBJ->readdir(bf::path(path), buf, filler, offset, fileinfo);
|
||||
}
|
||||
|
||||
int fusepp_releasedir(const char *path, fuse_file_info *fileinfo) {
|
||||
return FUSE_OBJ->releasedir(bf::path(path), fileinfo);
|
||||
}
|
||||
|
||||
int fusepp_fsyncdir(const char *path, int datasync, fuse_file_info *fileinfo) {
|
||||
return FUSE_OBJ->fsyncdir(bf::path(path), datasync, fileinfo);
|
||||
}
|
||||
|
||||
void* fusepp_init(fuse_conn_info *conn) {
|
||||
auto f = FUSE_OBJ;
|
||||
f->init(conn);
|
||||
return f;
|
||||
}
|
||||
|
||||
void fusepp_destroy(void *userdata) {
|
||||
auto f = FUSE_OBJ;
|
||||
ASSERT(userdata == f, "Wrong userdata set");
|
||||
UNUSED(userdata); //In case the assert is disabled
|
||||
f->destroy();
|
||||
}
|
||||
|
||||
int fusepp_access(const char *path, int mask) {
|
||||
return FUSE_OBJ->access(bf::path(path), mask);
|
||||
}
|
||||
|
||||
int fusepp_create(const char *path, ::mode_t mode, fuse_file_info *fileinfo) {
|
||||
return FUSE_OBJ->create(bf::path(path), mode, fileinfo);
|
||||
}
|
||||
|
||||
/*int fusepp_lock(const char*, fuse_file_info*, int cmd, flock*)
|
||||
int fusepp_bmap(const char*, size_t blocksize, uint64_t *idx)
|
||||
int fusepp_ioctl(const char*, int cmd, void *arg, fuse_file_info*, unsigned int flags, void *data)
|
||||
int fusepp_poll(const char*, fuse_file_info*, fuse_pollhandle *ph, unsigned *reventsp)
|
||||
int fusepp_write_buf(const char*, fuse_bufvec *buf, int64_t off, fuse_file_info*)
|
||||
int fusepp_read_buf(const chas*, struct fuse_bufvec **bufp, size_t size, int64_t off, fuse_file_info*)
|
||||
int fusepp_flock(const char*, fuse_file_info*, int op)
|
||||
int fusepp_fallocate(const char*, int, int64_t, int64_t, fuse_file_info*)*/
|
||||
|
||||
fuse_operations *operations() {
|
||||
static std::unique_ptr<fuse_operations> singleton(nullptr);
|
||||
|
||||
if (!singleton) {
|
||||
singleton = std::make_unique<fuse_operations>();
|
||||
singleton->getattr = &fusepp_getattr;
|
||||
singleton->fgetattr = &fusepp_fgetattr;
|
||||
singleton->readlink = &fusepp_readlink;
|
||||
singleton->mknod = &fusepp_mknod;
|
||||
singleton->mkdir = &fusepp_mkdir;
|
||||
singleton->unlink = &fusepp_unlink;
|
||||
singleton->rmdir = &fusepp_rmdir;
|
||||
singleton->symlink = &fusepp_symlink;
|
||||
singleton->rename = &fusepp_rename;
|
||||
singleton->link = &fusepp_link;
|
||||
singleton->chmod = &fusepp_chmod;
|
||||
singleton->chown = &fusepp_chown;
|
||||
singleton->truncate = &fusepp_truncate;
|
||||
singleton->utimens = &fusepp_utimens;
|
||||
singleton->open = &fusepp_open;
|
||||
singleton->read = &fusepp_read;
|
||||
singleton->write = &fusepp_write;
|
||||
singleton->statfs = &fusepp_statfs;
|
||||
singleton->flush = &fusepp_flush;
|
||||
singleton->release = &fusepp_release;
|
||||
singleton->fsync = &fusepp_fsync;
|
||||
/*#ifdef HAVE_SYS_XATTR_H
|
||||
singleton->setxattr = &fusepp_setxattr;
|
||||
singleton->getxattr = &fusepp_getxattr;
|
||||
singleton->listxattr = &fusepp_listxattr;
|
||||
singleton->removexattr = &fusepp_removexattr;
|
||||
#endif*/
|
||||
singleton->opendir = &fusepp_opendir;
|
||||
singleton->readdir = &fusepp_readdir;
|
||||
singleton->releasedir = &fusepp_releasedir;
|
||||
singleton->fsyncdir = &fusepp_fsyncdir;
|
||||
singleton->init = &fusepp_init;
|
||||
singleton->destroy = &fusepp_destroy;
|
||||
singleton->access = &fusepp_access;
|
||||
singleton->create = &fusepp_create;
|
||||
singleton->ftruncate = &fusepp_ftruncate;
|
||||
}
|
||||
|
||||
return singleton.get();
|
||||
}
|
||||
}
|
||||
|
||||
Fuse::~Fuse() {
|
||||
for(char *arg : _argv) {
|
||||
delete[] arg;
|
||||
@ -261,10 +66,9 @@ Fuse::~Fuse() {
|
||||
_argv.clear();
|
||||
}
|
||||
|
||||
Fuse::Fuse(std::function<shared_ptr<Filesystem> (Fuse *fuse)> init, std::function<void()> onMounted, std::string fstype, boost::optional<std::string> fsname)
|
||||
:_init(std::move(init)), _onMounted(std::move(onMounted)), _fs(make_shared<InvalidFilesystem>()), _mountdir(), _running(false), _fstype(std::move(fstype)), _fsname(std::move(fsname)) {
|
||||
Fuse::Fuse(std::function<shared_ptr<Filesystem> ()> init, std::string fstype, boost::optional<std::string> fsname)
|
||||
:_init(std::move(init)), _fs(make_shared<InvalidFilesystem>()), _running(false), _fstype(std::move(fstype)), _fsname(std::move(fsname)) {
|
||||
ASSERT(static_cast<bool>(_init), "Invalid init given");
|
||||
ASSERT(static_cast<bool>(_onMounted), "Invalid onMounted given");
|
||||
}
|
||||
|
||||
void Fuse::_logException(const std::exception &e) {
|
||||
@ -275,21 +79,6 @@ void Fuse::_logUnknownException() {
|
||||
LOG(ERR, "Unknown exception thrown");
|
||||
}
|
||||
|
||||
void Fuse::runInForeground(const bf::path &mountdir, vector<string> fuseOptions) {
|
||||
vector<string> realFuseOptions = std::move(fuseOptions);
|
||||
if (std::find(realFuseOptions.begin(), realFuseOptions.end(), "-f") == realFuseOptions.end()) {
|
||||
realFuseOptions.push_back("-f");
|
||||
}
|
||||
_run(mountdir, std::move(realFuseOptions));
|
||||
}
|
||||
|
||||
void Fuse::runInBackground(const bf::path &mountdir, vector<string> fuseOptions) {
|
||||
vector<string> realFuseOptions = std::move(fuseOptions);
|
||||
_removeAndWarnIfExists(&realFuseOptions, "-f");
|
||||
_removeAndWarnIfExists(&realFuseOptions, "-d");
|
||||
_run(mountdir, std::move(realFuseOptions));
|
||||
}
|
||||
|
||||
void Fuse::_removeAndWarnIfExists(vector<string> *fuseOptions, const std::string &option) {
|
||||
auto found = std::find(fuseOptions->begin(), fuseOptions->end(), option);
|
||||
if (found != fuseOptions->end()) {
|
||||
@ -301,202 +90,10 @@ void Fuse::_removeAndWarnIfExists(vector<string> *fuseOptions, const std::string
|
||||
}
|
||||
}
|
||||
|
||||
namespace {
|
||||
void extractAllAtimeOptionsAndRemoveOnesUnknownToLibfuse_(string* csv_options, vector<string>* result) {
|
||||
const auto is_fuse_supported_atime_flag = [] (const std::string& flag) {
|
||||
constexpr std::array<const char*, 2> flags = {"noatime", "atime"};
|
||||
return flags.end() != std::find(flags.begin(), flags.end(), flag);
|
||||
};
|
||||
const auto is_fuse_unsupported_atime_flag = [] (const std::string& flag) {
|
||||
constexpr std::array<const char*, 3> flags = {"strictatime", "relatime", "nodiratime"};
|
||||
return flags.end() != std::find(flags.begin(), flags.end(), flag);
|
||||
};
|
||||
*csv_options = ranges::make_subrange(csv_options->begin(), csv_options->end())
|
||||
| ranges::views::split(',')
|
||||
| ranges::views::filter(
|
||||
[&] (auto&& elem_) {
|
||||
// TODO string_view would be better
|
||||
std::string elem(&*elem_.begin(), ranges::distance(elem_));
|
||||
if (is_fuse_unsupported_atime_flag(elem)) {
|
||||
result->push_back(elem);
|
||||
return false;
|
||||
}
|
||||
if (is_fuse_supported_atime_flag(elem)) {
|
||||
result->push_back(elem);
|
||||
}
|
||||
return true;
|
||||
})
|
||||
| ranges::views::join(',')
|
||||
| ranges::to<string>();
|
||||
}
|
||||
|
||||
// Return a list of all atime options (e.g. atime, noatime, relatime, strictatime, nodiratime) that occur in the
|
||||
// fuseOptions input. They must be preceded by a '-o', i.e. {..., '-o', 'noatime', ...} and multiple ones can be
|
||||
// csv-concatenated, i.e. {..., '-o', 'atime,nodiratime', ...}.
|
||||
// Also, this function removes all of these atime options that are unknown to libfuse (i.e. all except atime and noatime)
|
||||
// from the input fuseOptions so we can pass it on to libfuse without crashing.
|
||||
vector<string> extractAllAtimeOptionsAndRemoveOnesUnknownToLibfuse_(vector<string>* fuseOptions) {
|
||||
vector<string> result;
|
||||
bool lastOptionWasDashO = false;
|
||||
for (string& option : *fuseOptions) {
|
||||
if (lastOptionWasDashO) {
|
||||
extractAllAtimeOptionsAndRemoveOnesUnknownToLibfuse_(&option, &result);
|
||||
}
|
||||
lastOptionWasDashO = (option == "-o");
|
||||
}
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
void Fuse::_run(const bf::path &mountdir, vector<string> fuseOptions) {
|
||||
#if defined(__GLIBC__)|| defined(__APPLE__) || defined(_MSC_VER)
|
||||
// Avoid encoding errors for non-utf8 characters, see https://github.com/cryfs/cryfs/issues/247
|
||||
// this is ifdef'd out for non-glibc linux, because musl doesn't handle this correctly.
|
||||
bf::path::imbue(std::locale(std::locale(), new std::codecvt_utf8_utf16<wchar_t>()));
|
||||
#endif
|
||||
|
||||
_mountdir = mountdir;
|
||||
|
||||
ASSERT(_argv.size() == 0, "Filesystem already started");
|
||||
|
||||
vector<string> atimeOptions = extractAllAtimeOptionsAndRemoveOnesUnknownToLibfuse_(&fuseOptions);
|
||||
_createContext(atimeOptions);
|
||||
|
||||
_argv = _build_argv(mountdir, fuseOptions);
|
||||
|
||||
fuse_main(_argv.size(), _argv.data(), operations(), this);
|
||||
}
|
||||
|
||||
void Fuse::_createContext(const vector<string> &fuseOptions) {
|
||||
const bool has_atime_flag = fuseOptions.end() != std::find(fuseOptions.begin(), fuseOptions.end(), "atime");
|
||||
const bool has_noatime_flag = fuseOptions.end() != std::find(fuseOptions.begin(), fuseOptions.end(), "noatime");
|
||||
const bool has_relatime_flag = fuseOptions.end() != std::find(fuseOptions.begin(), fuseOptions.end(), "relatime");
|
||||
const bool has_strictatime_flag = fuseOptions.end() != std::find(fuseOptions.begin(), fuseOptions.end(), "strictatime");
|
||||
const bool has_nodiratime_flag = fuseOptions.end() != std::find(fuseOptions.begin(), fuseOptions.end(), "nodiratime");
|
||||
|
||||
// Default is NOATIME, this reduces the probability for synchronization conflicts
|
||||
_context = Context(noatime());
|
||||
|
||||
if (has_noatime_flag) {
|
||||
ASSERT(!has_atime_flag, "Cannot have both, noatime and atime flags set.");
|
||||
ASSERT(!has_relatime_flag, "Cannot have both, noatime and relatime flags set.");
|
||||
ASSERT(!has_strictatime_flag, "Cannot have both, noatime and strictatime flags set.");
|
||||
// note: can have nodiratime flag set but that is ignored because it is already included in the noatime policy.
|
||||
_context->setTimestampUpdateBehavior(noatime());
|
||||
} else if (has_relatime_flag) {
|
||||
// note: can have atime and relatime both set, they're identical
|
||||
ASSERT(!has_noatime_flag, "This shouldn't happen, or we would have hit a case above.");
|
||||
ASSERT(!has_strictatime_flag, "Cannot have both, relatime and strictatime flags set.");
|
||||
if (has_nodiratime_flag) {
|
||||
_context->setTimestampUpdateBehavior(nodiratime_relatime());
|
||||
} else {
|
||||
_context->setTimestampUpdateBehavior(relatime());
|
||||
}
|
||||
} else if (has_atime_flag) {
|
||||
// note: can have atime and relatime both set, they're identical
|
||||
ASSERT(!has_noatime_flag, "This shouldn't happen, or we would have hit a case above");
|
||||
ASSERT(!has_strictatime_flag, "Cannot have both, atime and strictatime flags set.");
|
||||
if (has_nodiratime_flag) {
|
||||
_context->setTimestampUpdateBehavior(nodiratime_relatime());
|
||||
} else {
|
||||
_context->setTimestampUpdateBehavior(relatime());
|
||||
}
|
||||
} else if (has_strictatime_flag) {
|
||||
ASSERT(!has_noatime_flag, "This shouldn't happen, or we would have hit a case above");
|
||||
ASSERT(!has_atime_flag, "This shouldn't happen, or we would have hit a case above");
|
||||
ASSERT(!has_relatime_flag, "This shouldn't happen, or we would have hit a case above");
|
||||
if (has_nodiratime_flag) {
|
||||
_context->setTimestampUpdateBehavior(nodiratime_strictatime());
|
||||
} else {
|
||||
_context->setTimestampUpdateBehavior(strictatime());
|
||||
}
|
||||
} else if (has_nodiratime_flag) {
|
||||
ASSERT(!has_noatime_flag, "This shouldn't happen, or we would have hit a case above");
|
||||
ASSERT(!has_atime_flag, "This shouldn't happen, or we would have hit a case above");
|
||||
ASSERT(!has_relatime_flag, "This shouldn't happen, or we would have hit a case above");
|
||||
ASSERT(!has_strictatime_flag, "This shouldn't happen, or we would have hit a case above");
|
||||
_context->setTimestampUpdateBehavior(noatime()); // use noatime by default
|
||||
}
|
||||
}
|
||||
|
||||
vector<char *> Fuse::_build_argv(const bf::path &mountdir, const vector<string> &fuseOptions) {
|
||||
vector<char *> argv;
|
||||
argv.reserve(6 + fuseOptions.size()); // fuseOptions + executable name + mountdir + 2x fuse options (subtype, fsname), each taking 2 entries ("-o", "key=value").
|
||||
argv.push_back(_create_c_string(_fstype)); // The first argument (executable name) is the file system type
|
||||
argv.push_back(_create_c_string(mountdir.string())); // The second argument is the mountdir
|
||||
for (const string &option : fuseOptions) {
|
||||
argv.push_back(_create_c_string(option));
|
||||
}
|
||||
_add_fuse_option_if_not_exists(&argv, "subtype", _fstype);
|
||||
auto fsname = _fsname.get_value_or(_fstype);
|
||||
boost::replace_all(fsname, ",", "\\,"); // Avoid fuse options parser bug where a comma in the fsname is misinterpreted as an options delimiter, see https://github.com/cryfs/cryfs/issues/326
|
||||
_add_fuse_option_if_not_exists(&argv, "fsname", fsname);
|
||||
#ifdef __APPLE__
|
||||
// Make volume name default to mountdir on macOS
|
||||
_add_fuse_option_if_not_exists(&argv, "volname", mountdir.filename().string());
|
||||
#endif
|
||||
// TODO Also set read/write size for macFUSE. The options there are called differently.
|
||||
// large_read not necessary because reads are large anyhow. This option is only important for 2.4.
|
||||
//argv.push_back(_create_c_string("-o"));
|
||||
//argv.push_back(_create_c_string("large_read"));
|
||||
argv.push_back(_create_c_string("-o"));
|
||||
argv.push_back(_create_c_string("big_writes"));
|
||||
return argv;
|
||||
}
|
||||
|
||||
void Fuse::_add_fuse_option_if_not_exists(vector<char *> *argv, const string &key, const string &value) {
|
||||
if(!_has_option(*argv, key)) {
|
||||
argv->push_back(_create_c_string("-o"));
|
||||
argv->push_back(_create_c_string(key + "=" + value));
|
||||
}
|
||||
}
|
||||
|
||||
bool Fuse::_has_option(const vector<char *> &vec, const string &key) {
|
||||
// The fuse option can either be present as "-okey=value" or as "-o key=value", we have to check both.
|
||||
return _has_entry_with_prefix(key + "=", vec) || _has_entry_with_prefix("-o" + key + "=", vec);
|
||||
}
|
||||
|
||||
bool Fuse::_has_entry_with_prefix(const string &prefix, const vector<char *> &vec) {
|
||||
auto found = std::find_if(vec.begin(), vec.end(), [&prefix](const char *entry) {
|
||||
return 0 == std::strncmp(prefix.c_str(), entry, prefix.size());
|
||||
});
|
||||
return found != vec.end();
|
||||
}
|
||||
|
||||
char *Fuse::_create_c_string(const string &str) {
|
||||
// The memory allocated here is destroyed in the destructor of the Fuse class.
|
||||
char *c_str = new char[str.size()+1];
|
||||
std::memcpy(c_str, str.c_str(), str.size()+1);
|
||||
return c_str;
|
||||
}
|
||||
|
||||
bool Fuse::running() const {
|
||||
return _running;
|
||||
}
|
||||
|
||||
void Fuse::stop() {
|
||||
unmount(_mountdir, false);
|
||||
}
|
||||
|
||||
void Fuse::unmount(const bf::path& mountdir, bool force) {
|
||||
//TODO Find better way to unmount (i.e. don't use external fusermount). Unmounting by kill(getpid(), SIGINT) worked, but left the mount directory transport endpoint as not connected.
|
||||
#if defined(__APPLE__)
|
||||
UNUSED(force);
|
||||
int returncode = cpputils::Subprocess::call("umount", {mountdir.string()}, "").exitcode;
|
||||
#elif defined(_MSC_VER)
|
||||
UNUSED(force);
|
||||
std::wstring mountdir_ = std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>>().from_bytes(mountdir.string());
|
||||
BOOL success = DokanRemoveMountPoint(mountdir_.c_str());
|
||||
int returncode = success ? 0 : -1;
|
||||
#else
|
||||
std::vector<std::string> args = force ? std::vector<std::string>({"-u", mountdir.string()}) : std::vector<std::string>({"-u", "-z", mountdir.string()}); // "-z" takes care that if the filesystem can't be unmounted right now because something is opened, it will be unmounted as soon as it can be.
|
||||
int returncode = cpputils::Subprocess::call("fusermount", args, "").exitcode;
|
||||
#endif
|
||||
if (returncode != 0) {
|
||||
throw std::runtime_error("Could not unmount filesystem");
|
||||
}
|
||||
}
|
||||
|
||||
int Fuse::getattr(const bf::path &path, fspp::fuse::STAT *stbuf) {
|
||||
ThreadNameForDebugging _threadName("getattr");
|
||||
#ifdef FSPP_LOG
|
||||
@ -526,7 +123,7 @@ int Fuse::getattr(const bf::path &path, fspp::fuse::STAT *stbuf) {
|
||||
}
|
||||
}
|
||||
|
||||
int Fuse::fgetattr(const bf::path &path, fspp::fuse::STAT *stbuf, fuse_file_info *fileinfo) {
|
||||
int Fuse::fgetattr(const bf::path &path, fspp::fuse::STAT *stbuf, uint64_t fh) {
|
||||
ThreadNameForDebugging _threadName("fgetattr");
|
||||
#ifdef FSPP_LOG
|
||||
LOG(DEBUG, "fgetattr({}, _, _)", path);
|
||||
@ -547,7 +144,7 @@ int Fuse::fgetattr(const bf::path &path, fspp::fuse::STAT *stbuf, fuse_file_info
|
||||
|
||||
try {
|
||||
ASSERT(is_valid_fspp_path(path), "has to be an absolute path");
|
||||
_fs->fstat(fileinfo->fh, stbuf);
|
||||
_fs->fstat(fh, stbuf);
|
||||
#ifdef FSPP_LOG
|
||||
LOG(DEBUG, "fgetattr({}, _, _): success", path);
|
||||
#endif
|
||||
@ -598,15 +195,6 @@ int Fuse::readlink(const bf::path &path, char *buf, size_t size) {
|
||||
}
|
||||
}
|
||||
|
||||
int Fuse::mknod(const bf::path &path, ::mode_t mode, dev_t rdev) {
|
||||
UNUSED(rdev);
|
||||
UNUSED(mode);
|
||||
UNUSED(path);
|
||||
ThreadNameForDebugging _threadName("mknod");
|
||||
LOG(WARN, "Called non-implemented mknod({}, {}, _)", path, mode);
|
||||
return ENOSYS;
|
||||
}
|
||||
|
||||
int Fuse::mkdir(const bf::path &path, ::mode_t mode) {
|
||||
ThreadNameForDebugging _threadName("mkdir");
|
||||
#ifdef FSPP_LOG
|
||||
@ -622,8 +210,7 @@ int Fuse::mkdir(const bf::path &path, ::mode_t mode) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
auto context = fuse_get_context();
|
||||
_fs->mkdir(path, mode, context->uid, context->gid);
|
||||
_fs->mkdir(path, mode, uid, gid);
|
||||
#ifdef FSPP_LOG
|
||||
LOG(DEBUG, "mkdir({}, {}): success", path, mode);
|
||||
#endif
|
||||
@ -710,8 +297,7 @@ int Fuse::symlink(const bf::path &to, const bf::path &from) {
|
||||
#endif
|
||||
try {
|
||||
ASSERT(is_valid_fspp_path(from), "has to be an absolute path");
|
||||
auto context = fuse_get_context();
|
||||
_fs->createSymlink(to, from, context->uid, context->gid);
|
||||
_fs->createSymlink(to, from, uid, gid);
|
||||
#ifdef FSPP_LOG
|
||||
LOG(DEBUG, "symlink({}, {}): success", to, from);
|
||||
#endif
|
||||
@ -861,16 +447,15 @@ int Fuse::truncate(const bf::path &path, int64_t size) {
|
||||
}
|
||||
}
|
||||
|
||||
int Fuse::ftruncate(const bf::path &path, int64_t size, fuse_file_info *fileinfo) {
|
||||
int Fuse::ftruncate(int64_t size, uint64_t fh) {
|
||||
ThreadNameForDebugging _threadName("ftruncate");
|
||||
#ifdef FSPP_LOG
|
||||
LOG(DEBUG, "ftruncate({}, {})", path, size);
|
||||
LOG(DEBUG, "ftruncate({}, {})", fh, size);
|
||||
#endif
|
||||
UNUSED(path);
|
||||
try {
|
||||
_fs->ftruncate(fileinfo->fh, fspp::num_bytes_t(size));
|
||||
_fs->ftruncate(fh, fspp::num_bytes_t(size));
|
||||
#ifdef FSPP_LOG
|
||||
LOG(DEBUG, "ftruncate({}, {}): success", path, size);
|
||||
LOG(DEBUG, "ftruncate({}, {}): success", fh, size);
|
||||
#endif
|
||||
return 0;
|
||||
} catch(const cpputils::AssertFailed &e) {
|
||||
@ -878,7 +463,7 @@ int Fuse::ftruncate(const bf::path &path, int64_t size, fuse_file_info *fileinfo
|
||||
return -EIO;
|
||||
} catch (FuseErrnoException &e) {
|
||||
#ifdef FSPP_LOG
|
||||
LOG(WARN, "ftruncate({}, {}): failed with errno {}", path, size, e.getErrno());
|
||||
LOG(WARN, "ftruncate({}, {}): failed with errno {}", fh, size, e.getErrno());
|
||||
#endif
|
||||
return -e.getErrno();
|
||||
} catch(const std::exception &e) {
|
||||
@ -890,14 +475,14 @@ int Fuse::ftruncate(const bf::path &path, int64_t size, fuse_file_info *fileinfo
|
||||
}
|
||||
}
|
||||
|
||||
int Fuse::utimens(const bf::path &path, const std::array<timespec, 2> times) {
|
||||
int Fuse::utimens(const bf::path &path, const timespec lastAccessTime, const timespec lastModificationTime) {
|
||||
ThreadNameForDebugging _threadName("utimens");
|
||||
#ifdef FSPP_LOG
|
||||
LOG(DEBUG, "utimens({}, _)", path);
|
||||
#endif
|
||||
try {
|
||||
ASSERT(is_valid_fspp_path(path), "has to be an absolute path");
|
||||
_fs->utimens(path, times[0], times[1]);
|
||||
_fs->utimens(path, lastAccessTime, lastModificationTime);
|
||||
#ifdef FSPP_LOG
|
||||
LOG(DEBUG, "utimens({}, _): success", path);
|
||||
#endif
|
||||
@ -919,14 +504,14 @@ int Fuse::utimens(const bf::path &path, const std::array<timespec, 2> times) {
|
||||
}
|
||||
}
|
||||
|
||||
int Fuse::open(const bf::path &path, fuse_file_info *fileinfo) {
|
||||
int Fuse::open(const bf::path &path, uint64_t* fh, int flags) {
|
||||
ThreadNameForDebugging _threadName("open");
|
||||
#ifdef FSPP_LOG
|
||||
LOG(DEBUG, "open({}, _)", path);
|
||||
#endif
|
||||
try {
|
||||
ASSERT(is_valid_fspp_path(path), "has to be an absolute path");
|
||||
fileinfo->fh = _fs->openFile(path, fileinfo->flags);
|
||||
*fh = _fs->openFile(path, flags);
|
||||
#ifdef FSPP_LOG
|
||||
LOG(DEBUG, "open({}, _): success", path);
|
||||
#endif
|
||||
@ -948,16 +533,15 @@ int Fuse::open(const bf::path &path, fuse_file_info *fileinfo) {
|
||||
}
|
||||
}
|
||||
|
||||
int Fuse::release(const bf::path &path, fuse_file_info *fileinfo) {
|
||||
int Fuse::release(uint64_t fh) {
|
||||
ThreadNameForDebugging _threadName("release");
|
||||
#ifdef FSPP_LOG
|
||||
LOG(DEBUG, "release({}, _)", path);
|
||||
LOG(DEBUG, "release({}, _)", fh);
|
||||
#endif
|
||||
UNUSED(path);
|
||||
try {
|
||||
_fs->closeFile(fileinfo->fh);
|
||||
_fs->closeFile(fh);
|
||||
#ifdef FSPP_LOG
|
||||
LOG(DEBUG, "release({}, _): success", path);
|
||||
LOG(DEBUG, "release({}, _): success", fh);
|
||||
#endif
|
||||
return 0;
|
||||
} catch(const cpputils::AssertFailed &e) {
|
||||
@ -965,7 +549,7 @@ int Fuse::release(const bf::path &path, fuse_file_info *fileinfo) {
|
||||
return -EIO;
|
||||
} catch (FuseErrnoException &e) {
|
||||
#ifdef FSPP_LOG
|
||||
LOG(WARN, "release({}, _): failed with errno {}", path, e.getErrno());
|
||||
LOG(WARN, "release({}, _): failed with errno {}", fh, e.getErrno());
|
||||
#endif
|
||||
return -e.getErrno();
|
||||
} catch(const std::exception &e) {
|
||||
@ -977,16 +561,15 @@ int Fuse::release(const bf::path &path, fuse_file_info *fileinfo) {
|
||||
}
|
||||
}
|
||||
|
||||
int Fuse::read(const bf::path &path, char *buf, size_t size, int64_t offset, fuse_file_info *fileinfo) {
|
||||
int Fuse::read(char *buf, size_t size, int64_t offset, uint64_t fh) {
|
||||
ThreadNameForDebugging _threadName("read");
|
||||
#ifdef FSPP_LOG
|
||||
LOG(DEBUG, "read({}, _, {}, {}, _)", path, size, offset);
|
||||
LOG(DEBUG, "read({}, _, {}, {}, _)", fh, size, offset);
|
||||
#endif
|
||||
UNUSED(path);
|
||||
try {
|
||||
int result = _fs->read(fileinfo->fh, buf, fspp::num_bytes_t(size), fspp::num_bytes_t(offset)).value();
|
||||
int result = _fs->read(fh, buf, fspp::num_bytes_t(size), fspp::num_bytes_t(offset)).value();
|
||||
#ifdef FSPP_LOG
|
||||
LOG(DEBUG, "read({}, _, {}, {}, _): success with {}", path, size, offset, result);
|
||||
LOG(DEBUG, "read({}, _, {}, {}, _): success with {}", fh, size, offset, result);
|
||||
#endif
|
||||
return result;
|
||||
} catch(const cpputils::AssertFailed &e) {
|
||||
@ -994,7 +577,7 @@ int Fuse::read(const bf::path &path, char *buf, size_t size, int64_t offset, fus
|
||||
return -EIO;
|
||||
} catch (FuseErrnoException &e) {
|
||||
#ifdef FSPP_LOG
|
||||
LOG(WARN, "read({}, _, {}, {}, _): failed with errno {}", path, size, offset, e.getErrno());
|
||||
LOG(WARN, "read({}, _, {}, {}, _): failed with errno {}", fh, size, offset, e.getErrno());
|
||||
#endif
|
||||
return -e.getErrno();
|
||||
} catch(const std::exception &e) {
|
||||
@ -1006,16 +589,15 @@ int Fuse::read(const bf::path &path, char *buf, size_t size, int64_t offset, fus
|
||||
}
|
||||
}
|
||||
|
||||
int Fuse::write(const bf::path &path, const char *buf, size_t size, int64_t offset, fuse_file_info *fileinfo) {
|
||||
int Fuse::write(const char *buf, size_t size, int64_t offset, uint64_t fh) {
|
||||
ThreadNameForDebugging _threadName("write");
|
||||
#ifdef FSPP_LOG
|
||||
LOG(DEBUG, "write({}, _, {}, {}, _)", path, size, offset);
|
||||
LOG(DEBUG, "write({}, _, {}, {}, _)", fh, size, offset);
|
||||
#endif
|
||||
UNUSED(path);
|
||||
try {
|
||||
_fs->write(fileinfo->fh, buf, fspp::num_bytes_t(size), fspp::num_bytes_t(offset));
|
||||
_fs->write(fh, buf, fspp::num_bytes_t(size), fspp::num_bytes_t(offset));
|
||||
#ifdef FSPP_LOG
|
||||
LOG(DEBUG, "write({}, _, {}, {}, _): success", path, size, offset);
|
||||
LOG(DEBUG, "write({}, _, {}, {}, _): success", fh, size, offset);
|
||||
#endif
|
||||
return size;
|
||||
} catch(const cpputils::AssertFailed &e) {
|
||||
@ -1023,7 +605,7 @@ int Fuse::write(const bf::path &path, const char *buf, size_t size, int64_t offs
|
||||
return -EIO;
|
||||
} catch (FuseErrnoException &e) {
|
||||
#ifdef FSPP_LOG
|
||||
LOG(WARN, "write({}, _, {}, {}, _): failed with errno {}", path, size, offset, e.getErrno());
|
||||
LOG(WARN, "write({}, _, {}, {}, _): failed with errno {}", fh, size, offset, e.getErrno());
|
||||
#endif
|
||||
return -e.getErrno();
|
||||
} catch(const std::exception &e) {
|
||||
@ -1040,7 +622,6 @@ int Fuse::statfs(const bf::path &path, struct ::statvfs *fsstat) {
|
||||
#ifdef FSPP_LOG
|
||||
LOG(DEBUG, "statfs({}, _)", path);
|
||||
#endif
|
||||
UNUSED(path);
|
||||
try {
|
||||
ASSERT(is_valid_fspp_path(path), "has to be an absolute path");
|
||||
_fs->statfs(fsstat);
|
||||
@ -1065,16 +646,15 @@ int Fuse::statfs(const bf::path &path, struct ::statvfs *fsstat) {
|
||||
}
|
||||
}
|
||||
|
||||
int Fuse::flush(const bf::path &path, fuse_file_info *fileinfo) {
|
||||
int Fuse::flush(uint64_t fh) {
|
||||
ThreadNameForDebugging _threadName("flush");
|
||||
#ifdef FSPP_LOG
|
||||
LOG(WARN, "flush({}, _)", path);
|
||||
LOG(WARN, "flush({}, _)", fh);
|
||||
#endif
|
||||
UNUSED(path);
|
||||
try {
|
||||
_fs->flush(fileinfo->fh);
|
||||
_fs->flush(fh);
|
||||
#ifdef FSPP_LOG
|
||||
LOG(WARN, "flush({}, _): success", path);
|
||||
LOG(WARN, "flush({}, _): success", fh);
|
||||
#endif
|
||||
return 0;
|
||||
} catch(const cpputils::AssertFailed &e) {
|
||||
@ -1082,7 +662,7 @@ int Fuse::flush(const bf::path &path, fuse_file_info *fileinfo) {
|
||||
return -EIO;
|
||||
} catch (FuseErrnoException &e) {
|
||||
#ifdef FSPP_LOG
|
||||
LOG(WARN, "flush({}, _): failed with errno {}", path, e.getErrno());
|
||||
LOG(WARN, "flush({}, _): failed with errno {}", fh, e.getErrno());
|
||||
#endif
|
||||
return -e.getErrno();
|
||||
} catch(const std::exception &e) {
|
||||
@ -1094,20 +674,19 @@ int Fuse::flush(const bf::path &path, fuse_file_info *fileinfo) {
|
||||
}
|
||||
}
|
||||
|
||||
int Fuse::fsync(const bf::path &path, int datasync, fuse_file_info *fileinfo) {
|
||||
int Fuse::fsync(int datasync, uint64_t fh) {
|
||||
ThreadNameForDebugging _threadName("fsync");
|
||||
#ifdef FSPP_LOG
|
||||
LOG(DEBUG, "fsync({}, {}, _)", path, datasync);
|
||||
LOG(DEBUG, "fsync({}, {}, _)", fh, datasync);
|
||||
#endif
|
||||
UNUSED(path);
|
||||
try {
|
||||
if (datasync) {
|
||||
_fs->fdatasync(fileinfo->fh);
|
||||
_fs->fdatasync(fh);
|
||||
} else {
|
||||
_fs->fsync(fileinfo->fh);
|
||||
_fs->fsync(fh);
|
||||
}
|
||||
#ifdef FSPP_LOG
|
||||
LOG(DEBUG, "fsync({}, {}, _): success", path, datasync);
|
||||
LOG(DEBUG, "fsync({}, {}, _): success", fh, datasync);
|
||||
#endif
|
||||
return 0;
|
||||
} catch(const cpputils::AssertFailed &e) {
|
||||
@ -1115,7 +694,7 @@ int Fuse::fsync(const bf::path &path, int datasync, fuse_file_info *fileinfo) {
|
||||
return -EIO;
|
||||
} catch (FuseErrnoException &e) {
|
||||
#ifdef FSPP_LOG
|
||||
LOG(WARN, "fsync({}, {}, _): failed with errno {}", path, datasync, e.getErrno());
|
||||
LOG(WARN, "fsync({}, {}, _): failed with errno {}", fh, datasync, e.getErrno());
|
||||
#endif
|
||||
return -e.getErrno();
|
||||
} catch(const std::exception &e) {
|
||||
@ -1127,22 +706,11 @@ int Fuse::fsync(const bf::path &path, int datasync, fuse_file_info *fileinfo) {
|
||||
}
|
||||
}
|
||||
|
||||
int Fuse::opendir(const bf::path &path, fuse_file_info *fileinfo) {
|
||||
UNUSED(path);
|
||||
UNUSED(fileinfo);
|
||||
ThreadNameForDebugging _threadName("opendir");
|
||||
//LOG(DEBUG, "opendir({}, _)", path);
|
||||
//We don't need opendir, because readdir works directly on the path
|
||||
return 0;
|
||||
}
|
||||
|
||||
int Fuse::readdir(const bf::path &path, void *buf, fuse_fill_dir_t filler, int64_t offset, fuse_file_info *fileinfo) {
|
||||
int Fuse::readdir(const bf::path &path, void *buf, fuse_fill_dir_t filler) {
|
||||
ThreadNameForDebugging _threadName("readdir");
|
||||
#ifdef FSPP_LOG
|
||||
LOG(DEBUG, "readdir({}, _, _, {}, _)", path, offset);
|
||||
LOG(DEBUG, "readdir({}, _, _)", path);
|
||||
#endif
|
||||
UNUSED(fileinfo);
|
||||
UNUSED(offset);
|
||||
try {
|
||||
ASSERT(is_valid_fspp_path(path), "has to be an absolute path");
|
||||
auto entries = _fs->readDir(path);
|
||||
@ -1161,15 +729,15 @@ int Fuse::readdir(const bf::path &path, void *buf, fuse_fill_dir_t filler, int64
|
||||
} else {
|
||||
ASSERT(false, "Unknown entry type");
|
||||
}
|
||||
if (filler(buf, entry.name.c_str(), &stbuf, 0) != 0) {
|
||||
if (filler(buf, entry.name.c_str(), &stbuf) != 0) {
|
||||
#ifdef FSPP_LOG
|
||||
LOG(DEBUG, "readdir({}, _, _, {}, _): failure with ENOMEM", path, offset);
|
||||
LOG(DEBUG, "readdir({}, _, _): failure with ENOMEM", path);
|
||||
#endif
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
#ifdef FSPP_LOG
|
||||
LOG(DEBUG, "readdir({}, _, _, {}, _): success", path, offset);
|
||||
LOG(DEBUG, "readdir({}, _, _): success", path);
|
||||
#endif
|
||||
return 0;
|
||||
} catch(const cpputils::AssertFailed &e) {
|
||||
@ -1177,7 +745,7 @@ int Fuse::readdir(const bf::path &path, void *buf, fuse_fill_dir_t filler, int64
|
||||
return -EIO;
|
||||
} catch (FuseErrnoException &e) {
|
||||
#ifdef FSPP_LOG
|
||||
LOG(WARN, "readdir({}, _, _, {}, _): failed with errno {}", path, offset, e.getErrno());
|
||||
LOG(WARN, "readdir({}, _, _): failed with errno {}", path, e.getErrno());
|
||||
#endif
|
||||
return -e.getErrno();
|
||||
} catch(const std::exception &e) {
|
||||
@ -1189,37 +757,17 @@ int Fuse::readdir(const bf::path &path, void *buf, fuse_fill_dir_t filler, int64
|
||||
}
|
||||
}
|
||||
|
||||
int Fuse::releasedir(const bf::path &path, fuse_file_info *fileinfo) {
|
||||
UNUSED(path);
|
||||
UNUSED(fileinfo);
|
||||
ThreadNameForDebugging _threadName("releasedir");
|
||||
//LOG(DEBUG, "releasedir({}, _)", path);
|
||||
//We don't need releasedir, because readdir works directly on the path
|
||||
return 0;
|
||||
}
|
||||
|
||||
//TODO
|
||||
int Fuse::fsyncdir(const bf::path &path, int datasync, fuse_file_info *fileinfo) {
|
||||
UNUSED(fileinfo);
|
||||
UNUSED(datasync);
|
||||
UNUSED(path);
|
||||
ThreadNameForDebugging _threadName("fsyncdir");
|
||||
//LOG(WARN, "Called non-implemented fsyncdir({}, {}, _)", path, datasync);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void Fuse::init(fuse_conn_info *conn) {
|
||||
UNUSED(conn);
|
||||
void Fuse::init() {
|
||||
ThreadNameForDebugging _threadName("init");
|
||||
_fs = _init(this);
|
||||
_fs = _init();
|
||||
|
||||
_context = Context(noatime());
|
||||
ASSERT(_context != boost::none, "Context should have been initialized in Fuse::run() but somehow didn't");
|
||||
_fs->setContext(fspp::Context { *_context });
|
||||
|
||||
LOG(INFO, "Filesystem started.");
|
||||
|
||||
_running = true;
|
||||
_onMounted();
|
||||
|
||||
#ifdef FSPP_LOG
|
||||
cpputils::logging::setLevel(DEBUG);
|
||||
@ -1263,15 +811,14 @@ int Fuse::access(const bf::path &path, int mask) {
|
||||
}
|
||||
}
|
||||
|
||||
int Fuse::create(const bf::path &path, ::mode_t mode, fuse_file_info *fileinfo) {
|
||||
int Fuse::create(const bf::path &path, ::mode_t mode, uint64_t* fh) {
|
||||
ThreadNameForDebugging _threadName("create");
|
||||
#ifdef FSPP_LOG
|
||||
LOG(DEBUG, "create({}, {}, _)", path, mode);
|
||||
#endif
|
||||
try {
|
||||
ASSERT(is_valid_fspp_path(path), "has to be an absolute path");
|
||||
auto context = fuse_get_context();
|
||||
fileinfo->fh = _fs->createAndOpenFile(path, mode, context->uid, context->gid);
|
||||
*fh = _fs->createAndOpenFile(path, mode, uid, gid);
|
||||
#ifdef FSPP_LOG
|
||||
LOG(DEBUG, "create({}, {}, _): success", path, mode);
|
||||
#endif
|
||||
|
@ -2,7 +2,7 @@
|
||||
#ifndef MESSMER_FSPP_FUSE_FUSE_H_
|
||||
#define MESSMER_FSPP_FUSE_FUSE_H_
|
||||
|
||||
#include "params.h"
|
||||
#include <sys/statvfs.h>
|
||||
#include <cstdio>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
@ -15,6 +15,8 @@
|
||||
#include "stat_compatibility.h"
|
||||
#include <fspp/fs_interface/Context.h>
|
||||
|
||||
typedef int (*fuse_fill_dir_t)(void*, const char*, fspp::fuse::STAT*);
|
||||
|
||||
namespace fspp {
|
||||
class Device;
|
||||
|
||||
@ -23,18 +25,14 @@ class Filesystem;
|
||||
|
||||
class Fuse final {
|
||||
public:
|
||||
explicit Fuse(std::function<std::shared_ptr<Filesystem> (Fuse *fuse)> init, std::function<void()> onMounted, std::string fstype, boost::optional<std::string> fsname);
|
||||
explicit Fuse(std::function<std::shared_ptr<Filesystem> ()> init, std::string fstype, boost::optional<std::string> fsname);
|
||||
~Fuse();
|
||||
|
||||
void runInBackground(const boost::filesystem::path &mountdir, std::vector<std::string> fuseOptions);
|
||||
void runInForeground(const boost::filesystem::path &mountdir, std::vector<std::string> fuseOptions);
|
||||
bool running() const;
|
||||
void stop();
|
||||
|
||||
static void unmount(const boost::filesystem::path &mountdir, bool force = false);
|
||||
|
||||
int getattr(const boost::filesystem::path &path, fspp::fuse::STAT *stbuf);
|
||||
int fgetattr(const boost::filesystem::path &path, fspp::fuse::STAT *stbuf, fuse_file_info *fileinfo);
|
||||
int fgetattr(const boost::filesystem::path &path, fspp::fuse::STAT *stbuf, uint64_t fh);
|
||||
int readlink(const boost::filesystem::path &path, char *buf, size_t size);
|
||||
int mknod(const boost::filesystem::path &path, ::mode_t mode, dev_t rdev);
|
||||
int mkdir(const boost::filesystem::path &path, ::mode_t mode);
|
||||
@ -46,47 +44,40 @@ public:
|
||||
int chmod(const boost::filesystem::path &path, ::mode_t mode);
|
||||
int chown(const boost::filesystem::path &path, ::uid_t uid, ::gid_t gid);
|
||||
int truncate(const boost::filesystem::path &path, int64_t size);
|
||||
int ftruncate(const boost::filesystem::path &path, int64_t size, fuse_file_info *fileinfo);
|
||||
int utimens(const boost::filesystem::path &path, const std::array<timespec, 2> times);
|
||||
int open(const boost::filesystem::path &path, fuse_file_info *fileinfo);
|
||||
int release(const boost::filesystem::path &path, fuse_file_info *fileinfo);
|
||||
int read(const boost::filesystem::path &path, char *buf, size_t size, int64_t offset, fuse_file_info *fileinfo);
|
||||
int write(const boost::filesystem::path &path, const char *buf, size_t size, int64_t offset, fuse_file_info *fileinfo);
|
||||
int ftruncate(int64_t size, uint64_t fh);
|
||||
int utimens(const boost::filesystem::path &path, const timespec lastAccessTime, const timespec lastModificationTime);
|
||||
int open(const boost::filesystem::path &path, uint64_t* fh, int flags);
|
||||
int release(uint64_t fh);
|
||||
int read(char *buf, size_t size, int64_t offset, uint64_t fh);
|
||||
int write(const char *buf, size_t size, int64_t offset, uint64_t fh);
|
||||
int statfs(const boost::filesystem::path &path, struct ::statvfs *fsstat);
|
||||
int flush(const boost::filesystem::path &path, fuse_file_info *fileinfo);
|
||||
int fsync(const boost::filesystem::path &path, int flags, fuse_file_info *fileinfo);
|
||||
int opendir(const boost::filesystem::path &path, fuse_file_info *fileinfo);
|
||||
int readdir(const boost::filesystem::path &path, void *buf, fuse_fill_dir_t filler, int64_t offset, fuse_file_info *fileinfo);
|
||||
int releasedir(const boost::filesystem::path &path, fuse_file_info *fileinfo);
|
||||
int fsyncdir(const boost::filesystem::path &path, int datasync, fuse_file_info *fileinfo);
|
||||
void init(fuse_conn_info *conn);
|
||||
int flush(uint64_t fh);
|
||||
int fsync(int flags, uint64_t fh);
|
||||
int readdir(const boost::filesystem::path &path, void *buf, fuse_fill_dir_t filler);
|
||||
void init();
|
||||
void destroy();
|
||||
int access(const boost::filesystem::path &path, int mask);
|
||||
int create(const boost::filesystem::path &path, ::mode_t mode, fuse_file_info *fileinfo);
|
||||
int create(const boost::filesystem::path &path, ::mode_t mode, uint64_t* fh);
|
||||
|
||||
private:
|
||||
static void _logException(const std::exception &e);
|
||||
static void _logUnknownException();
|
||||
static char *_create_c_string(const std::string &str);
|
||||
static void _removeAndWarnIfExists(std::vector<std::string> *fuseOptions, const std::string &option);
|
||||
void _run(const boost::filesystem::path &mountdir, std::vector<std::string> fuseOptions);
|
||||
static bool _has_option(const std::vector<char *> &vec, const std::string &key);
|
||||
static bool _has_entry_with_prefix(const std::string &prefix, const std::vector<char *> &vec);
|
||||
std::vector<char *> _build_argv(const boost::filesystem::path &mountdir, const std::vector<std::string> &fuseOptions);
|
||||
void _add_fuse_option_if_not_exists(std::vector<char *> *argv, const std::string &key, const std::string &value);
|
||||
void _createContext(const std::vector<std::string> &fuseOptions);
|
||||
|
||||
std::function<std::shared_ptr<Filesystem> (Fuse *fuse)> _init;
|
||||
std::function<void()> _onMounted;
|
||||
std::function<std::shared_ptr<Filesystem> ()> _init;
|
||||
std::shared_ptr<Filesystem> _fs;
|
||||
boost::filesystem::path _mountdir;
|
||||
std::vector<char*> _argv;
|
||||
std::atomic<bool> _running;
|
||||
std::string _fstype;
|
||||
boost::optional<std::string> _fsname;
|
||||
boost::optional<Context> _context;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(Fuse);
|
||||
::uid_t uid;
|
||||
::gid_t gid;
|
||||
};
|
||||
}
|
||||
}
|
||||
|
@ -1,8 +0,0 @@
|
||||
#pragma once
|
||||
#ifndef MESSMER_FSPP_FUSE_PARAMS_H_
|
||||
#define MESSMER_FSPP_FUSE_PARAMS_H_
|
||||
|
||||
#define FUSE_USE_VERSION 26
|
||||
#include <fuse.h>
|
||||
|
||||
#endif
|
@ -11,7 +11,7 @@ set(SOURCES
|
||||
)
|
||||
|
||||
add_library(${PROJECT_NAME} STATIC ${SOURCES})
|
||||
target_link_libraries(${PROJECT_NAME})
|
||||
target_link_libraries(${PROJECT_NAME} PUBLIC boost)
|
||||
target_compile_definitions(${PROJECT_NAME} PRIVATE GIT_VERSION_STRING="${GIT_VERSION}")
|
||||
target_add_boost(${PROJECT_NAME})
|
||||
target_enable_style_warnings(${PROJECT_NAME})
|
||||
|
12
src/jni/CMakeLists.txt
Normal file
12
src/jni/CMakeLists.txt
Normal file
@ -0,0 +1,12 @@
|
||||
project (libcryfs-jni)
|
||||
|
||||
add_library(${PROJECT_NAME} STATIC libcryfs-jni.cpp)
|
||||
|
||||
target_link_libraries(${PROJECT_NAME} PUBLIC fspp-fuse)
|
||||
|
||||
target_enable_style_warnings(${PROJECT_NAME})
|
||||
target_activate_cpp14(${PROJECT_NAME})
|
||||
|
||||
set_target_properties(${PROJECT_NAME} PROPERTIES PUBLIC_HEADER include/libcryfs-jni.h)
|
||||
target_include_directories(${PROJECT_NAME} PUBLIC include)
|
||||
|
17
src/jni/include/libcryfs-jni.h
Normal file
17
src/jni/include/libcryfs-jni.h
Normal file
@ -0,0 +1,17 @@
|
||||
#include <jni.h>
|
||||
|
||||
jlong cryfs_init(JNIEnv* env, jstring jbaseDir, jstring jlocalSateDir, jbyteArray jpassword, jboolean createBaseDir, jstring jcipher);
|
||||
jlong cryfs_create(JNIEnv* env, jlong fusePtr, jstring jpath, mode_t mode);
|
||||
jlong cryfs_open(JNIEnv* env, jlong fusePtr, jstring jpath, jint flags);
|
||||
jint cryfs_read(JNIEnv* env, jlong fusePtr, jlong fileHandle, jbyteArray jbuffer, jlong offset);
|
||||
jint cryfs_write(JNIEnv* env, jlong fusePtr, jlong fileHandle, jlong offset, jbyteArray jbuffer, jint size);
|
||||
jint cryfs_truncate(JNIEnv* env, jlong fusePtr, jstring jpath, jlong size);
|
||||
jint cryfs_unlink(JNIEnv* env, jlong fusePtr, jstring jpath);
|
||||
jint cryfs_release(jlong fusePtr, jlong fileHandle);
|
||||
jlong cryfs_readdir(JNIEnv* env, jlong fusePtr, jstring jpath ,void* data, int(void*, const char*, const struct stat*));
|
||||
jint cryfs_mkdir(JNIEnv* env, jlong fusePtr, jstring jpath, mode_t mode);
|
||||
jint cryfs_rmdir(JNIEnv* env, jlong fusePtr, jstring jpath);
|
||||
jint cryfs_getattr(JNIEnv* env, jlong fusePtr, jstring jpath, struct stat* stat);
|
||||
jint cryfs_rename(JNIEnv* env, jlong fusePtr, jstring jsrcPath, jstring jdstPath);
|
||||
void cryfs_destroy(jlong fusePtr);
|
||||
jboolean cryfs_is_closed(jlong fusePtr);
|
202
src/jni/libcryfs-jni.cpp
Normal file
202
src/jni/libcryfs-jni.cpp
Normal file
@ -0,0 +1,202 @@
|
||||
#include <jni.h>
|
||||
#include <cryfs-cli/Cli.h>
|
||||
#include <fspp/fuse/Fuse.h>
|
||||
|
||||
using std::unique_ptr;
|
||||
using std::make_unique;
|
||||
using boost::none;
|
||||
using cpputils::Random;
|
||||
using cpputils::SCrypt;
|
||||
using cryfs_cli::Cli;
|
||||
using cryfs_cli::program_options::ProgramOptions;
|
||||
using fspp::fuse::Fuse;
|
||||
|
||||
std::set<jlong> validFusePtrs;
|
||||
|
||||
extern "C" jlong cryfs_init(JNIEnv* env, jstring jbaseDir, jstring jlocalStateDir, jbyteArray jpassword, jboolean createBaseDir, jstring jcipher) {
|
||||
const char* baseDir = env->GetStringUTFChars(jbaseDir, NULL);
|
||||
const char* localStateDir = env->GetStringUTFChars(jlocalStateDir, NULL);
|
||||
boost::optional<string> cipher = none;
|
||||
if (jcipher != NULL) {
|
||||
const char* cipherName = env->GetStringUTFChars(jcipher, NULL);
|
||||
cipher = boost::optional<string>(cipherName);
|
||||
env->ReleaseStringUTFChars(jcipher, cipherName);
|
||||
}
|
||||
auto &keyGenerator = Random::OSRandom();
|
||||
ProgramOptions options = ProgramOptions(baseDir, none, localStateDir, false, false, createBaseDir, cipher, none, false, none);
|
||||
char* password = reinterpret_cast<char*>(env->GetByteArrayElements(jpassword, NULL));
|
||||
|
||||
Fuse* fuse = Cli(keyGenerator, SCrypt::DefaultSettings).initFilesystem(options, make_unique<string>(password));
|
||||
|
||||
env->ReleaseByteArrayElements(jpassword, reinterpret_cast<jbyte*>(password), 0);
|
||||
env->ReleaseStringUTFChars(jbaseDir, baseDir);
|
||||
env->ReleaseStringUTFChars(jlocalStateDir, localStateDir);
|
||||
|
||||
jlong fusePtr = reinterpret_cast<jlong>(fuse);
|
||||
if (fusePtr != 0) {
|
||||
validFusePtrs.insert(fusePtr);
|
||||
}
|
||||
return fusePtr;
|
||||
}
|
||||
|
||||
extern "C" jlong cryfs_create(JNIEnv* env, jlong fusePtr, jstring jpath, mode_t mode) {
|
||||
Fuse* fuse = reinterpret_cast<Fuse*>(fusePtr);
|
||||
const char* path = env->GetStringUTFChars(jpath, NULL);
|
||||
uint64_t fh;
|
||||
|
||||
int result = fuse->create(path, mode, &fh);
|
||||
|
||||
env->ReleaseStringUTFChars(jpath, path);
|
||||
if (result == 0) {
|
||||
return fh;
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" jlong cryfs_open(JNIEnv* env, jlong fusePtr, jstring jpath, jint flags) {
|
||||
Fuse* fuse = reinterpret_cast<Fuse*>(fusePtr);
|
||||
const char* path = env->GetStringUTFChars(jpath, NULL);
|
||||
uint64_t fh;
|
||||
|
||||
int result = fuse->open(path, &fh, flags);
|
||||
|
||||
env->ReleaseStringUTFChars(jpath, path);
|
||||
if (result == 0) {
|
||||
return fh;
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" jint cryfs_read(JNIEnv* env, jlong fusePtr, jlong fileHandle, jbyteArray jbuffer, jlong offset) {
|
||||
Fuse* fuse = reinterpret_cast<Fuse*>(fusePtr);
|
||||
const jsize size = env->GetArrayLength(jbuffer);
|
||||
char* buff = reinterpret_cast<char*>(env->GetByteArrayElements(jbuffer, NULL));
|
||||
|
||||
int result = fuse->read(buff, size, offset, fileHandle);
|
||||
|
||||
env->ReleaseByteArrayElements(jbuffer, reinterpret_cast<jbyte*>(buff), 0);
|
||||
return result;
|
||||
}
|
||||
|
||||
extern "C" jint cryfs_write(JNIEnv* env, jlong fusePtr, jlong fileHandle, jlong offset, jbyteArray jbuffer, jint size) {
|
||||
Fuse* fuse = reinterpret_cast<Fuse*>(fusePtr);
|
||||
char* buff = reinterpret_cast<char*>(env->GetByteArrayElements(jbuffer, NULL));
|
||||
|
||||
int result = fuse->write(buff, size, offset, fileHandle);
|
||||
|
||||
env->ReleaseByteArrayElements(jbuffer, reinterpret_cast<jbyte*>(buff), 0);
|
||||
return result;
|
||||
}
|
||||
|
||||
extern "C" jint cryfs_truncate(JNIEnv* env, jlong fusePtr, jstring jpath, jlong size) {
|
||||
Fuse* fuse = reinterpret_cast<Fuse*>(fusePtr);
|
||||
const char* path = env->GetStringUTFChars(jpath, NULL);
|
||||
|
||||
int result = fuse->truncate(path, size);
|
||||
|
||||
env->ReleaseStringUTFChars(jpath, path);
|
||||
return result;
|
||||
}
|
||||
|
||||
extern "C" jint cryfs_unlink(JNIEnv* env, jlong fusePtr, jstring jpath) {
|
||||
Fuse* fuse = reinterpret_cast<Fuse*>(fusePtr);
|
||||
const char* path = env->GetStringUTFChars(jpath, NULL);
|
||||
|
||||
int result = fuse->unlink(path);
|
||||
|
||||
env->ReleaseStringUTFChars(jpath, path);
|
||||
return result;
|
||||
}
|
||||
|
||||
extern "C" jint cryfs_release(jlong fusePtr, jlong fileHandle) {
|
||||
Fuse* fuse = reinterpret_cast<Fuse*>(fusePtr);
|
||||
return fuse->release(fileHandle);
|
||||
}
|
||||
|
||||
struct readDirHelper {
|
||||
Fuse* fuse;
|
||||
boost::filesystem::path path;
|
||||
void* data;
|
||||
fuse_fill_dir_t filler;
|
||||
};
|
||||
|
||||
int readDir(void* data, const char* name, fspp::fuse::STAT* stat) {
|
||||
if (strcmp(name, ".") == 0 || strcmp(name, "..") == 0) {
|
||||
return 0;
|
||||
}
|
||||
struct readDirHelper* helper = reinterpret_cast<readDirHelper*>(data);
|
||||
mode_t mode = stat->st_mode; // saving mode because getattr sometimes modifies it badly
|
||||
helper->fuse->getattr(helper->path / name, stat);
|
||||
stat->st_mode = mode;
|
||||
return helper->filler(helper->data, name, stat);
|
||||
}
|
||||
|
||||
extern "C" jint cryfs_readdir(JNIEnv* env, jlong fusePtr, jstring jpath, void* data, fuse_fill_dir_t filler) {
|
||||
Fuse* fuse = reinterpret_cast<Fuse*>(fusePtr);
|
||||
const char* path = env->GetStringUTFChars(jpath, NULL);
|
||||
struct readDirHelper helper;
|
||||
helper.fuse = fuse;
|
||||
helper.path = boost::filesystem::path(path);
|
||||
helper.data = data;
|
||||
helper.filler = filler;
|
||||
|
||||
int result = fuse->readdir(path, &helper, readDir);
|
||||
|
||||
env->ReleaseStringUTFChars(jpath, path);
|
||||
return result;
|
||||
}
|
||||
|
||||
extern "C" jint cryfs_mkdir(JNIEnv* env, jlong fusePtr, jstring jpath, mode_t mode) {
|
||||
Fuse* fuse = reinterpret_cast<Fuse*>(fusePtr);
|
||||
const char* path = env->GetStringUTFChars(jpath, NULL);
|
||||
|
||||
int result = fuse->mkdir(path, mode);
|
||||
|
||||
env->ReleaseStringUTFChars(jpath, path);
|
||||
return result;
|
||||
}
|
||||
|
||||
extern "C" jint cryfs_rmdir(JNIEnv* env, jlong fusePtr, jstring jpath) {
|
||||
Fuse* fuse = reinterpret_cast<Fuse*>(fusePtr);
|
||||
const char* path = env->GetStringUTFChars(jpath, NULL);
|
||||
|
||||
int result = fuse->rmdir(path);
|
||||
|
||||
env->ReleaseStringUTFChars(jpath, path);
|
||||
return result;
|
||||
}
|
||||
|
||||
extern "C" jint cryfs_getattr(JNIEnv* env, jlong fusePtr, jstring jpath, fspp::fuse::STAT* stat) {
|
||||
Fuse* fuse = reinterpret_cast<Fuse*>(fusePtr);
|
||||
const char* path = env->GetStringUTFChars(jpath, NULL);
|
||||
|
||||
int result = fuse->getattr(path, stat);
|
||||
|
||||
env->ReleaseStringUTFChars(jpath, path);
|
||||
return result;
|
||||
}
|
||||
|
||||
extern "C" jint cryfs_rename(JNIEnv* env, jlong fusePtr, jstring jsrcPath, jstring jdstPath) {
|
||||
Fuse* fuse = reinterpret_cast<Fuse*>(fusePtr);
|
||||
const char* srcPath = env->GetStringUTFChars(jsrcPath, NULL);
|
||||
const char* dstPath = env->GetStringUTFChars(jdstPath, NULL);
|
||||
|
||||
int result = fuse->rename(srcPath, dstPath);
|
||||
|
||||
env->ReleaseStringUTFChars(jsrcPath, srcPath);
|
||||
env->ReleaseStringUTFChars(jdstPath, dstPath);
|
||||
return result;
|
||||
}
|
||||
|
||||
extern "C" void cryfs_destroy(jlong fusePtr) {
|
||||
Fuse* fuse = reinterpret_cast<Fuse*>(fusePtr);
|
||||
fuse->destroy();
|
||||
delete fuse;
|
||||
validFusePtrs.erase(fusePtr);
|
||||
}
|
||||
|
||||
extern "C" jboolean cryfs_is_closed(jlong fusePtr) {
|
||||
return validFusePtrs.find(fusePtr) == validFusePtrs.end();
|
||||
}
|
@ -1,12 +0,0 @@
|
||||
project (stats)
|
||||
|
||||
set(SOURCES
|
||||
main.cpp
|
||||
traversal.cpp
|
||||
)
|
||||
|
||||
add_executable(${PROJECT_NAME} ${SOURCES})
|
||||
target_link_libraries(${PROJECT_NAME} PUBLIC cryfs cpp-utils gitversion)
|
||||
target_enable_style_warnings(${PROJECT_NAME})
|
||||
target_activate_cpp14(${PROJECT_NAME})
|
||||
set_target_properties(${PROJECT_NAME} PROPERTIES OUTPUT_NAME cryfs-stats)
|
@ -1,254 +0,0 @@
|
||||
#include <iostream>
|
||||
#include <boost/filesystem.hpp>
|
||||
#include <cryfs/impl/config/CryConfigLoader.h>
|
||||
#include <cryfs/impl/config/CryPasswordBasedKeyProvider.h>
|
||||
#include <blockstore/implementations/ondisk/OnDiskBlockStore2.h>
|
||||
#include <blockstore/implementations/readonly/ReadOnlyBlockStore2.h>
|
||||
#include <blockstore/implementations/integrity/IntegrityBlockStore2.h>
|
||||
#include <blockstore/implementations/low2highlevel/LowToHighLevelBlockStore.h>
|
||||
#include <blobstore/implementations/onblocks/datanodestore/DataNodeStore.h>
|
||||
#include <blobstore/implementations/onblocks/datanodestore/DataNode.h>
|
||||
#include <blobstore/implementations/onblocks/datanodestore/DataInnerNode.h>
|
||||
#include <blobstore/implementations/onblocks/datanodestore/DataLeafNode.h>
|
||||
#include <blobstore/implementations/onblocks/BlobStoreOnBlocks.h>
|
||||
#include <cryfs/impl/filesystem/fsblobstore/FsBlobStore.h>
|
||||
#include <cryfs/impl/filesystem/fsblobstore/DirBlob.h>
|
||||
#include <cryfs/impl/filesystem/CryDevice.h>
|
||||
#include <cpp-utils/io/IOStreamConsole.h>
|
||||
#include <cpp-utils/system/homedir.h>
|
||||
#include "traversal.h"
|
||||
|
||||
#include <set>
|
||||
|
||||
using std::endl;
|
||||
using std::cout;
|
||||
using std::set;
|
||||
using std::flush;
|
||||
using std::vector;
|
||||
using boost::none;
|
||||
using boost::filesystem::path;
|
||||
|
||||
using namespace cryfs;
|
||||
using namespace cpputils;
|
||||
using namespace blockstore;
|
||||
using namespace blockstore::ondisk;
|
||||
using namespace blockstore::readonly;
|
||||
using namespace blockstore::integrity;
|
||||
using namespace blockstore::lowtohighlevel;
|
||||
using namespace blobstore::onblocks;
|
||||
using namespace blobstore::onblocks::datanodestore;
|
||||
using namespace cryfs::fsblobstore;
|
||||
|
||||
using namespace cryfs_stats;
|
||||
|
||||
void printNode(unique_ref<DataNode> node) {
|
||||
std::cout << "BlockId: " << node->blockId().ToString() << ", Depth: " << static_cast<int>(node->depth()) << " ";
|
||||
auto innerNode = dynamic_pointer_move<DataInnerNode>(node);
|
||||
if (innerNode != none) {
|
||||
std::cout << "Type: inner\n";
|
||||
return;
|
||||
}
|
||||
auto leafNode = dynamic_pointer_move<DataLeafNode>(node);
|
||||
if (leafNode != none) {
|
||||
std::cout << "Type: leaf\n";
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
unique_ref<BlockStore> makeBlockStore(const path& basedir, const CryConfigLoader::ConfigLoadResult& config, LocalStateDir& localStateDir) {
|
||||
auto onDiskBlockStore = make_unique_ref<OnDiskBlockStore2>(basedir);
|
||||
auto readOnlyBlockStore = make_unique_ref<ReadOnlyBlockStore2>(std::move(onDiskBlockStore));
|
||||
auto encryptedBlockStore = CryCiphers::find(config.configFile->config()->Cipher()).createEncryptedBlockstore(std::move(readOnlyBlockStore), config.configFile->config()->EncryptionKey());
|
||||
auto statePath = localStateDir.forFilesystemId(config.configFile->config()->FilesystemId());
|
||||
auto integrityFilePath = statePath / "integritydata";
|
||||
auto onIntegrityViolation = [] () {
|
||||
std::cerr << "Warning: Integrity violation encountered" << std::endl;
|
||||
};
|
||||
auto integrityBlockStore = make_unique_ref<IntegrityBlockStore2>(std::move(encryptedBlockStore), integrityFilePath, config.myClientId, false, true, onIntegrityViolation);
|
||||
return make_unique_ref<LowToHighLevelBlockStore>(std::move(integrityBlockStore));
|
||||
}
|
||||
|
||||
struct AccumulateBlockIds final {
|
||||
public:
|
||||
auto callback() {
|
||||
return [this] (const BlockId& id) {
|
||||
_blockIds.push_back(id);
|
||||
};
|
||||
}
|
||||
|
||||
const std::vector<BlockId>& blockIds() const {
|
||||
return _blockIds;
|
||||
}
|
||||
|
||||
void reserve(size_t size) {
|
||||
_blockIds.reserve(size);
|
||||
}
|
||||
|
||||
private:
|
||||
std::vector<BlockId> _blockIds;
|
||||
};
|
||||
|
||||
class ProgressBar final {
|
||||
public:
|
||||
ProgressBar(size_t numBlocks): _currentBlock(0), _numBlocks(numBlocks) {}
|
||||
|
||||
auto callback() {
|
||||
return [this] (const BlockId&) {
|
||||
cout << "\r" << (++_currentBlock) << "/" << _numBlocks << flush;
|
||||
};
|
||||
}
|
||||
private:
|
||||
size_t _currentBlock;
|
||||
size_t _numBlocks;
|
||||
};
|
||||
|
||||
std::vector<BlockId> getKnownBlobIds(const path& basedir, const CryConfigLoader::ConfigLoadResult& config, LocalStateDir& localStateDir) {
|
||||
auto blockStore = makeBlockStore(basedir, config, localStateDir);
|
||||
auto fsBlobStore = make_unique_ref<FsBlobStore>(make_unique_ref<BlobStoreOnBlocks>(std::move(blockStore), config.configFile->config()->BlocksizeBytes()));
|
||||
|
||||
std::vector<BlockId> result;
|
||||
AccumulateBlockIds knownBlobIds;
|
||||
cout << "Listing all file system entities (i.e. blobs)..." << flush;
|
||||
auto rootId = BlockId::FromString(config.configFile->config()->RootBlob());
|
||||
forEachReachableBlob(fsBlobStore.get(), rootId, {knownBlobIds.callback()});
|
||||
cout << "done" << endl;
|
||||
|
||||
return knownBlobIds.blockIds();
|
||||
}
|
||||
|
||||
std::vector<BlockId> getKnownBlockIds(const path& basedir, const CryConfigLoader::ConfigLoadResult& config, LocalStateDir& localStateDir) {
|
||||
auto knownBlobIds = getKnownBlobIds(basedir, config, localStateDir);
|
||||
|
||||
auto blockStore = makeBlockStore(basedir, config, localStateDir);
|
||||
auto nodeStore = make_unique_ref<DataNodeStore>(std::move(blockStore), config.configFile->config()->BlocksizeBytes());
|
||||
AccumulateBlockIds knownBlockIds;
|
||||
const uint32_t numNodes = nodeStore->numNodes();
|
||||
knownBlockIds.reserve(numNodes);
|
||||
cout << "Listing all blocks used by these file system entities..." << endl;
|
||||
for (const auto& blobId : knownBlobIds) {
|
||||
forEachReachableBlockInBlob(nodeStore.get(), blobId, {
|
||||
ProgressBar(numNodes).callback(),
|
||||
knownBlockIds.callback()
|
||||
});
|
||||
}
|
||||
std::cout << "...done" << endl;
|
||||
return knownBlockIds.blockIds();
|
||||
}
|
||||
|
||||
set<BlockId> getAllBlockIds(const path& basedir, const CryConfigLoader::ConfigLoadResult& config, LocalStateDir& localStateDir) {
|
||||
auto blockStore = makeBlockStore(basedir, config, localStateDir);
|
||||
AccumulateBlockIds allBlockIds;
|
||||
allBlockIds.reserve(blockStore->numBlocks());
|
||||
forEachBlock(blockStore.get(), {allBlockIds.callback()});
|
||||
return set<BlockId>(allBlockIds.blockIds().begin(), allBlockIds.blockIds().end());
|
||||
}
|
||||
|
||||
void printConfig(const CryConfig& config) {
|
||||
std::cout
|
||||
<< "----------------------------------------------------"
|
||||
<< "\nFilesystem configuration:"
|
||||
<< "\n----------------------------------------------------"
|
||||
<< "\n- Filesystem format version: " << config.Version()
|
||||
<< "\n- Created with: CryFS " << config.CreatedWithVersion()
|
||||
<< "\n- Last opened with: CryFS " << config.LastOpenedWithVersion()
|
||||
<< "\n- Cipher: " << config.Cipher()
|
||||
<< "\n- Blocksize: " << config.BlocksizeBytes() << " bytes"
|
||||
<< "\n- Filesystem Id: " << config.FilesystemId().ToString()
|
||||
<< "\n- Root Blob Id: " << config.RootBlob();
|
||||
if (config.missingBlockIsIntegrityViolation()) {
|
||||
ASSERT(config.ExclusiveClientId() != boost::none, "ExclusiveClientId must be set if missingBlockIsIntegrityViolation");
|
||||
std::cout << "\n- Extended integrity measures: enabled."
|
||||
"\n - Exclusive client id: " << *config.ExclusiveClientId();
|
||||
} else {
|
||||
ASSERT(config.ExclusiveClientId() == boost::none, "ExclusiveClientId must be unset if !missingBlockIsIntegrityViolation");
|
||||
std::cout << "\n- Extended integrity measures: disabled.";
|
||||
}
|
||||
#ifndef CRYFS_NO_COMPATIBILITY
|
||||
std::cout << "\n- Has parent pointers: " << (config.HasParentPointers() ? "yes" : "no");
|
||||
std::cout << "\n- Has version numbers: " << (config.HasVersionNumbers() ? "yes" : "no");
|
||||
#endif
|
||||
std::cout << "\n----------------------------------------------------\n";
|
||||
}
|
||||
|
||||
int main(int argc, char* argv[]) {
|
||||
if (argc != 2) {
|
||||
std::cerr << "Usage: cryfs-stats [basedir]" << std::endl;
|
||||
exit(1);
|
||||
}
|
||||
path basedir = argv[1];
|
||||
std::cout << "Calculating stats for filesystem at " << basedir << std::endl;
|
||||
|
||||
auto console = std::make_shared<cpputils::IOStreamConsole>();
|
||||
|
||||
console->print("Loading config\n");
|
||||
auto askPassword = [console] () {
|
||||
return console->askPassword("Password: ");
|
||||
};
|
||||
unique_ref<CryKeyProvider> keyProvider = make_unique_ref<CryPasswordBasedKeyProvider>(
|
||||
console,
|
||||
askPassword,
|
||||
askPassword,
|
||||
make_unique_ref<SCrypt>(SCrypt::DefaultSettings)
|
||||
);
|
||||
|
||||
auto config_path = basedir / "cryfs.config";
|
||||
LocalStateDir localStateDir(cpputils::system::HomeDirectory::getXDGDataDir() / "cryfs");
|
||||
CryConfigLoader config_loader(console, Random::OSRandom(), std::move(keyProvider), localStateDir, boost::none, boost::none, boost::none);
|
||||
|
||||
auto config = config_loader.load(config_path, false, true, CryConfigFile::Access::ReadOnly);
|
||||
if (config.is_left()) {
|
||||
switch (config.left()) {
|
||||
case CryConfigFile::LoadError::ConfigFileNotFound:
|
||||
throw std::runtime_error("Error loading config file: Config file not found. Are you sure this is a valid CryFS file system?");
|
||||
case CryConfigFile::LoadError::DecryptionFailed:
|
||||
throw std::runtime_error("Error loading config file: Decryption failed. Did you maybe enter a wrong password?");
|
||||
}
|
||||
}
|
||||
const auto& config_ = config.right().configFile->config();
|
||||
std::cout << "Loading filesystem" << std::endl;
|
||||
printConfig(*config_);
|
||||
#ifndef CRYFS_NO_COMPATIBILITY
|
||||
const bool is_correct_format = config_->Version() == CryConfig::FilesystemFormatVersion && config_->HasParentPointers() && config_->HasVersionNumbers();
|
||||
#else
|
||||
const bool is_correct_format = config_->Version() == CryConfig::FilesystemFormatVersion;
|
||||
#endif
|
||||
if (!is_correct_format) {
|
||||
std::cerr << "The filesystem is not in the 0.10 format. It needs to be migrated. The cryfs-stats tool unfortunately can't handle this, please mount and unmount the filesystem once." << std::endl;
|
||||
exit(1);
|
||||
}
|
||||
|
||||
cout << "Listing all blocks..." << flush;
|
||||
set<BlockId> unaccountedBlocks = getAllBlockIds(basedir, config.right(), localStateDir);
|
||||
cout << "done" << endl;
|
||||
|
||||
vector<BlockId> accountedBlocks = getKnownBlockIds(basedir, config.right(), localStateDir);
|
||||
for (const BlockId& blockId : accountedBlocks) {
|
||||
auto num_erased = unaccountedBlocks.erase(blockId);
|
||||
ASSERT(1 == num_erased, "Blob id referenced by directory entry but didn't found it on disk? This can't happen.");
|
||||
}
|
||||
|
||||
console->print("Calculate statistics\n");
|
||||
|
||||
auto blockStore = makeBlockStore(basedir, config.right(), localStateDir);
|
||||
auto nodeStore = make_unique_ref<DataNodeStore>(std::move(blockStore), config.right().configFile->config()->BlocksizeBytes());
|
||||
|
||||
uint32_t numUnaccountedBlocks = unaccountedBlocks.size();
|
||||
uint32_t numLeaves = 0;
|
||||
uint32_t numInner = 0;
|
||||
console->print("Unaccounted blocks: " + std::to_string(unaccountedBlocks.size()) + "\n");
|
||||
for (const auto &blockId : unaccountedBlocks) {
|
||||
console->print("\r" + std::to_string(numLeaves+numInner) + "/" + std::to_string(numUnaccountedBlocks) + ": ");
|
||||
auto node = nodeStore->load(blockId);
|
||||
auto innerNode = dynamic_pointer_move<DataInnerNode>(*node);
|
||||
if (innerNode != none) {
|
||||
++numInner;
|
||||
printNode(std::move(*innerNode));
|
||||
}
|
||||
auto leafNode = dynamic_pointer_move<DataLeafNode>(*node);
|
||||
if (leafNode != none) {
|
||||
++numLeaves;
|
||||
printNode(std::move(*leafNode));
|
||||
}
|
||||
}
|
||||
console->print("\n" + std::to_string(numLeaves) + " leaves and " + std::to_string(numInner) + " inner nodes\n");
|
||||
}
|
@ -1,67 +0,0 @@
|
||||
#include "traversal.h"
|
||||
|
||||
#include <blobstore/implementations/onblocks/datanodestore/DataInnerNode.h>
|
||||
|
||||
using blockstore::BlockId;
|
||||
using blockstore::BlockStore;
|
||||
using cryfs::fsblobstore::FsBlobStore;
|
||||
using cryfs::fsblobstore::DirBlob;
|
||||
using blobstore::onblocks::datanodestore::DataNodeStore;
|
||||
using blobstore::onblocks::datanodestore::DataInnerNode;
|
||||
using cpputils::dynamic_pointer_move;
|
||||
|
||||
using std::vector;
|
||||
using std::function;
|
||||
using boost::none;
|
||||
|
||||
namespace cryfs_stats {
|
||||
|
||||
void forEachBlock(BlockStore* blockStore, const vector<function<void (const BlockId& blobId)>>& callbacks) {
|
||||
blockStore->forEachBlock([&callbacks] (const BlockId& blockId) {
|
||||
for(const auto& callback : callbacks) {
|
||||
callback(blockId);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(misc-no-recursion)
|
||||
void forEachReachableBlob(FsBlobStore* blobStore, const BlockId& rootId, const vector<function<void (const BlockId& blobId)>>& callbacks) {
|
||||
for (const auto& callback : callbacks) {
|
||||
callback(rootId);
|
||||
}
|
||||
|
||||
auto rootBlob = blobStore->load(rootId);
|
||||
ASSERT(rootBlob != none, "Blob not found but referenced from directory entry");
|
||||
|
||||
auto rootDir = dynamic_pointer_move<DirBlob>(*rootBlob);
|
||||
if (rootDir != none) {
|
||||
vector<fspp::Dir::Entry> children;
|
||||
children.reserve((*rootDir)->NumChildren());
|
||||
(*rootDir)->AppendChildrenTo(&children);
|
||||
|
||||
for (const auto& child : children) {
|
||||
auto childEntry = (*rootDir)->GetChild(child.name);
|
||||
ASSERT(childEntry != none, "We just got this from the entry list, it must exist.");
|
||||
auto childId = childEntry->blockId();
|
||||
forEachReachableBlob(blobStore, childId, callbacks);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(misc-no-recursion)
|
||||
void forEachReachableBlockInBlob(DataNodeStore* nodeStore, const BlockId& rootId, const vector<function<void (const BlockId& blockId)>>& callbacks) {
|
||||
for (const auto& callback : callbacks) {
|
||||
callback(rootId);
|
||||
}
|
||||
|
||||
auto node = nodeStore->load(rootId);
|
||||
auto innerNode = dynamic_pointer_move<DataInnerNode>(*node);
|
||||
if (innerNode != none) {
|
||||
for (uint32_t childIndex = 0; childIndex < (*innerNode)->numChildren(); ++childIndex) {
|
||||
auto childId = (*innerNode)->readChild(childIndex).blockId();
|
||||
forEachReachableBlockInBlob(nodeStore, childId, callbacks);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
@ -1,24 +0,0 @@
|
||||
#pragma once
|
||||
#ifndef CRYFS_STATS_TRAVERSAL_H
|
||||
#define CRYFS_STATS_TRAVERSAL_H
|
||||
|
||||
#include <vector>
|
||||
#include <functional>
|
||||
#include <blockstore/interface/Block.h>
|
||||
#include <blobstore/implementations/onblocks/datanodestore/DataNodeStore.h>
|
||||
#include <cryfs/impl/filesystem/fsblobstore/FsBlobStore.h>
|
||||
|
||||
namespace cryfs_stats {
|
||||
|
||||
// Call the callbacks on each existing block, whether it is connected or orphaned
|
||||
void forEachBlock(blockstore::BlockStore* blockStore, const std::vector<std::function<void (const blockstore::BlockId& blobId)>>& callbacks);
|
||||
|
||||
// Call the callbacks on each existing blob that is reachable from the root blob, i.e. not orphaned
|
||||
void forEachReachableBlob(cryfs::fsblobstore::FsBlobStore* blobStore, const blockstore::BlockId& rootId, const std::vector<std::function<void (const blockstore::BlockId& blobId)>>& callbacks);
|
||||
|
||||
// Call the callbacks on each block that is reachable from the given blob root, i.e. belongs to this blob.
|
||||
void forEachReachableBlockInBlob(blobstore::onblocks::datanodestore::DataNodeStore* nodeStore, const blockstore::BlockId& rootId, const std::vector<std::function<void (const blockstore::BlockId& blockId)>>& callbacks);
|
||||
|
||||
}
|
||||
|
||||
#endif
|
@ -1,16 +0,0 @@
|
||||
if (BUILD_TESTING)
|
||||
include_directories(../src)
|
||||
|
||||
add_subdirectory(my-gtest-main)
|
||||
add_subdirectory(gitversion)
|
||||
add_subdirectory(cpp-utils)
|
||||
if (NOT MSVC)
|
||||
# TODO Make this build on Windows
|
||||
add_subdirectory(fspp)
|
||||
endif()
|
||||
add_subdirectory(parallelaccessstore)
|
||||
add_subdirectory(blockstore)
|
||||
add_subdirectory(blobstore)
|
||||
add_subdirectory(cryfs)
|
||||
add_subdirectory(cryfs-cli)
|
||||
endif(BUILD_TESTING)
|
@ -1,34 +0,0 @@
|
||||
project (blobstore-test)
|
||||
|
||||
set(SOURCES
|
||||
implementations/onblocks/utils/MaxZeroSubtractionTest.cpp
|
||||
implementations/onblocks/utils/CeilDivisionTest.cpp
|
||||
implementations/onblocks/utils/IntPowTest.cpp
|
||||
implementations/onblocks/utils/CeilLogTest.cpp
|
||||
implementations/onblocks/testutils/BlobStoreTest.cpp
|
||||
implementations/onblocks/BlobStoreTest.cpp
|
||||
implementations/onblocks/datanodestore/DataLeafNodeTest.cpp
|
||||
implementations/onblocks/datanodestore/DataInnerNodeTest.cpp
|
||||
implementations/onblocks/datanodestore/DataNodeViewTest.cpp
|
||||
implementations/onblocks/datanodestore/DataNodeStoreTest.cpp
|
||||
implementations/onblocks/datatreestore/testutils/DataTreeTest.cpp
|
||||
implementations/onblocks/datatreestore/impl/GetLowestRightBorderNodeWithMoreThanOneChildOrNullTest.cpp
|
||||
implementations/onblocks/datatreestore/impl/GetLowestInnerRightBorderNodeWithLessThanKChildrenOrNullTest.cpp
|
||||
implementations/onblocks/datatreestore/DataTreeTest_Performance.cpp
|
||||
implementations/onblocks/datatreestore/DataTreeTest_ResizeByTraversing.cpp
|
||||
implementations/onblocks/datatreestore/DataTreeTest_NumStoredBytes.cpp
|
||||
implementations/onblocks/datatreestore/DataTreeTest_ResizeNumBytes.cpp
|
||||
implementations/onblocks/datatreestore/DataTreeStoreTest.cpp
|
||||
implementations/onblocks/datatreestore/LeafTraverserTest.cpp
|
||||
implementations/onblocks/BlobSizeTest.cpp
|
||||
implementations/onblocks/BlobReadWriteTest.cpp
|
||||
implementations/onblocks/BigBlobsTest.cpp
|
||||
|
||||
)
|
||||
|
||||
add_executable(${PROJECT_NAME} ${SOURCES})
|
||||
target_link_libraries(${PROJECT_NAME} my-gtest-main googletest blobstore)
|
||||
add_test(${PROJECT_NAME} ${PROJECT_NAME})
|
||||
|
||||
target_enable_style_warnings(${PROJECT_NAME})
|
||||
target_activate_cpp14(${PROJECT_NAME})
|
@ -1,136 +0,0 @@
|
||||
#include <gtest/gtest.h>
|
||||
#include <blockstore/implementations/compressing/CompressingBlockStore.h>
|
||||
#include <blockstore/implementations/compressing/compressors/RunLengthEncoding.h>
|
||||
#include <blockstore/implementations/inmemory/InMemoryBlockStore2.h>
|
||||
#include <blockstore/implementations/low2highlevel/LowToHighLevelBlockStore.h>
|
||||
#include <cpp-utils/data/DataFixture.h>
|
||||
#include <cpp-utils/data/Data.h>
|
||||
#include "blobstore/implementations/onblocks/BlobStoreOnBlocks.h"
|
||||
#include "blobstore/implementations/onblocks/BlobOnBlocks.h"
|
||||
|
||||
using namespace blobstore;
|
||||
using namespace blobstore::onblocks;
|
||||
using cpputils::unique_ref;
|
||||
using cpputils::make_unique_ref;
|
||||
using cpputils::DataFixture;
|
||||
using cpputils::Data;
|
||||
using blockstore::inmemory::InMemoryBlockStore2;
|
||||
using blockstore::lowtohighlevel::LowToHighLevelBlockStore;
|
||||
using blockstore::compressing::CompressingBlockStore;
|
||||
using blockstore::compressing::RunLengthEncoding;
|
||||
|
||||
// Test cases, ensuring that big blobs (>4G) work (i.e. testing that we don't use any 32bit variables for blob size, etc.)
|
||||
class BigBlobsTest : public ::testing::Test {
|
||||
public:
|
||||
static constexpr size_t BLOCKSIZE = 32 * 1024;
|
||||
static constexpr uint64_t SMALL_BLOB_SIZE = UINT64_C(1024)*1024*1024*3.95; // 3.95 GB (<4GB)
|
||||
static constexpr uint64_t LARGE_BLOB_SIZE = UINT64_C(1024)*1024*1024*4.05; // 4.05 GB (>4GB)
|
||||
|
||||
static constexpr uint64_t max_uint_32 = std::numeric_limits<uint32_t>::max();
|
||||
static_assert(SMALL_BLOB_SIZE < max_uint_32, "LARGE_BLOB_SIZE should need 64bit or the test case is mute");
|
||||
static_assert(LARGE_BLOB_SIZE > max_uint_32, "LARGE_BLOB_SIZE should need 64bit or the test case is mute");
|
||||
|
||||
unique_ref<BlobStore> blobStore = make_unique_ref<BlobStoreOnBlocks>(make_unique_ref<CompressingBlockStore<RunLengthEncoding>>(make_unique_ref<LowToHighLevelBlockStore>(make_unique_ref<InMemoryBlockStore2>())), BLOCKSIZE);
|
||||
unique_ref<Blob> blob = blobStore->create();
|
||||
};
|
||||
|
||||
constexpr size_t BigBlobsTest::BLOCKSIZE;
|
||||
constexpr uint64_t BigBlobsTest::SMALL_BLOB_SIZE;
|
||||
constexpr uint64_t BigBlobsTest::LARGE_BLOB_SIZE;
|
||||
|
||||
TEST_F(BigBlobsTest, Resize) {
|
||||
//These operations are in one test case and not in many small ones, because it takes quite long to create a >4GB blob.
|
||||
|
||||
//Resize to >4GB
|
||||
blob->resize(LARGE_BLOB_SIZE);
|
||||
EXPECT_EQ(LARGE_BLOB_SIZE, blob->size());
|
||||
|
||||
//Grow while >4GB
|
||||
blob->resize(LARGE_BLOB_SIZE + 1024);
|
||||
EXPECT_EQ(LARGE_BLOB_SIZE + 1024, blob->size());
|
||||
|
||||
//Shrink while >4GB
|
||||
blob->resize(LARGE_BLOB_SIZE);
|
||||
EXPECT_EQ(LARGE_BLOB_SIZE, blob->size());
|
||||
|
||||
//Shrink to <4GB
|
||||
blob->resize(SMALL_BLOB_SIZE);
|
||||
EXPECT_EQ(SMALL_BLOB_SIZE, blob->size());
|
||||
|
||||
//Grow to >4GB
|
||||
blob->resize(LARGE_BLOB_SIZE);
|
||||
EXPECT_EQ(LARGE_BLOB_SIZE, blob->size());
|
||||
|
||||
//Flush >4GB blob
|
||||
blob->flush();
|
||||
|
||||
//Destruct >4GB blob
|
||||
auto blockId = blob->blockId();
|
||||
cpputils::destruct(std::move(blob));
|
||||
|
||||
//Load >4GB blob
|
||||
blob = blobStore->load(blockId).value();
|
||||
|
||||
//Remove >4GB blob
|
||||
blobStore->remove(std::move(blob));
|
||||
}
|
||||
|
||||
TEST_F(BigBlobsTest, GrowByWriting_Crossing4GBBorder) {
|
||||
Data fixture = DataFixture::generate(2*(LARGE_BLOB_SIZE-SMALL_BLOB_SIZE));
|
||||
blob->write(fixture.data(), SMALL_BLOB_SIZE, fixture.size());
|
||||
|
||||
EXPECT_EQ(LARGE_BLOB_SIZE+(LARGE_BLOB_SIZE-SMALL_BLOB_SIZE), blob->size());
|
||||
|
||||
Data loaded(fixture.size());
|
||||
blob->read(loaded.data(), SMALL_BLOB_SIZE, loaded.size());
|
||||
EXPECT_EQ(0, std::memcmp(loaded.data(), fixture.data(), loaded.size()));
|
||||
}
|
||||
|
||||
TEST_F(BigBlobsTest, GrowByWriting_Outside4GBBorder_StartingSizeZero) {
|
||||
Data fixture = DataFixture::generate(1024);
|
||||
blob->write(fixture.data(), LARGE_BLOB_SIZE, fixture.size());
|
||||
|
||||
EXPECT_EQ(LARGE_BLOB_SIZE+1024, blob->size());
|
||||
|
||||
Data loaded(fixture.size());
|
||||
blob->read(loaded.data(), LARGE_BLOB_SIZE, loaded.size());
|
||||
EXPECT_EQ(0, std::memcmp(loaded.data(), fixture.data(), loaded.size()));
|
||||
}
|
||||
|
||||
TEST_F(BigBlobsTest, GrowByWriting_Outside4GBBorder_StartingSizeOutside4GBBorder) {
|
||||
blob->resize(LARGE_BLOB_SIZE);
|
||||
Data fixture = DataFixture::generate(1024);
|
||||
blob->write(fixture.data(), LARGE_BLOB_SIZE+1024, fixture.size());
|
||||
|
||||
EXPECT_EQ(LARGE_BLOB_SIZE+2048, blob->size());
|
||||
|
||||
Data loaded(fixture.size());
|
||||
blob->read(loaded.data(), LARGE_BLOB_SIZE+1024, loaded.size());
|
||||
EXPECT_EQ(0, std::memcmp(loaded.data(), fixture.data(), loaded.size()));
|
||||
}
|
||||
|
||||
TEST_F(BigBlobsTest, ReadWriteAfterGrown_Crossing4GBBorder) {
|
||||
blob->resize(LARGE_BLOB_SIZE+(LARGE_BLOB_SIZE-SMALL_BLOB_SIZE)+1024);
|
||||
Data fixture = DataFixture::generate(2*(LARGE_BLOB_SIZE-SMALL_BLOB_SIZE));
|
||||
blob->write(fixture.data(), SMALL_BLOB_SIZE, fixture.size());
|
||||
|
||||
EXPECT_EQ(LARGE_BLOB_SIZE+(LARGE_BLOB_SIZE-SMALL_BLOB_SIZE)+1024, blob->size());
|
||||
|
||||
Data loaded(fixture.size());
|
||||
blob->read(loaded.data(), SMALL_BLOB_SIZE, loaded.size());
|
||||
EXPECT_EQ(0, std::memcmp(loaded.data(), fixture.data(), loaded.size()));
|
||||
}
|
||||
|
||||
TEST_F(BigBlobsTest, ReadWriteAfterGrown_Outside4GBBorder) {
|
||||
blob->resize(LARGE_BLOB_SIZE+2048);
|
||||
Data fixture = DataFixture::generate(1024);
|
||||
blob->write(fixture.data(), LARGE_BLOB_SIZE, fixture.size());
|
||||
|
||||
EXPECT_EQ(LARGE_BLOB_SIZE+2048, blob->size());
|
||||
|
||||
Data loaded(fixture.size());
|
||||
blob->read(loaded.data(), LARGE_BLOB_SIZE, loaded.size());
|
||||
EXPECT_EQ(0, std::memcmp(loaded.data(), fixture.data(), loaded.size()));
|
||||
}
|
||||
|
||||
//TODO Test Blob::readAll (only on 64bit systems)
|
@ -1,256 +0,0 @@
|
||||
#include "testutils/BlobStoreTest.h"
|
||||
#include <cpp-utils/data/Data.h>
|
||||
#include <cpp-utils/data/DataFixture.h>
|
||||
#include "blobstore/implementations/onblocks/datanodestore/DataNodeView.h"
|
||||
|
||||
using cpputils::unique_ref;
|
||||
using ::testing::WithParamInterface;
|
||||
using ::testing::Values;
|
||||
|
||||
using namespace blobstore;
|
||||
using blobstore::onblocks::datanodestore::DataNodeLayout;
|
||||
using blockstore::BlockId;
|
||||
using cpputils::Data;
|
||||
using cpputils::DataFixture;
|
||||
|
||||
namespace {
|
||||
|
||||
class BlobReadWriteTest: public BlobStoreTest {
|
||||
public:
|
||||
static constexpr uint32_t LARGE_SIZE = 10 * 1024 * 1024;
|
||||
static constexpr DataNodeLayout LAYOUT = DataNodeLayout(BLOCKSIZE_BYTES);
|
||||
|
||||
BlobReadWriteTest()
|
||||
:randomData(DataFixture::generate(LARGE_SIZE)),
|
||||
blob(blobStore->create()) {
|
||||
}
|
||||
|
||||
Data readBlob(const Blob &blob) {
|
||||
Data data(blob.size());
|
||||
blob.read(data.data(), 0, data.size());
|
||||
return data;
|
||||
}
|
||||
|
||||
template<class DataClass>
|
||||
void EXPECT_DATA_READS_AS(const DataClass &expected, const Blob &actual, uint64_t offset, uint64_t size) {
|
||||
Data read(size);
|
||||
actual.read(read.data(), offset, size);
|
||||
EXPECT_EQ(0, std::memcmp(expected.data(), read.data(), size));
|
||||
}
|
||||
|
||||
Data randomData;
|
||||
unique_ref<Blob> blob;
|
||||
};
|
||||
constexpr uint32_t BlobReadWriteTest::LARGE_SIZE;
|
||||
constexpr DataNodeLayout BlobReadWriteTest::LAYOUT;
|
||||
|
||||
TEST_F(BlobReadWriteTest, WritingImmediatelyFlushes_SmallSize) {
|
||||
blob->resize(5);
|
||||
blob->write(randomData.data(), 0, 5);
|
||||
auto loaded = loadBlob(blob->blockId());
|
||||
EXPECT_DATA_READS_AS(randomData, *loaded, 0, 5);
|
||||
}
|
||||
|
||||
TEST_F(BlobReadWriteTest, WritingImmediatelyFlushes_LargeSize) {
|
||||
blob->resize(LARGE_SIZE);
|
||||
blob->write(randomData.data(), 0, LARGE_SIZE);
|
||||
auto loaded = loadBlob(blob->blockId());
|
||||
EXPECT_DATA_READS_AS(randomData, *loaded, 0, LARGE_SIZE);
|
||||
}
|
||||
|
||||
// Regression test for a strange bug we had
|
||||
TEST_F(BlobReadWriteTest, WritingCloseTo16ByteLimitDoesntDestroySize) {
|
||||
blob->resize(1);
|
||||
blob->write(randomData.data(), 32776, 4);
|
||||
EXPECT_EQ(32780u, blob->size());
|
||||
}
|
||||
|
||||
TEST_F(BlobReadWriteTest, givenEmptyBlob_whenTryReadInFirstLeaf_thenFails) {
|
||||
Data data(5);
|
||||
size_t read = blob->tryRead(data.data(), 3, 5);
|
||||
EXPECT_EQ(0, read);
|
||||
}
|
||||
|
||||
TEST_F(BlobReadWriteTest, givenEmptyBlob_whenTryReadInLaterLeaf_thenFails) {
|
||||
Data data(5);
|
||||
size_t read = blob->tryRead(data.data(), 2*LAYOUT.maxBytesPerLeaf(), 5);
|
||||
EXPECT_EQ(0, read);
|
||||
}
|
||||
|
||||
TEST_F(BlobReadWriteTest, givenEmptyBlob_whenReadInFirstLeaf_thenFails) {
|
||||
Data data(5);
|
||||
EXPECT_ANY_THROW(
|
||||
blob->read(data.data(), 3, 5)
|
||||
);
|
||||
}
|
||||
|
||||
TEST_F(BlobReadWriteTest, givenEmptyBlob_whenReadInLaterLeaf_thenFails) {
|
||||
Data data(5);
|
||||
EXPECT_ANY_THROW(
|
||||
blob->read(data.data(), 2*LAYOUT.maxBytesPerLeaf(), 5)
|
||||
);
|
||||
}
|
||||
|
||||
TEST_F(BlobReadWriteTest, givenEmptyBlob_whenReadAll_thenReturnsZeroSizedData) {
|
||||
Data data = blob->readAll();
|
||||
EXPECT_EQ(0, data.size());
|
||||
}
|
||||
|
||||
TEST_F(BlobReadWriteTest, givenEmptyBlob_whenWrite_thenGrows) {
|
||||
Data data(5);
|
||||
blob->write(data.data(), 4, 5);
|
||||
EXPECT_EQ(9, blob->size());
|
||||
}
|
||||
|
||||
TEST_F(BlobReadWriteTest, givenEmptyBlob_whenWriteZeroBytes_thenDoesntGrow) {
|
||||
Data data(5);
|
||||
blob->write(data.data(), 4, 0);
|
||||
EXPECT_EQ(0, blob->size());;
|
||||
}
|
||||
|
||||
TEST_F(BlobReadWriteTest, givenBlobResizedToZero_whenTryReadInFirstLeaf_thenFails) {
|
||||
Data data(5);
|
||||
size_t read = blob->tryRead(data.data(), 3, 5);
|
||||
EXPECT_EQ(0, read);
|
||||
}
|
||||
|
||||
TEST_F(BlobReadWriteTest, givenBlobResizedToZero_whenTryReadInLaterLeaf_thenFails) {
|
||||
Data data(5);
|
||||
size_t read = blob->tryRead(data.data(), 2*LAYOUT.maxBytesPerLeaf(), 5);
|
||||
EXPECT_EQ(0, read);
|
||||
}
|
||||
|
||||
TEST_F(BlobReadWriteTest, givenBlobResizedToZero_whenReadInFirstLeaf_thenFails) {
|
||||
Data data(5);
|
||||
EXPECT_ANY_THROW(
|
||||
blob->read(data.data(), 3, 5)
|
||||
);
|
||||
}
|
||||
|
||||
TEST_F(BlobReadWriteTest, givenBlobResizedToZero_whenReadInLaterLeaf_thenFails) {
|
||||
Data data(5);
|
||||
EXPECT_ANY_THROW(
|
||||
blob->read(data.data(), 2*LAYOUT.maxBytesPerLeaf(), 5)
|
||||
);
|
||||
}
|
||||
|
||||
TEST_F(BlobReadWriteTest, givenBlobResizedToZero_whenReadAll_thenReturnsZeroSizedData) {
|
||||
Data data = blob->readAll();
|
||||
EXPECT_EQ(0, data.size());
|
||||
}
|
||||
|
||||
TEST_F(BlobReadWriteTest, givenBlobResizedToZero_whenWrite_thenGrows) {
|
||||
Data data(5);
|
||||
blob->write(data.data(), 4, 5);
|
||||
EXPECT_EQ(9, blob->size());
|
||||
}
|
||||
|
||||
TEST_F(BlobReadWriteTest, givenBlobResizedToZero_whenWriteZeroBytes_thenDoesntGrow) {
|
||||
Data data(5);
|
||||
blob->write(data.data(), 4, 0);
|
||||
EXPECT_EQ(0, blob->size());
|
||||
}
|
||||
|
||||
struct DataRange {
|
||||
uint64_t blobsize;
|
||||
uint64_t offset;
|
||||
uint64_t count;
|
||||
};
|
||||
class BlobReadWriteDataTest: public BlobReadWriteTest, public WithParamInterface<DataRange> {
|
||||
public:
|
||||
Data foregroundData;
|
||||
Data backgroundData;
|
||||
|
||||
BlobReadWriteDataTest()
|
||||
: foregroundData(DataFixture::generate(GetParam().count, 0)),
|
||||
backgroundData(DataFixture::generate(GetParam().blobsize, 1)) {
|
||||
}
|
||||
|
||||
template<class DataClass>
|
||||
void EXPECT_DATA_READS_AS_OUTSIDE_OF(const DataClass &expected, const Blob &blob, uint64_t start, uint64_t count) {
|
||||
Data begin(start);
|
||||
Data end(GetParam().blobsize - count - start);
|
||||
|
||||
std::memcpy(begin.data(), expected.data(), start);
|
||||
std::memcpy(end.data(), expected.dataOffset(start+count), end.size());
|
||||
|
||||
EXPECT_DATA_READS_AS(begin, blob, 0, start);
|
||||
EXPECT_DATA_READS_AS(end, blob, start + count, end.size());
|
||||
}
|
||||
|
||||
void EXPECT_DATA_IS_ZEROES_OUTSIDE_OF(const Blob &blob, uint64_t start, uint64_t count) {
|
||||
Data ZEROES(GetParam().blobsize);
|
||||
ZEROES.FillWithZeroes();
|
||||
EXPECT_DATA_READS_AS_OUTSIDE_OF(ZEROES, blob, start, count);
|
||||
}
|
||||
};
|
||||
INSTANTIATE_TEST_SUITE_P(BlobReadWriteDataTest, BlobReadWriteDataTest, Values(
|
||||
//Blob with only one leaf
|
||||
DataRange{BlobReadWriteDataTest::LAYOUT.maxBytesPerLeaf(), 0, BlobReadWriteDataTest::LAYOUT.maxBytesPerLeaf()}, // full size leaf, access beginning to end
|
||||
DataRange{BlobReadWriteDataTest::LAYOUT.maxBytesPerLeaf(), 100, BlobReadWriteDataTest::LAYOUT.maxBytesPerLeaf()-200}, // full size leaf, access middle to middle
|
||||
DataRange{BlobReadWriteDataTest::LAYOUT.maxBytesPerLeaf(), 0, BlobReadWriteDataTest::LAYOUT.maxBytesPerLeaf()-100}, // full size leaf, access beginning to middle
|
||||
DataRange{BlobReadWriteDataTest::LAYOUT.maxBytesPerLeaf(), 100, BlobReadWriteDataTest::LAYOUT.maxBytesPerLeaf()-100}, // full size leaf, access middle to end
|
||||
DataRange{BlobReadWriteDataTest::LAYOUT.maxBytesPerLeaf()-100, 0, BlobReadWriteDataTest::LAYOUT.maxBytesPerLeaf()-100}, // non-full size leaf, access beginning to end
|
||||
DataRange{BlobReadWriteDataTest::LAYOUT.maxBytesPerLeaf()-100, 100, BlobReadWriteDataTest::LAYOUT.maxBytesPerLeaf()-300}, // non-full size leaf, access middle to middle
|
||||
DataRange{BlobReadWriteDataTest::LAYOUT.maxBytesPerLeaf()-100, 0, BlobReadWriteDataTest::LAYOUT.maxBytesPerLeaf()-200}, // non-full size leaf, access beginning to middle
|
||||
DataRange{BlobReadWriteDataTest::LAYOUT.maxBytesPerLeaf()-100, 100, BlobReadWriteDataTest::LAYOUT.maxBytesPerLeaf()-200}, // non-full size leaf, access middle to end
|
||||
//Larger blob
|
||||
DataRange{BlobReadWriteDataTest::LARGE_SIZE, 0, BlobReadWriteDataTest::LARGE_SIZE}, // access beginning to end
|
||||
DataRange{BlobReadWriteDataTest::LARGE_SIZE, 100, BlobReadWriteDataTest::LARGE_SIZE-200}, // access middle first leaf to middle last leaf
|
||||
DataRange{BlobReadWriteDataTest::LARGE_SIZE, 0, BlobReadWriteDataTest::LARGE_SIZE-100}, // access beginning to middle last leaf
|
||||
DataRange{BlobReadWriteDataTest::LARGE_SIZE, 100, BlobReadWriteDataTest::LARGE_SIZE-100}, // access middle first leaf to end
|
||||
DataRange{BlobReadWriteDataTest::LARGE_SIZE, BlobReadWriteDataTest::LARGE_SIZE*1/3, BlobReadWriteDataTest::LARGE_SIZE*1/3}, // access middle to middle
|
||||
DataRange{BlobReadWriteDataTest::LARGE_SIZE, 0, BlobReadWriteDataTest::LARGE_SIZE*2/3}, // access beginning to middle
|
||||
DataRange{BlobReadWriteDataTest::LARGE_SIZE, BlobReadWriteDataTest::LARGE_SIZE*1/3, BlobReadWriteDataTest::LARGE_SIZE*2/3} // access middle to end
|
||||
));
|
||||
|
||||
TEST_P(BlobReadWriteDataTest, WritingDoesntChangeSize) {
|
||||
blob->resize(GetParam().blobsize);
|
||||
blob->write(this->foregroundData.data(), GetParam().offset, GetParam().count);
|
||||
EXPECT_EQ(GetParam().blobsize, blob->size());
|
||||
}
|
||||
|
||||
TEST_P(BlobReadWriteDataTest, WriteAndReadImmediately) {
|
||||
blob->resize(GetParam().blobsize);
|
||||
blob->write(this->foregroundData.data(), GetParam().offset, GetParam().count);
|
||||
|
||||
EXPECT_DATA_READS_AS(this->foregroundData, *blob, GetParam().offset, GetParam().count);
|
||||
EXPECT_DATA_IS_ZEROES_OUTSIDE_OF(*blob, GetParam().offset, GetParam().count);
|
||||
}
|
||||
|
||||
TEST_P(BlobReadWriteDataTest, WriteAndReadAfterLoading) {
|
||||
blob->resize(GetParam().blobsize);
|
||||
blob->write(this->foregroundData.data(), GetParam().offset, GetParam().count);
|
||||
auto loaded = loadBlob(blob->blockId());
|
||||
|
||||
EXPECT_DATA_READS_AS(this->foregroundData, *loaded, GetParam().offset, GetParam().count);
|
||||
EXPECT_DATA_IS_ZEROES_OUTSIDE_OF(*loaded, GetParam().offset, GetParam().count);
|
||||
}
|
||||
|
||||
TEST_P(BlobReadWriteDataTest, OverwriteAndRead) {
|
||||
blob->resize(GetParam().blobsize);
|
||||
blob->write(this->backgroundData.data(), 0, GetParam().blobsize);
|
||||
blob->write(this->foregroundData.data(), GetParam().offset, GetParam().count);
|
||||
EXPECT_DATA_READS_AS(this->foregroundData, *blob, GetParam().offset, GetParam().count);
|
||||
EXPECT_DATA_READS_AS_OUTSIDE_OF(this->backgroundData, *blob, GetParam().offset, GetParam().count);
|
||||
}
|
||||
|
||||
TEST_P(BlobReadWriteDataTest, WriteWholeAndReadPart) {
|
||||
blob->resize(GetParam().blobsize);
|
||||
blob->write(this->backgroundData.data(), 0, GetParam().blobsize);
|
||||
Data read(GetParam().count);
|
||||
blob->read(read.data(), GetParam().offset, GetParam().count);
|
||||
EXPECT_EQ(0, std::memcmp(read.data(), this->backgroundData.dataOffset(GetParam().offset), GetParam().count));
|
||||
}
|
||||
|
||||
TEST_P(BlobReadWriteDataTest, WritePartAndReadWhole) {
|
||||
blob->resize(GetParam().blobsize);
|
||||
blob->write(this->backgroundData.data(), 0, GetParam().blobsize);
|
||||
blob->write(this->foregroundData.data(), GetParam().offset, GetParam().count);
|
||||
Data read = readBlob(*blob);
|
||||
EXPECT_EQ(0, std::memcmp(read.data(), this->backgroundData.data(), GetParam().offset));
|
||||
EXPECT_EQ(0, std::memcmp(read.dataOffset(GetParam().offset), this->foregroundData.data(), GetParam().count));
|
||||
EXPECT_EQ(0, std::memcmp(read.dataOffset(GetParam().offset+GetParam().count), this->backgroundData.dataOffset(GetParam().offset+GetParam().count), GetParam().blobsize-GetParam().count-GetParam().offset));
|
||||
}
|
||||
|
||||
}
|
@ -1,172 +0,0 @@
|
||||
#include "testutils/BlobStoreTest.h"
|
||||
#include <cpp-utils/data/Data.h>
|
||||
#include <cpp-utils/data/DataFixture.h>
|
||||
|
||||
using namespace blobstore;
|
||||
using blockstore::BlockId;
|
||||
using cpputils::Data;
|
||||
using cpputils::DataFixture;
|
||||
using cpputils::unique_ref;
|
||||
|
||||
class BlobSizeTest: public BlobStoreTest {
|
||||
public:
|
||||
BlobSizeTest(): blob(blobStore->create()) {}
|
||||
|
||||
static constexpr uint32_t MEDIUM_SIZE = 5 * 1024 * 1024;
|
||||
static constexpr uint32_t LARGE_SIZE = 10 * 1024 * 1024;
|
||||
|
||||
unique_ref<Blob> blob;
|
||||
};
|
||||
constexpr uint32_t BlobSizeTest::MEDIUM_SIZE;
|
||||
constexpr uint32_t BlobSizeTest::LARGE_SIZE;
|
||||
|
||||
TEST_F(BlobSizeTest, CreatedBlobIsEmpty) {
|
||||
EXPECT_EQ(0u, blob->size());
|
||||
}
|
||||
|
||||
TEST_F(BlobSizeTest, Growing_1Byte) {
|
||||
blob->resize(1);
|
||||
EXPECT_EQ(1u, blob->size());
|
||||
}
|
||||
|
||||
TEST_F(BlobSizeTest, Growing_Large) {
|
||||
blob->resize(LARGE_SIZE);
|
||||
EXPECT_EQ(LARGE_SIZE, blob->size());
|
||||
}
|
||||
|
||||
TEST_F(BlobSizeTest, Shrinking_Empty) {
|
||||
blob->resize(LARGE_SIZE);
|
||||
blob->resize(0);
|
||||
EXPECT_EQ(0u, blob->size());
|
||||
}
|
||||
|
||||
TEST_F(BlobSizeTest, Shrinking_1Byte) {
|
||||
blob->resize(LARGE_SIZE);
|
||||
blob->resize(1);
|
||||
EXPECT_EQ(1u, blob->size());
|
||||
}
|
||||
|
||||
TEST_F(BlobSizeTest, ResizingToItself_Empty) {
|
||||
blob->resize(0);
|
||||
EXPECT_EQ(0u, blob->size());
|
||||
}
|
||||
|
||||
TEST_F(BlobSizeTest, ResizingToItself_1Byte) {
|
||||
blob->resize(1);
|
||||
blob->resize(1);
|
||||
EXPECT_EQ(1u, blob->size());
|
||||
}
|
||||
|
||||
TEST_F(BlobSizeTest, ResizingToItself_Large) {
|
||||
blob->resize(LARGE_SIZE);
|
||||
blob->resize(LARGE_SIZE);
|
||||
EXPECT_EQ(LARGE_SIZE, blob->size());
|
||||
}
|
||||
|
||||
TEST_F(BlobSizeTest, EmptyBlobStaysEmptyWhenLoading) {
|
||||
BlockId blockId = blob->blockId();
|
||||
reset(std::move(blob));
|
||||
auto loaded = loadBlob(blockId);
|
||||
EXPECT_EQ(0u, loaded->size());
|
||||
}
|
||||
|
||||
TEST_F(BlobSizeTest, BlobSizeStaysIntactWhenLoading) {
|
||||
blob->resize(LARGE_SIZE);
|
||||
BlockId blockId = blob->blockId();
|
||||
reset(std::move(blob));
|
||||
auto loaded = loadBlob(blockId);
|
||||
EXPECT_EQ(LARGE_SIZE, loaded->size());
|
||||
}
|
||||
|
||||
TEST_F(BlobSizeTest, WritingAtEndOfBlobGrowsBlob_Empty) {
|
||||
int value = 0;
|
||||
blob->write(&value, 0, 4);
|
||||
EXPECT_EQ(4u, blob->size());
|
||||
}
|
||||
|
||||
TEST_F(BlobSizeTest, WritingAfterEndOfBlobGrowsBlob_Empty) {
|
||||
int value = 0;
|
||||
blob->write(&value, 2, 4);
|
||||
EXPECT_EQ(6u, blob->size());
|
||||
}
|
||||
|
||||
TEST_F(BlobSizeTest, WritingOverEndOfBlobGrowsBlob_NonEmpty) {
|
||||
blob->resize(1);
|
||||
int value = 0;
|
||||
blob->write(&value, 0, 4);
|
||||
EXPECT_EQ(4u, blob->size());
|
||||
}
|
||||
|
||||
TEST_F(BlobSizeTest, WritingAtEndOfBlobGrowsBlob_NonEmpty) {
|
||||
blob->resize(1);
|
||||
int value = 0;
|
||||
blob->write(&value, 1, 4);
|
||||
EXPECT_EQ(5u, blob->size());
|
||||
}
|
||||
|
||||
TEST_F(BlobSizeTest, WritingAfterEndOfBlobGrowsBlob_NonEmpty) {
|
||||
blob->resize(1);
|
||||
int value = 0;
|
||||
blob->write(&value, 2, 4);
|
||||
EXPECT_EQ(6u, blob->size());
|
||||
}
|
||||
|
||||
TEST_F(BlobSizeTest, ChangingSizeImmediatelyFlushes) {
|
||||
blob->resize(LARGE_SIZE);
|
||||
auto loaded = loadBlob(blob->blockId());
|
||||
EXPECT_EQ(LARGE_SIZE, loaded->size());
|
||||
}
|
||||
|
||||
class BlobSizeDataTest: public BlobSizeTest {
|
||||
public:
|
||||
BlobSizeDataTest()
|
||||
:ZEROES(LARGE_SIZE),
|
||||
randomData(DataFixture::generate(LARGE_SIZE)) {
|
||||
ZEROES.FillWithZeroes();
|
||||
}
|
||||
|
||||
Data readBlob(const Blob &blob) {
|
||||
Data data(blob.size());
|
||||
blob.read(data.data(), 0, data.size());
|
||||
return data;
|
||||
}
|
||||
|
||||
Data ZEROES;
|
||||
Data randomData;
|
||||
};
|
||||
|
||||
TEST_F(BlobSizeDataTest, BlobIsZeroedOutAfterGrowing) {
|
||||
//uint32_t LARGE_SIZE = 2*1024*1024;
|
||||
blob->resize(LARGE_SIZE);
|
||||
EXPECT_EQ(0, std::memcmp(readBlob(*blob).data(), ZEROES.data(), LARGE_SIZE));
|
||||
}
|
||||
|
||||
TEST_F(BlobSizeDataTest, BlobIsZeroedOutAfterGrowingAndLoading) {
|
||||
blob->resize(LARGE_SIZE);
|
||||
auto loaded = loadBlob(blob->blockId());
|
||||
EXPECT_EQ(0, std::memcmp(readBlob(*loaded).data(), ZEROES.data(), LARGE_SIZE));
|
||||
}
|
||||
|
||||
TEST_F(BlobSizeDataTest, DataStaysIntactWhenGrowing) {
|
||||
blob->resize(MEDIUM_SIZE);
|
||||
blob->write(randomData.data(), 0, MEDIUM_SIZE);
|
||||
blob->resize(LARGE_SIZE);
|
||||
EXPECT_EQ(0, std::memcmp(readBlob(*blob).data(), randomData.data(), MEDIUM_SIZE));
|
||||
EXPECT_EQ(0, std::memcmp(readBlob(*blob).dataOffset(MEDIUM_SIZE), ZEROES.data(), LARGE_SIZE-MEDIUM_SIZE));
|
||||
}
|
||||
|
||||
TEST_F(BlobSizeDataTest, DataStaysIntactWhenShrinking) {
|
||||
blob->resize(LARGE_SIZE);
|
||||
blob->write(randomData.data(), 0, LARGE_SIZE);
|
||||
blob->resize(MEDIUM_SIZE);
|
||||
EXPECT_EQ(0, std::memcmp(readBlob(*blob).data(), randomData.data(), MEDIUM_SIZE));
|
||||
}
|
||||
|
||||
TEST_F(BlobSizeDataTest, ChangedAreaIsZeroedOutWhenShrinkingAndRegrowing) {
|
||||
blob->resize(LARGE_SIZE);
|
||||
blob->write(randomData.data(), 0, LARGE_SIZE);
|
||||
blob->resize(MEDIUM_SIZE);
|
||||
blob->resize(LARGE_SIZE);
|
||||
EXPECT_EQ(0, std::memcmp(readBlob(*blob).data(), randomData.data(), MEDIUM_SIZE));
|
||||
EXPECT_EQ(0, std::memcmp(readBlob(*blob).dataOffset(MEDIUM_SIZE), ZEROES.data(), LARGE_SIZE-MEDIUM_SIZE));
|
||||
}
|
@ -1,43 +0,0 @@
|
||||
#include "testutils/BlobStoreTest.h"
|
||||
#include <cpp-utils/pointer/unique_ref_boost_optional_gtest_workaround.h>
|
||||
|
||||
using blockstore::BlockId;
|
||||
using boost::none;
|
||||
|
||||
TEST_F(BlobStoreTest, LoadNonexistingKeyOnEmptyBlobstore) {
|
||||
const blockstore::BlockId blockId = blockstore::BlockId::FromString("1491BB4932A389EE14BC7090AC772972");
|
||||
EXPECT_EQ(none, blobStore->load(blockId));
|
||||
}
|
||||
|
||||
TEST_F(BlobStoreTest, LoadNonexistingKeyOnNonEmptyBlobstore) {
|
||||
blobStore->create();
|
||||
const blockstore::BlockId blockId = blockstore::BlockId::FromString("1491BB4932A389EE14BC7090AC772972");
|
||||
EXPECT_EQ(none, blobStore->load(blockId));
|
||||
}
|
||||
|
||||
TEST_F(BlobStoreTest, TwoCreatedBlobsHaveDifferentKeys) {
|
||||
auto blob1 = blobStore->create();
|
||||
auto blob2 = blobStore->create();
|
||||
EXPECT_NE(blob1->blockId(), blob2->blockId());
|
||||
}
|
||||
|
||||
TEST_F(BlobStoreTest, BlobIsNotLoadableAfterDeletion_DeleteDirectly) {
|
||||
auto blob = blobStore->create();
|
||||
BlockId blockId = blob->blockId();
|
||||
blobStore->remove(std::move(blob));
|
||||
EXPECT_FALSE(static_cast<bool>(blobStore->load(blockId)));
|
||||
}
|
||||
|
||||
TEST_F(BlobStoreTest, BlobIsNotLoadableAfterDeletion_DeleteByKey) {
|
||||
auto blockId = blobStore->create()->blockId();
|
||||
blobStore->remove(blockId);
|
||||
EXPECT_FALSE(static_cast<bool>(blobStore->load(blockId)));
|
||||
}
|
||||
|
||||
TEST_F(BlobStoreTest, BlobIsNotLoadableAfterDeletion_DeleteAfterLoading) {
|
||||
auto blob = blobStore->create();
|
||||
BlockId blockId = blob->blockId();
|
||||
reset(std::move(blob));
|
||||
blobStore->remove(loadBlob(blockId));
|
||||
EXPECT_FALSE(static_cast<bool>(blobStore->load(blockId)));
|
||||
}
|
@ -1,231 +0,0 @@
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include "blobstore/implementations/onblocks/datanodestore/DataInnerNode.h"
|
||||
#include "blobstore/implementations/onblocks/datanodestore/DataLeafNode.h"
|
||||
#include "blobstore/implementations/onblocks/datanodestore/DataNodeStore.h"
|
||||
|
||||
#include <blockstore/implementations/testfake/FakeBlockStore.h>
|
||||
#include <blockstore/implementations/testfake/FakeBlock.h>
|
||||
|
||||
#include <memory>
|
||||
#include <cpp-utils/pointer/cast.h>
|
||||
|
||||
using ::testing::Test;
|
||||
|
||||
using cpputils::dynamic_pointer_move;
|
||||
|
||||
using blockstore::BlockId;
|
||||
using blockstore::testfake::FakeBlockStore;
|
||||
using blockstore::BlockStore;
|
||||
using cpputils::Data;
|
||||
using namespace blobstore;
|
||||
using namespace blobstore::onblocks;
|
||||
using namespace blobstore::onblocks::datanodestore;
|
||||
|
||||
using cpputils::unique_ref;
|
||||
using cpputils::make_unique_ref;
|
||||
using std::vector;
|
||||
|
||||
class DataInnerNodeTest: public Test {
|
||||
public:
|
||||
static constexpr uint32_t BLOCKSIZE_BYTES = 1024;
|
||||
|
||||
DataInnerNodeTest() :
|
||||
_blockStore(make_unique_ref<FakeBlockStore>()),
|
||||
blockStore(_blockStore.get()),
|
||||
nodeStore(make_unique_ref<DataNodeStore>(std::move(_blockStore), BLOCKSIZE_BYTES)),
|
||||
ZEROES(nodeStore->layout().maxBytesPerLeaf()),
|
||||
leaf(nodeStore->createNewLeafNode(Data(0))),
|
||||
node(nodeStore->createNewInnerNode(1, {leaf->blockId()})) {
|
||||
|
||||
ZEROES.FillWithZeroes();
|
||||
}
|
||||
|
||||
unique_ref<DataInnerNode> LoadInnerNode(const BlockId &blockId) {
|
||||
auto node = nodeStore->load(blockId).value();
|
||||
return dynamic_pointer_move<DataInnerNode>(node).value();
|
||||
}
|
||||
|
||||
BlockId CreateNewInnerNodeReturnKey(const DataNode &firstChild) {
|
||||
return nodeStore->createNewInnerNode(firstChild.depth()+1, {firstChild.blockId()})->blockId();
|
||||
}
|
||||
|
||||
unique_ref<DataInnerNode> CreateNewInnerNode() {
|
||||
auto new_leaf = nodeStore->createNewLeafNode(Data(0));
|
||||
return nodeStore->createNewInnerNode(1, {new_leaf->blockId()});
|
||||
}
|
||||
|
||||
unique_ref<DataInnerNode> CreateAndLoadNewInnerNode(const DataNode &firstChild) {
|
||||
auto blockId = CreateNewInnerNodeReturnKey(firstChild);
|
||||
return LoadInnerNode(blockId);
|
||||
}
|
||||
|
||||
unique_ref<DataInnerNode> CreateNewInnerNode(uint8_t depth, const vector<blockstore::BlockId> &children) {
|
||||
return nodeStore->createNewInnerNode(depth, children);
|
||||
}
|
||||
|
||||
BlockId CreateNewInnerNodeReturnKey(uint8_t depth, const vector<blockstore::BlockId> &children) {
|
||||
return CreateNewInnerNode(depth, children)->blockId();
|
||||
}
|
||||
|
||||
unique_ref<DataInnerNode> CreateAndLoadNewInnerNode(uint8_t depth, const vector<blockstore::BlockId> &children) {
|
||||
auto blockId = CreateNewInnerNodeReturnKey(depth, children);
|
||||
return LoadInnerNode(blockId);
|
||||
}
|
||||
|
||||
BlockId AddALeafTo(DataInnerNode *node) {
|
||||
auto leaf2 = nodeStore->createNewLeafNode(Data(0));
|
||||
node->addChild(*leaf2);
|
||||
return leaf2->blockId();
|
||||
}
|
||||
|
||||
BlockId CreateNodeWithDataConvertItToInnerNodeAndReturnKey() {
|
||||
auto node = CreateNewInnerNode();
|
||||
AddALeafTo(node.get());
|
||||
AddALeafTo(node.get());
|
||||
auto child = nodeStore->createNewLeafNode(Data(0));
|
||||
unique_ref<DataInnerNode> converted = DataNode::convertToNewInnerNode(std::move(node), nodeStore->layout(), *child);
|
||||
return converted->blockId();
|
||||
}
|
||||
|
||||
unique_ref<DataInnerNode> CopyInnerNode(const DataInnerNode &node) {
|
||||
auto copied = nodeStore->createNewNodeAsCopyFrom(node);
|
||||
return dynamic_pointer_move<DataInnerNode>(copied).value();
|
||||
}
|
||||
|
||||
BlockId InitializeInnerNodeAddLeafReturnKey() {
|
||||
auto node = DataInnerNode::CreateNewNode(blockStore, nodeStore->layout(), 1, {leaf->blockId()});
|
||||
AddALeafTo(node.get());
|
||||
return node->blockId();
|
||||
}
|
||||
|
||||
unique_ref<BlockStore> _blockStore;
|
||||
BlockStore *blockStore;
|
||||
unique_ref<DataNodeStore> nodeStore;
|
||||
Data ZEROES;
|
||||
unique_ref<DataLeafNode> leaf;
|
||||
unique_ref<DataInnerNode> node;
|
||||
|
||||
private:
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(DataInnerNodeTest);
|
||||
};
|
||||
|
||||
constexpr uint32_t DataInnerNodeTest::BLOCKSIZE_BYTES;
|
||||
|
||||
TEST_F(DataInnerNodeTest, CorrectKeyReturnedAfterLoading) {
|
||||
BlockId blockId = DataInnerNode::CreateNewNode(blockStore, nodeStore->layout(), 1, {leaf->blockId()})->blockId();
|
||||
|
||||
auto loaded = nodeStore->load(blockId).value();
|
||||
EXPECT_EQ(blockId, loaded->blockId());
|
||||
}
|
||||
|
||||
TEST_F(DataInnerNodeTest, InitializesCorrectly) {
|
||||
auto node = DataInnerNode::CreateNewNode(blockStore, nodeStore->layout(), 1, {leaf->blockId()});
|
||||
|
||||
EXPECT_EQ(1u, node->numChildren());
|
||||
EXPECT_EQ(leaf->blockId(), node->readChild(0).blockId());
|
||||
}
|
||||
|
||||
TEST_F(DataInnerNodeTest, ReinitializesCorrectly) {
|
||||
auto blockId = DataLeafNode::CreateNewNode(blockStore, nodeStore->layout(), Data(0))->blockId();
|
||||
auto node = DataInnerNode::InitializeNewNode(blockStore->load(blockId).value(), nodeStore->layout(), 1, {leaf->blockId()});
|
||||
|
||||
EXPECT_EQ(1u, node->numChildren());
|
||||
EXPECT_EQ(leaf->blockId(), node->readChild(0).blockId());
|
||||
}
|
||||
|
||||
TEST_F(DataInnerNodeTest, IsCorrectlyInitializedAfterLoading) {
|
||||
auto loaded = CreateAndLoadNewInnerNode(*leaf);
|
||||
|
||||
EXPECT_EQ(1u, loaded->numChildren());
|
||||
EXPECT_EQ(leaf->blockId(), loaded->readChild(0).blockId());
|
||||
}
|
||||
|
||||
TEST_F(DataInnerNodeTest, AddingASecondLeaf) {
|
||||
BlockId leaf2_blockId = AddALeafTo(node.get());
|
||||
|
||||
EXPECT_EQ(2u, node->numChildren());
|
||||
EXPECT_EQ(leaf->blockId(), node->readChild(0).blockId());
|
||||
EXPECT_EQ(leaf2_blockId, node->readChild(1).blockId());
|
||||
}
|
||||
|
||||
TEST_F(DataInnerNodeTest, AddingASecondLeafAndReload) {
|
||||
auto leaf2 = nodeStore->createNewLeafNode(Data(0));
|
||||
auto loaded = CreateAndLoadNewInnerNode(1, {leaf->blockId(), leaf2->blockId()});
|
||||
|
||||
EXPECT_EQ(2u, loaded->numChildren());
|
||||
EXPECT_EQ(leaf->blockId(), loaded->readChild(0).blockId());
|
||||
EXPECT_EQ(leaf2->blockId(), loaded->readChild(1).blockId());
|
||||
}
|
||||
|
||||
TEST_F(DataInnerNodeTest, BuildingAThreeLevelTree) {
|
||||
auto node2 = CreateNewInnerNode();
|
||||
auto parent = CreateNewInnerNode(node->depth()+1, {node->blockId(), node2->blockId()});
|
||||
|
||||
EXPECT_EQ(2u, parent->numChildren());
|
||||
EXPECT_EQ(node->blockId(), parent->readChild(0).blockId());
|
||||
EXPECT_EQ(node2->blockId(), parent->readChild(1).blockId());
|
||||
}
|
||||
|
||||
TEST_F(DataInnerNodeTest, BuildingAThreeLevelTreeAndReload) {
|
||||
auto node2 = CreateNewInnerNode();
|
||||
auto parent = CreateAndLoadNewInnerNode(node->depth()+1, {node->blockId(), node2->blockId()});
|
||||
|
||||
EXPECT_EQ(2u, parent->numChildren());
|
||||
EXPECT_EQ(node->blockId(), parent->readChild(0).blockId());
|
||||
EXPECT_EQ(node2->blockId(), parent->readChild(1).blockId());
|
||||
}
|
||||
|
||||
TEST_F(DataInnerNodeTest, ConvertToInternalNode) {
|
||||
auto child = nodeStore->createNewLeafNode(Data(0));
|
||||
BlockId node_blockId = node->blockId();
|
||||
unique_ref<DataInnerNode> converted = DataNode::convertToNewInnerNode(std::move(node), nodeStore->layout(), *child);
|
||||
|
||||
EXPECT_EQ(1u, converted->numChildren());
|
||||
EXPECT_EQ(child->blockId(), converted->readChild(0).blockId());
|
||||
EXPECT_EQ(node_blockId, converted->blockId());
|
||||
}
|
||||
|
||||
TEST_F(DataInnerNodeTest, ConvertToInternalNodeZeroesOutChildrenRegion) {
|
||||
BlockId blockId = CreateNodeWithDataConvertItToInnerNodeAndReturnKey();
|
||||
|
||||
auto block = blockStore->load(blockId).value();
|
||||
EXPECT_EQ(0, std::memcmp(ZEROES.data(), static_cast<const uint8_t*>(block->data())+DataNodeLayout::HEADERSIZE_BYTES+sizeof(DataInnerNode::ChildEntry), nodeStore->layout().maxBytesPerLeaf()-sizeof(DataInnerNode::ChildEntry)));
|
||||
}
|
||||
|
||||
TEST_F(DataInnerNodeTest, CopyingCreatesNewNode) {
|
||||
auto copied = CopyInnerNode(*node);
|
||||
EXPECT_NE(node->blockId(), copied->blockId());
|
||||
}
|
||||
|
||||
TEST_F(DataInnerNodeTest, CopyInnerNodeWithOneChild) {
|
||||
auto copied = CopyInnerNode(*node);
|
||||
|
||||
EXPECT_EQ(node->numChildren(), copied->numChildren());
|
||||
EXPECT_EQ(node->readChild(0).blockId(), copied->readChild(0).blockId());
|
||||
}
|
||||
|
||||
TEST_F(DataInnerNodeTest, CopyInnerNodeWithTwoChildren) {
|
||||
AddALeafTo(node.get());
|
||||
auto copied = CopyInnerNode(*node);
|
||||
|
||||
EXPECT_EQ(node->numChildren(), copied->numChildren());
|
||||
EXPECT_EQ(node->readChild(0).blockId(), copied->readChild(0).blockId());
|
||||
EXPECT_EQ(node->readChild(1).blockId(), copied->readChild(1).blockId());
|
||||
}
|
||||
|
||||
TEST_F(DataInnerNodeTest, LastChildWhenOneChild) {
|
||||
EXPECT_EQ(leaf->blockId(), node->readLastChild().blockId());
|
||||
}
|
||||
|
||||
TEST_F(DataInnerNodeTest, LastChildWhenTwoChildren) {
|
||||
BlockId blockId = AddALeafTo(node.get());
|
||||
EXPECT_EQ(blockId, node->readLastChild().blockId());
|
||||
}
|
||||
|
||||
TEST_F(DataInnerNodeTest, LastChildWhenThreeChildren) {
|
||||
AddALeafTo(node.get());
|
||||
BlockId blockId = AddALeafTo(node.get());
|
||||
EXPECT_EQ(blockId, node->readLastChild().blockId());
|
||||
}
|
@ -1,345 +0,0 @@
|
||||
#include "blobstore/implementations/onblocks/datanodestore/DataLeafNode.h"
|
||||
#include "blobstore/implementations/onblocks/datanodestore/DataInnerNode.h"
|
||||
#include "blobstore/implementations/onblocks/datanodestore/DataNodeStore.h"
|
||||
#include "blobstore/implementations/onblocks/BlobStoreOnBlocks.h"
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include <cpp-utils/pointer/cast.h>
|
||||
|
||||
#include <blockstore/implementations/testfake/FakeBlockStore.h>
|
||||
#include <blockstore/implementations/testfake/FakeBlock.h>
|
||||
#include <cpp-utils/data/DataFixture.h>
|
||||
|
||||
using ::testing::Test;
|
||||
using ::testing::WithParamInterface;
|
||||
using ::testing::Values;
|
||||
using cpputils::unique_ref;
|
||||
using cpputils::make_unique_ref;
|
||||
using std::string;
|
||||
using cpputils::DataFixture;
|
||||
using cpputils::deserialize;
|
||||
|
||||
//TODO Split into multiple files
|
||||
|
||||
using cpputils::dynamic_pointer_move;
|
||||
|
||||
using blockstore::BlockStore;
|
||||
using cpputils::Data;
|
||||
using blockstore::BlockId;
|
||||
using blockstore::testfake::FakeBlockStore;
|
||||
using namespace blobstore;
|
||||
using namespace blobstore::onblocks;
|
||||
using namespace blobstore::onblocks::datanodestore;
|
||||
|
||||
namespace {
|
||||
|
||||
#define EXPECT_IS_PTR_TYPE(Type, ptr) EXPECT_NE(nullptr, dynamic_cast<Type*>(ptr)) << "Given pointer cannot be cast to the given type"
|
||||
|
||||
class DataLeafNodeTest: public Test {
|
||||
public:
|
||||
|
||||
static constexpr uint32_t BLOCKSIZE_BYTES = 1024;
|
||||
static constexpr DataNodeLayout LAYOUT = DataNodeLayout(BLOCKSIZE_BYTES);
|
||||
|
||||
DataLeafNodeTest():
|
||||
_blockStore(make_unique_ref<FakeBlockStore>()),
|
||||
blockStore(_blockStore.get()),
|
||||
nodeStore(make_unique_ref<DataNodeStore>(std::move(_blockStore), BLOCKSIZE_BYTES)),
|
||||
ZEROES(nodeStore->layout().maxBytesPerLeaf()),
|
||||
randomData(nodeStore->layout().maxBytesPerLeaf()),
|
||||
leaf(nodeStore->createNewLeafNode(Data(0))) {
|
||||
|
||||
ZEROES.FillWithZeroes();
|
||||
|
||||
Data dataFixture(DataFixture::generate(nodeStore->layout().maxBytesPerLeaf()));
|
||||
|
||||
std::memcpy(randomData.data(), dataFixture.data(), randomData.size());
|
||||
}
|
||||
|
||||
Data loadData(const DataLeafNode &leaf) {
|
||||
Data data(leaf.numBytes());
|
||||
leaf.read(data.data(), 0, leaf.numBytes());
|
||||
return data;
|
||||
}
|
||||
|
||||
BlockId WriteDataToNewLeafBlockAndReturnKey() {
|
||||
auto newleaf = nodeStore->createNewLeafNode(Data(0));
|
||||
newleaf->resize(randomData.size());
|
||||
newleaf->write(randomData.data(), 0, randomData.size());
|
||||
return newleaf->blockId();
|
||||
}
|
||||
|
||||
void FillLeafBlockWithData() {
|
||||
FillLeafBlockWithData(leaf.get());
|
||||
}
|
||||
|
||||
void FillLeafBlockWithData(DataLeafNode *leaf_to_fill) {
|
||||
leaf_to_fill->resize(randomData.size());
|
||||
leaf_to_fill->write(randomData.data(), 0, randomData.size());
|
||||
}
|
||||
|
||||
unique_ref<DataLeafNode> LoadLeafNode(const BlockId &blockId) {
|
||||
auto leaf = nodeStore->load(blockId).value();
|
||||
return dynamic_pointer_move<DataLeafNode>(leaf).value();
|
||||
}
|
||||
|
||||
void ResizeLeaf(const BlockId &blockId, size_t size) {
|
||||
auto leaf = LoadLeafNode(blockId);
|
||||
EXPECT_IS_PTR_TYPE(DataLeafNode, leaf.get());
|
||||
leaf->resize(size);
|
||||
}
|
||||
|
||||
BlockId CreateLeafWithDataConvertItToInnerNodeAndReturnKey() {
|
||||
auto leaf = nodeStore->createNewLeafNode(Data(0));
|
||||
FillLeafBlockWithData(leaf.get());
|
||||
auto child = nodeStore->createNewLeafNode(Data(0));
|
||||
unique_ref<DataInnerNode> converted = DataNode::convertToNewInnerNode(std::move(leaf), LAYOUT, *child);
|
||||
return converted->blockId();
|
||||
}
|
||||
|
||||
unique_ref<DataLeafNode> CopyLeafNode(const DataLeafNode &node) {
|
||||
auto copied = nodeStore->createNewNodeAsCopyFrom(node);
|
||||
return dynamic_pointer_move<DataLeafNode>(copied).value();
|
||||
}
|
||||
|
||||
BlockId InitializeLeafGrowAndReturnKey() {
|
||||
auto leaf = DataLeafNode::CreateNewNode(blockStore, LAYOUT, Data(LAYOUT.maxBytesPerLeaf()));
|
||||
leaf->resize(5);
|
||||
return leaf->blockId();
|
||||
}
|
||||
|
||||
unique_ref<BlockStore> _blockStore;
|
||||
BlockStore *blockStore;
|
||||
unique_ref<DataNodeStore> nodeStore;
|
||||
Data ZEROES;
|
||||
Data randomData;
|
||||
unique_ref<DataLeafNode> leaf;
|
||||
|
||||
private:
|
||||
DISALLOW_COPY_AND_ASSIGN(DataLeafNodeTest);
|
||||
};
|
||||
|
||||
constexpr uint32_t DataLeafNodeTest::BLOCKSIZE_BYTES;
|
||||
constexpr DataNodeLayout DataLeafNodeTest::LAYOUT;
|
||||
|
||||
TEST_F(DataLeafNodeTest, CorrectKeyReturnedAfterLoading) {
|
||||
BlockId blockId = DataLeafNode::CreateNewNode(blockStore, LAYOUT, Data(LAYOUT.maxBytesPerLeaf()))->blockId();
|
||||
|
||||
auto loaded = nodeStore->load(blockId).value();
|
||||
EXPECT_EQ(blockId, loaded->blockId());
|
||||
}
|
||||
|
||||
TEST_F(DataLeafNodeTest, InitializesCorrectly) {
|
||||
auto leaf = DataLeafNode::CreateNewNode(blockStore, LAYOUT, Data(5));
|
||||
EXPECT_EQ(5u, leaf->numBytes());
|
||||
}
|
||||
|
||||
TEST_F(DataLeafNodeTest, ReadWrittenDataAfterReloadingBlock) {
|
||||
BlockId blockId = WriteDataToNewLeafBlockAndReturnKey();
|
||||
|
||||
auto loaded = LoadLeafNode(blockId);
|
||||
|
||||
EXPECT_EQ(randomData.size(), loaded->numBytes());
|
||||
EXPECT_EQ(randomData, loadData(*loaded));
|
||||
}
|
||||
|
||||
TEST_F(DataLeafNodeTest, NewLeafNodeHasSizeZero) {
|
||||
EXPECT_EQ(0u, leaf->numBytes());
|
||||
}
|
||||
|
||||
TEST_F(DataLeafNodeTest, NewLeafNodeHasSizeZero_AfterLoading) {
|
||||
BlockId blockId = nodeStore->createNewLeafNode(Data(0))->blockId();
|
||||
auto leaf = LoadLeafNode(blockId);
|
||||
|
||||
EXPECT_EQ(0u, leaf->numBytes());
|
||||
}
|
||||
|
||||
class DataLeafNodeSizeTest: public DataLeafNodeTest, public WithParamInterface<unsigned int> {
|
||||
public:
|
||||
BlockId CreateLeafResizeItAndReturnKey() {
|
||||
auto leaf = nodeStore->createNewLeafNode(Data(0));
|
||||
leaf->resize(GetParam());
|
||||
return leaf->blockId();
|
||||
}
|
||||
};
|
||||
INSTANTIATE_TEST_SUITE_P(DataLeafNodeSizeTest, DataLeafNodeSizeTest, Values(0, 1, 5, 16, 32, 512, DataNodeLayout(DataLeafNodeTest::BLOCKSIZE_BYTES).maxBytesPerLeaf()));
|
||||
|
||||
TEST_P(DataLeafNodeSizeTest, ResizeNode_ReadSizeImmediately) {
|
||||
leaf->resize(GetParam());
|
||||
EXPECT_EQ(GetParam(), leaf->numBytes());
|
||||
}
|
||||
|
||||
TEST_P(DataLeafNodeSizeTest, ResizeNode_ReadSizeAfterLoading) {
|
||||
BlockId blockId = CreateLeafResizeItAndReturnKey();
|
||||
|
||||
auto leaf = LoadLeafNode(blockId);
|
||||
EXPECT_EQ(GetParam(), leaf->numBytes());
|
||||
}
|
||||
|
||||
TEST_F(DataLeafNodeTest, SpaceIsZeroFilledWhenGrowing) {
|
||||
leaf->resize(randomData.size());
|
||||
EXPECT_EQ(0, std::memcmp(ZEROES.data(), loadData(*leaf).data(), randomData.size()));
|
||||
}
|
||||
|
||||
TEST_F(DataLeafNodeTest, SpaceGetsZeroFilledWhenShrinkingAndRegrowing) {
|
||||
FillLeafBlockWithData();
|
||||
// resize it smaller and then back to original size
|
||||
uint32_t smaller_size = randomData.size() - 100;
|
||||
leaf->resize(smaller_size);
|
||||
leaf->resize(randomData.size());
|
||||
|
||||
//Check that the space was filled with zeroes
|
||||
EXPECT_EQ(0, std::memcmp(ZEROES.data(), static_cast<const uint8_t*>(loadData(*leaf).data())+smaller_size, 100));
|
||||
}
|
||||
|
||||
TEST_F(DataLeafNodeTest, DataGetsZeroFilledWhenShrinking) {
|
||||
BlockId blockId = WriteDataToNewLeafBlockAndReturnKey();
|
||||
uint32_t smaller_size = randomData.size() - 100;
|
||||
{
|
||||
//At first, we expect there to be random data in the underlying data block
|
||||
auto block = blockStore->load(blockId).value();
|
||||
EXPECT_EQ(0, std::memcmp(randomData.dataOffset(smaller_size), static_cast<const uint8_t*>(block->data())+DataNodeLayout::HEADERSIZE_BYTES+smaller_size, 100));
|
||||
}
|
||||
|
||||
//After shrinking, we expect there to be zeroes in the underlying data block
|
||||
ResizeLeaf(blockId, smaller_size);
|
||||
{
|
||||
auto block = blockStore->load(blockId).value();
|
||||
EXPECT_EQ(0, std::memcmp(ZEROES.data(), static_cast<const uint8_t*>(block->data())+DataNodeLayout::HEADERSIZE_BYTES+smaller_size, 100));
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(DataLeafNodeTest, ShrinkingDoesntDestroyValidDataRegion) {
|
||||
FillLeafBlockWithData();
|
||||
uint32_t smaller_size = randomData.size() - 100;
|
||||
leaf->resize(smaller_size);
|
||||
|
||||
//Check that the remaining data region is unchanged
|
||||
EXPECT_EQ(0, std::memcmp(randomData.data(), loadData(*leaf).data(), smaller_size));
|
||||
}
|
||||
|
||||
TEST_F(DataLeafNodeTest, ConvertToInternalNode) {
|
||||
auto child = nodeStore->createNewLeafNode(Data(0));
|
||||
BlockId leaf_blockId = leaf->blockId();
|
||||
unique_ref<DataInnerNode> converted = DataNode::convertToNewInnerNode(std::move(leaf), LAYOUT, *child);
|
||||
|
||||
EXPECT_EQ(1u, converted->numChildren());
|
||||
EXPECT_EQ(child->blockId(), converted->readChild(0).blockId());
|
||||
EXPECT_EQ(leaf_blockId, converted->blockId());
|
||||
}
|
||||
|
||||
TEST_F(DataLeafNodeTest, ConvertToInternalNodeZeroesOutChildrenRegion) {
|
||||
BlockId blockId = CreateLeafWithDataConvertItToInnerNodeAndReturnKey();
|
||||
|
||||
auto block = blockStore->load(blockId).value();
|
||||
EXPECT_EQ(0, std::memcmp(ZEROES.data(), static_cast<const uint8_t*>(block->data())+DataNodeLayout::HEADERSIZE_BYTES+sizeof(DataInnerNode::ChildEntry), nodeStore->layout().maxBytesPerLeaf()-sizeof(DataInnerNode::ChildEntry)));
|
||||
}
|
||||
|
||||
TEST_F(DataLeafNodeTest, CopyingCreatesANewLeaf) {
|
||||
auto copied = CopyLeafNode(*leaf);
|
||||
EXPECT_NE(leaf->blockId(), copied->blockId());
|
||||
}
|
||||
|
||||
TEST_F(DataLeafNodeTest, CopyEmptyLeaf) {
|
||||
auto copied = CopyLeafNode(*leaf);
|
||||
EXPECT_EQ(leaf->numBytes(), copied->numBytes());
|
||||
}
|
||||
|
||||
TEST_F(DataLeafNodeTest, CopyDataLeaf) {
|
||||
FillLeafBlockWithData();
|
||||
auto copied = CopyLeafNode(*leaf);
|
||||
|
||||
EXPECT_EQ(leaf->numBytes(), copied->numBytes());
|
||||
EXPECT_EQ(0, std::memcmp(loadData(*leaf).data(), loadData(*copied).data(), leaf->numBytes()));
|
||||
|
||||
//Test that they have different data regions (changing the original one doesn't change the copy)
|
||||
uint8_t data = 0;
|
||||
leaf->write(&data, 0, 1);
|
||||
EXPECT_EQ(data, deserialize<uint8_t>(loadData(*leaf).data()));
|
||||
EXPECT_NE(data, deserialize<uint8_t>(loadData(*copied).data()));
|
||||
}
|
||||
|
||||
|
||||
struct DataRange {
|
||||
uint64_t leafsize;
|
||||
uint64_t offset;
|
||||
uint64_t count;
|
||||
};
|
||||
|
||||
class DataLeafNodeDataTest: public DataLeafNodeTest, public WithParamInterface<DataRange> {
|
||||
public:
|
||||
Data foregroundData;
|
||||
Data backgroundData;
|
||||
|
||||
DataLeafNodeDataTest():
|
||||
foregroundData(DataFixture::generate(GetParam().count, 0)),
|
||||
backgroundData(DataFixture::generate(GetParam().leafsize, 1)) {
|
||||
}
|
||||
|
||||
BlockId CreateLeafWriteToItAndReturnKey(const Data &to_write) {
|
||||
auto newleaf = nodeStore->createNewLeafNode(Data(0));
|
||||
|
||||
newleaf->resize(GetParam().leafsize);
|
||||
newleaf->write(to_write.data(), GetParam().offset, GetParam().count);
|
||||
return newleaf->blockId();
|
||||
}
|
||||
|
||||
void EXPECT_DATA_READS_AS(const Data &expected, const DataLeafNode &leaf, uint64_t offset, uint64_t count) {
|
||||
Data read(count);
|
||||
leaf.read(read.data(), offset, count);
|
||||
EXPECT_EQ(expected, read);
|
||||
}
|
||||
|
||||
void EXPECT_DATA_READS_AS_OUTSIDE_OF(const Data &expected, const DataLeafNode &leaf, uint64_t start, uint64_t count) {
|
||||
Data begin(start);
|
||||
Data end(GetParam().leafsize - count - start);
|
||||
|
||||
std::memcpy(begin.data(), expected.data(), start);
|
||||
std::memcpy(end.data(), expected.dataOffset(start+count), end.size());
|
||||
|
||||
EXPECT_DATA_READS_AS(begin, leaf, 0, start);
|
||||
EXPECT_DATA_READS_AS(end, leaf, start + count, end.size());
|
||||
}
|
||||
|
||||
void EXPECT_DATA_IS_ZEROES_OUTSIDE_OF(const DataLeafNode &leaf, uint64_t start, uint64_t count) {
|
||||
Data ZEROES(GetParam().leafsize);
|
||||
ZEROES.FillWithZeroes();
|
||||
EXPECT_DATA_READS_AS_OUTSIDE_OF(ZEROES, leaf, start, count);
|
||||
}
|
||||
};
|
||||
INSTANTIATE_TEST_SUITE_P(DataLeafNodeDataTest, DataLeafNodeDataTest, Values(
|
||||
DataRange{DataLeafNodeTest::LAYOUT.maxBytesPerLeaf(), 0, DataLeafNodeTest::LAYOUT.maxBytesPerLeaf()}, // full size leaf, access beginning to end
|
||||
DataRange{DataLeafNodeTest::LAYOUT.maxBytesPerLeaf(), 100, DataLeafNodeTest::LAYOUT.maxBytesPerLeaf()-200}, // full size leaf, access middle to middle
|
||||
DataRange{DataLeafNodeTest::LAYOUT.maxBytesPerLeaf(), 0, DataLeafNodeTest::LAYOUT.maxBytesPerLeaf()-100}, // full size leaf, access beginning to middle
|
||||
DataRange{DataLeafNodeTest::LAYOUT.maxBytesPerLeaf(), 100, DataLeafNodeTest::LAYOUT.maxBytesPerLeaf()-100}, // full size leaf, access middle to end
|
||||
DataRange{DataLeafNodeTest::LAYOUT.maxBytesPerLeaf()-100, 0, DataLeafNodeTest::LAYOUT.maxBytesPerLeaf()-100}, // non-full size leaf, access beginning to end
|
||||
DataRange{DataLeafNodeTest::LAYOUT.maxBytesPerLeaf()-100, 100, DataLeafNodeTest::LAYOUT.maxBytesPerLeaf()-300}, // non-full size leaf, access middle to middle
|
||||
DataRange{DataLeafNodeTest::LAYOUT.maxBytesPerLeaf()-100, 0, DataLeafNodeTest::LAYOUT.maxBytesPerLeaf()-200}, // non-full size leaf, access beginning to middle
|
||||
DataRange{DataLeafNodeTest::LAYOUT.maxBytesPerLeaf()-100, 100, DataLeafNodeTest::LAYOUT.maxBytesPerLeaf()-200} // non-full size leaf, access middle to end
|
||||
));
|
||||
|
||||
TEST_P(DataLeafNodeDataTest, WriteAndReadImmediately) {
|
||||
leaf->resize(GetParam().leafsize);
|
||||
leaf->write(this->foregroundData.data(), GetParam().offset, GetParam().count);
|
||||
|
||||
EXPECT_DATA_READS_AS(this->foregroundData, *leaf, GetParam().offset, GetParam().count);
|
||||
EXPECT_DATA_IS_ZEROES_OUTSIDE_OF(*leaf, GetParam().offset, GetParam().count);
|
||||
}
|
||||
|
||||
TEST_P(DataLeafNodeDataTest, WriteAndReadAfterLoading) {
|
||||
BlockId blockId = CreateLeafWriteToItAndReturnKey(this->foregroundData);
|
||||
|
||||
auto loaded_leaf = LoadLeafNode(blockId);
|
||||
EXPECT_DATA_READS_AS(this->foregroundData, *loaded_leaf, GetParam().offset, GetParam().count);
|
||||
EXPECT_DATA_IS_ZEROES_OUTSIDE_OF(*loaded_leaf, GetParam().offset, GetParam().count);
|
||||
}
|
||||
|
||||
TEST_P(DataLeafNodeDataTest, OverwriteAndRead) {
|
||||
leaf->resize(GetParam().leafsize);
|
||||
leaf->write(this->backgroundData.data(), 0, GetParam().leafsize);
|
||||
leaf->write(this->foregroundData.data(), GetParam().offset, GetParam().count);
|
||||
EXPECT_DATA_READS_AS(this->foregroundData, *leaf, GetParam().offset, GetParam().count);
|
||||
EXPECT_DATA_READS_AS_OUTSIDE_OF(this->backgroundData, *leaf, GetParam().offset, GetParam().count);
|
||||
}
|
||||
|
||||
}
|
@ -1,150 +0,0 @@
|
||||
#include "blobstore/implementations/onblocks/datanodestore/DataInnerNode.h"
|
||||
#include "blobstore/implementations/onblocks/datanodestore/DataLeafNode.h"
|
||||
#include "blobstore/implementations/onblocks/datanodestore/DataNode.h"
|
||||
#include "blobstore/implementations/onblocks/datanodestore/DataNodeStore.h"
|
||||
#include "blobstore/implementations/onblocks/BlobStoreOnBlocks.h"
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include <blockstore/implementations/testfake/FakeBlockStore.h>
|
||||
#include <blockstore/implementations/testfake/FakeBlock.h>
|
||||
#include <cpp-utils/pointer/unique_ref_boost_optional_gtest_workaround.h>
|
||||
|
||||
using ::testing::Test;
|
||||
using cpputils::unique_ref;
|
||||
using cpputils::make_unique_ref;
|
||||
using std::string;
|
||||
using boost::none;
|
||||
|
||||
using blockstore::BlockStore;
|
||||
using blockstore::testfake::FakeBlockStore;
|
||||
using blockstore::BlockId;
|
||||
using cpputils::Data;
|
||||
using namespace blobstore;
|
||||
using namespace blobstore::onblocks;
|
||||
using namespace blobstore::onblocks::datanodestore;
|
||||
|
||||
class DataNodeStoreTest: public Test {
|
||||
public:
|
||||
static constexpr uint32_t BLOCKSIZE_BYTES = 1024;
|
||||
|
||||
unique_ref<BlockStore> _blockStore = make_unique_ref<FakeBlockStore>();
|
||||
BlockStore *blockStore = _blockStore.get();
|
||||
unique_ref<DataNodeStore> nodeStore = make_unique_ref<DataNodeStore>(std::move(_blockStore), BLOCKSIZE_BYTES);
|
||||
};
|
||||
|
||||
constexpr uint32_t DataNodeStoreTest::BLOCKSIZE_BYTES;
|
||||
|
||||
#define EXPECT_IS_PTR_TYPE(Type, ptr) EXPECT_NE(nullptr, dynamic_cast<Type*>(ptr)) << "Given pointer cannot be cast to the given type"
|
||||
|
||||
TEST_F(DataNodeStoreTest, CreateLeafNodeCreatesLeafNode) {
|
||||
auto node = nodeStore->createNewLeafNode(Data(0));
|
||||
EXPECT_IS_PTR_TYPE(DataLeafNode, node.get());
|
||||
}
|
||||
|
||||
TEST_F(DataNodeStoreTest, CreateInnerNodeCreatesInnerNode) {
|
||||
auto leaf = nodeStore->createNewLeafNode(Data(0));
|
||||
|
||||
auto node = nodeStore->createNewInnerNode(1, {leaf->blockId()});
|
||||
EXPECT_IS_PTR_TYPE(DataInnerNode, node.get());
|
||||
}
|
||||
|
||||
TEST_F(DataNodeStoreTest, LeafNodeIsRecognizedAfterStoreAndLoad) {
|
||||
BlockId blockId = nodeStore->createNewLeafNode(Data(0))->blockId();
|
||||
|
||||
auto loaded_node = nodeStore->load(blockId).value();
|
||||
|
||||
EXPECT_IS_PTR_TYPE(DataLeafNode, loaded_node.get());
|
||||
}
|
||||
|
||||
TEST_F(DataNodeStoreTest, InnerNodeWithDepth1IsRecognizedAfterStoreAndLoad) {
|
||||
auto leaf = nodeStore->createNewLeafNode(Data(0));
|
||||
BlockId blockId = nodeStore->createNewInnerNode(1, {leaf->blockId()})->blockId();
|
||||
|
||||
auto loaded_node = nodeStore->load(blockId).value();
|
||||
|
||||
EXPECT_IS_PTR_TYPE(DataInnerNode, loaded_node.get());
|
||||
}
|
||||
|
||||
TEST_F(DataNodeStoreTest, InnerNodeWithDepth2IsRecognizedAfterStoreAndLoad) {
|
||||
auto leaf = nodeStore->createNewLeafNode(Data(0));
|
||||
auto inner = nodeStore->createNewInnerNode(1, {leaf->blockId()});
|
||||
BlockId blockId = nodeStore->createNewInnerNode(2, {inner->blockId()})->blockId();
|
||||
|
||||
auto loaded_node = nodeStore->load(blockId).value();
|
||||
|
||||
EXPECT_IS_PTR_TYPE(DataInnerNode, loaded_node.get());
|
||||
}
|
||||
|
||||
TEST_F(DataNodeStoreTest, DataNodeCrashesOnLoadIfDepthIsTooHigh) {
|
||||
auto block = blockStore->create(Data(BLOCKSIZE_BYTES));
|
||||
BlockId blockId = block->blockId();
|
||||
{
|
||||
DataNodeView view(std::move(block));
|
||||
view.setDepth(DataNodeStore::MAX_DEPTH + 1);
|
||||
}
|
||||
|
||||
EXPECT_ANY_THROW(
|
||||
nodeStore->load(blockId)
|
||||
);
|
||||
}
|
||||
|
||||
TEST_F(DataNodeStoreTest, CreatedInnerNodeIsInitialized) {
|
||||
auto leaf = nodeStore->createNewLeafNode(Data(0));
|
||||
auto node = nodeStore->createNewInnerNode(1, {leaf->blockId()});
|
||||
EXPECT_EQ(1u, node->numChildren());
|
||||
EXPECT_EQ(leaf->blockId(), node->readChild(0).blockId());
|
||||
}
|
||||
|
||||
TEST_F(DataNodeStoreTest, CreatedLeafNodeIsInitialized) {
|
||||
auto leaf = nodeStore->createNewLeafNode(Data(0));
|
||||
EXPECT_EQ(0u, leaf->numBytes());
|
||||
}
|
||||
|
||||
TEST_F(DataNodeStoreTest, NodeIsNotLoadableAfterDeleting) {
|
||||
auto nodekey = nodeStore->createNewLeafNode(Data(0))->blockId();
|
||||
auto node = nodeStore->load(nodekey);
|
||||
EXPECT_NE(none, node);
|
||||
nodeStore->remove(std::move(*node));
|
||||
EXPECT_EQ(none, nodeStore->load(nodekey));
|
||||
}
|
||||
|
||||
TEST_F(DataNodeStoreTest, NumNodesIsCorrectOnEmptyNodestore) {
|
||||
EXPECT_EQ(0u, nodeStore->numNodes());
|
||||
}
|
||||
|
||||
TEST_F(DataNodeStoreTest, NumNodesIsCorrectAfterAddingOneLeafNode) {
|
||||
nodeStore->createNewLeafNode(Data(0));
|
||||
EXPECT_EQ(1u, nodeStore->numNodes());
|
||||
}
|
||||
|
||||
TEST_F(DataNodeStoreTest, NumNodesIsCorrectAfterRemovingTheLastNode) {
|
||||
auto leaf = nodeStore->createNewLeafNode(Data(0));
|
||||
nodeStore->remove(std::move(leaf));
|
||||
EXPECT_EQ(0u, nodeStore->numNodes());
|
||||
}
|
||||
|
||||
TEST_F(DataNodeStoreTest, NumNodesIsCorrectAfterAddingTwoNodes) {
|
||||
auto leaf = nodeStore->createNewLeafNode(Data(0));
|
||||
auto node = nodeStore->createNewInnerNode(1, {leaf->blockId()});
|
||||
EXPECT_EQ(2u, nodeStore->numNodes());
|
||||
}
|
||||
|
||||
TEST_F(DataNodeStoreTest, NumNodesIsCorrectAfterRemovingANode) {
|
||||
auto leaf = nodeStore->createNewLeafNode(Data(0));
|
||||
auto node = nodeStore->createNewInnerNode(1, {leaf->blockId()});
|
||||
nodeStore->remove(std::move(node));
|
||||
EXPECT_EQ(1u, nodeStore->numNodes());
|
||||
}
|
||||
|
||||
TEST_F(DataNodeStoreTest, PhysicalBlockSize_Leaf) {
|
||||
auto leaf = nodeStore->createNewLeafNode(Data(0));
|
||||
auto block = blockStore->load(leaf->blockId()).value();
|
||||
EXPECT_EQ(BLOCKSIZE_BYTES, block->size());
|
||||
}
|
||||
|
||||
TEST_F(DataNodeStoreTest, PhysicalBlockSize_Inner) {
|
||||
auto leaf = nodeStore->createNewLeafNode(Data(0));
|
||||
auto node = nodeStore->createNewInnerNode(1, {leaf->blockId()});
|
||||
auto block = blockStore->load(node->blockId()).value();
|
||||
EXPECT_EQ(BLOCKSIZE_BYTES, block->size());
|
||||
}
|
@ -1,98 +0,0 @@
|
||||
#include "blobstore/implementations/onblocks/datanodestore/DataNodeView.h"
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include <blockstore/implementations/testfake/FakeBlockStore.h>
|
||||
#include <blockstore/implementations/testfake/FakeBlock.h>
|
||||
#include "blobstore/implementations/onblocks/BlobStoreOnBlocks.h"
|
||||
#include <cpp-utils/data/DataFixture.h>
|
||||
|
||||
using ::testing::Test;
|
||||
using ::testing::WithParamInterface;
|
||||
using ::testing::Values;
|
||||
using std::string;
|
||||
|
||||
using blockstore::BlockStore;
|
||||
using blockstore::testfake::FakeBlockStore;
|
||||
using cpputils::Data;
|
||||
using cpputils::DataFixture;
|
||||
using cpputils::unique_ref;
|
||||
using cpputils::make_unique_ref;
|
||||
using namespace blobstore;
|
||||
using namespace blobstore::onblocks;
|
||||
using namespace blobstore::onblocks::datanodestore;
|
||||
|
||||
class DataNodeViewTest: public Test {
|
||||
public:
|
||||
static constexpr uint32_t BLOCKSIZE_BYTES = 1024;
|
||||
static constexpr uint32_t DATASIZE_BYTES = DataNodeLayout(DataNodeViewTest::BLOCKSIZE_BYTES).datasizeBytes();
|
||||
|
||||
unique_ref<BlockStore> blockStore = make_unique_ref<FakeBlockStore>();
|
||||
};
|
||||
|
||||
class DataNodeViewDepthTest: public DataNodeViewTest, public WithParamInterface<uint8_t> {
|
||||
};
|
||||
INSTANTIATE_TEST_SUITE_P(DataNodeViewDepthTest, DataNodeViewDepthTest, Values(0, 1, 3, 10, 100));
|
||||
|
||||
TEST_P(DataNodeViewDepthTest, DepthIsStored) {
|
||||
auto block = blockStore->create(Data(BLOCKSIZE_BYTES));
|
||||
auto blockId = block->blockId();
|
||||
{
|
||||
DataNodeView view(std::move(block));
|
||||
view.setDepth(GetParam());
|
||||
}
|
||||
DataNodeView view(blockStore->load(blockId).value());
|
||||
EXPECT_EQ(GetParam(), view.Depth());
|
||||
}
|
||||
|
||||
class DataNodeViewSizeTest: public DataNodeViewTest, public WithParamInterface<uint32_t> {
|
||||
};
|
||||
INSTANTIATE_TEST_SUITE_P(DataNodeViewSizeTest, DataNodeViewSizeTest, Values(0, 50, 64, 1024, 1024*1024*1024));
|
||||
|
||||
TEST_P(DataNodeViewSizeTest, SizeIsStored) {
|
||||
auto block = blockStore->create(Data(BLOCKSIZE_BYTES));
|
||||
auto blockId = block->blockId();
|
||||
{
|
||||
DataNodeView view(std::move(block));
|
||||
view.setSize(GetParam());
|
||||
}
|
||||
DataNodeView view(blockStore->load(blockId).value());
|
||||
EXPECT_EQ(GetParam(), view.Size());
|
||||
}
|
||||
|
||||
TEST_F(DataNodeViewTest, DataIsStored) {
|
||||
Data randomData = DataFixture::generate(DATASIZE_BYTES);
|
||||
auto block = blockStore->create(Data(BLOCKSIZE_BYTES));
|
||||
auto blockId = block->blockId();
|
||||
{
|
||||
DataNodeView view(std::move(block));
|
||||
view.write(randomData.data(), 0, randomData.size());
|
||||
}
|
||||
DataNodeView view(blockStore->load(blockId).value());
|
||||
EXPECT_EQ(0, std::memcmp(view.data(), randomData.data(), randomData.size()));
|
||||
}
|
||||
|
||||
TEST_F(DataNodeViewTest, HeaderAndBodyDontOverlap) {
|
||||
Data randomData = DataFixture::generate(DATASIZE_BYTES);
|
||||
auto block = blockStore->create(Data(BLOCKSIZE_BYTES));
|
||||
auto blockId = block->blockId();
|
||||
{
|
||||
DataNodeView view(std::move(block));
|
||||
view.setDepth(3);
|
||||
view.setSize(1000000000u);
|
||||
view.write(randomData.data(), 0, DATASIZE_BYTES);
|
||||
}
|
||||
DataNodeView view(blockStore->load(blockId).value());
|
||||
EXPECT_EQ(3, view.Depth());
|
||||
EXPECT_EQ(1000000000u, view.Size());
|
||||
EXPECT_EQ(0, std::memcmp(view.data(), randomData.data(), DATASIZE_BYTES));
|
||||
}
|
||||
|
||||
TEST_F(DataNodeViewTest, Data) {
|
||||
auto block = blockStore->create(Data(BLOCKSIZE_BYTES));
|
||||
const uint8_t *blockBegin = static_cast<const uint8_t*>(block->data());
|
||||
DataNodeView view(std::move(block));
|
||||
|
||||
EXPECT_EQ(blockBegin+DataNodeLayout::HEADERSIZE_BYTES, static_cast<const uint8_t*>(view.data()));
|
||||
}
|
||||
|
||||
//TODO Test that header fields (and data) are also stored over reloads
|
@ -1,69 +0,0 @@
|
||||
#include "testutils/DataTreeTest.h"
|
||||
|
||||
#include "blobstore/implementations/onblocks/datanodestore/DataNodeStore.h"
|
||||
#include "blobstore/implementations/onblocks/datatreestore/DataTreeStore.h"
|
||||
#include <blockstore/implementations/testfake/FakeBlockStore.h>
|
||||
#include <cpp-utils/pointer/unique_ref_boost_optional_gtest_workaround.h>
|
||||
|
||||
using blockstore::BlockId;
|
||||
using boost::none;
|
||||
|
||||
using namespace blobstore::onblocks::datatreestore;
|
||||
|
||||
class DataTreeStoreTest: public DataTreeTest {
|
||||
};
|
||||
|
||||
TEST_F(DataTreeStoreTest, CorrectKeyReturned) {
|
||||
BlockId blockId = treeStore.createNewTree()->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
EXPECT_EQ(blockId, tree->blockId());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeStoreTest, CreatedTreeIsLoadable) {
|
||||
auto blockId = treeStore.createNewTree()->blockId();
|
||||
auto loaded = treeStore.load(blockId);
|
||||
EXPECT_NE(none, loaded);
|
||||
}
|
||||
|
||||
TEST_F(DataTreeStoreTest, NewTreeIsLeafOnly) {
|
||||
auto tree = treeStore.createNewTree();
|
||||
|
||||
EXPECT_IS_LEAF_NODE(tree->blockId());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeStoreTest, TreeIsNotLoadableAfterRemove_DeleteByTree) {
|
||||
BlockId blockId = treeStore.createNewTree()->blockId();
|
||||
auto tree = treeStore.load(blockId);
|
||||
EXPECT_NE(none, tree);
|
||||
treeStore.remove(std::move(*tree));
|
||||
EXPECT_EQ(none, treeStore.load(blockId));
|
||||
}
|
||||
|
||||
TEST_F(DataTreeStoreTest, TreeIsNotLoadableAfterRemove_DeleteByKey) {
|
||||
BlockId blockId = treeStore.createNewTree()->blockId();
|
||||
treeStore.remove(blockId);
|
||||
EXPECT_EQ(none, treeStore.load(blockId));
|
||||
}
|
||||
|
||||
TEST_F(DataTreeStoreTest, RemovingTreeRemovesAllNodesOfTheTree_DeleteByTree) {
|
||||
auto tree1_blockId = CreateThreeLevelMinData()->blockId();
|
||||
auto tree2_blockId = treeStore.createNewTree()->blockId();
|
||||
|
||||
auto tree1 = treeStore.load(tree1_blockId).value();
|
||||
treeStore.remove(std::move(tree1));
|
||||
|
||||
//Check that the only remaining node is tree2
|
||||
EXPECT_EQ(1u, nodeStore->numNodes());
|
||||
EXPECT_NE(none, treeStore.load(tree2_blockId));
|
||||
}
|
||||
|
||||
TEST_F(DataTreeStoreTest, RemovingTreeRemovesAllNodesOfTheTree_DeleteByKey) {
|
||||
auto tree1_blockId = CreateThreeLevelMinData()->blockId();
|
||||
auto tree2_blockId = treeStore.createNewTree()->blockId();
|
||||
|
||||
treeStore.remove(tree1_blockId);
|
||||
|
||||
//Check that the only remaining node is tree2
|
||||
EXPECT_EQ(1u, nodeStore->numNodes());
|
||||
EXPECT_NE(none, treeStore.load(tree2_blockId));
|
||||
}
|
@ -1,88 +0,0 @@
|
||||
#include "testutils/DataTreeTest.h"
|
||||
#include <gmock/gmock.h>
|
||||
|
||||
using ::testing::WithParamInterface;
|
||||
using ::testing::Values;
|
||||
|
||||
using blobstore::onblocks::datanodestore::DataNodeLayout;
|
||||
using blockstore::BlockId;
|
||||
|
||||
class DataTreeTest_NumStoredBytes: public DataTreeTest {
|
||||
public:
|
||||
};
|
||||
|
||||
TEST_F(DataTreeTest_NumStoredBytes, CreatedTreeIsEmpty) {
|
||||
auto tree = treeStore.createNewTree();
|
||||
EXPECT_EQ(0u, tree->numBytes());
|
||||
}
|
||||
|
||||
class DataTreeTest_NumStoredBytes_P: public DataTreeTest_NumStoredBytes, public WithParamInterface<uint32_t> {};
|
||||
INSTANTIATE_TEST_SUITE_P(EmptyLastLeaf, DataTreeTest_NumStoredBytes_P, Values(0u));
|
||||
INSTANTIATE_TEST_SUITE_P(HalfFullLastLeaf, DataTreeTest_NumStoredBytes_P, Values(5u, 10u));
|
||||
INSTANTIATE_TEST_SUITE_P(FullLastLeaf, DataTreeTest_NumStoredBytes_P, Values(static_cast<uint32_t>(DataNodeLayout(DataTreeTest_NumStoredBytes::BLOCKSIZE_BYTES).maxBytesPerLeaf())));
|
||||
|
||||
//TODO Test numLeaves() and numNodes() also two configurations with same number of bytes but different number of leaves (last leaf has 0 bytes)
|
||||
|
||||
TEST_P(DataTreeTest_NumStoredBytes_P, SingleLeaf) {
|
||||
BlockId blockId = CreateLeafWithSize(GetParam())->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
EXPECT_EQ(GetParam(), tree->numBytes());
|
||||
EXPECT_EQ(1, tree->numLeaves());
|
||||
EXPECT_EQ(1, tree->numNodes());
|
||||
}
|
||||
|
||||
TEST_P(DataTreeTest_NumStoredBytes_P, TwoLeafTree) {
|
||||
BlockId blockId = CreateTwoLeafWithSecondLeafSize(GetParam())->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
EXPECT_EQ(nodeStore->layout().maxBytesPerLeaf() + GetParam(), tree->numBytes());
|
||||
EXPECT_EQ(2, tree->numLeaves());
|
||||
EXPECT_EQ(3, tree->numNodes());
|
||||
}
|
||||
|
||||
TEST_P(DataTreeTest_NumStoredBytes_P, FullTwolevelTree) {
|
||||
BlockId blockId = CreateFullTwoLevelWithLastLeafSize(GetParam())->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
EXPECT_EQ(nodeStore->layout().maxBytesPerLeaf()*(nodeStore->layout().maxChildrenPerInnerNode()-1) + GetParam(), tree->numBytes());
|
||||
EXPECT_EQ(nodeStore->layout().maxChildrenPerInnerNode(), tree->numLeaves());
|
||||
EXPECT_EQ(1 + nodeStore->layout().maxChildrenPerInnerNode(), tree->numNodes());
|
||||
}
|
||||
|
||||
TEST_P(DataTreeTest_NumStoredBytes_P, ThreeLevelTreeWithOneChild) {
|
||||
BlockId blockId = CreateThreeLevelWithOneChildAndLastLeafSize(GetParam())->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
EXPECT_EQ(nodeStore->layout().maxBytesPerLeaf() + GetParam(), tree->numBytes());
|
||||
EXPECT_EQ(2, tree->numLeaves());
|
||||
EXPECT_EQ(4, tree->numNodes());
|
||||
}
|
||||
|
||||
TEST_P(DataTreeTest_NumStoredBytes_P, ThreeLevelTreeWithTwoChildren) {
|
||||
BlockId blockId = CreateThreeLevelWithTwoChildrenAndLastLeafSize(GetParam())->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
EXPECT_EQ(nodeStore->layout().maxBytesPerLeaf()*nodeStore->layout().maxChildrenPerInnerNode() + nodeStore->layout().maxBytesPerLeaf() + GetParam(), tree->numBytes());
|
||||
EXPECT_EQ(2 + nodeStore->layout().maxChildrenPerInnerNode(), tree->numLeaves());
|
||||
EXPECT_EQ(5 + nodeStore->layout().maxChildrenPerInnerNode(), tree->numNodes());
|
||||
}
|
||||
|
||||
TEST_P(DataTreeTest_NumStoredBytes_P, ThreeLevelTreeWithThreeChildren) {
|
||||
BlockId blockId = CreateThreeLevelWithThreeChildrenAndLastLeafSize(GetParam())->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
EXPECT_EQ(2*nodeStore->layout().maxBytesPerLeaf()*nodeStore->layout().maxChildrenPerInnerNode() + nodeStore->layout().maxBytesPerLeaf() + GetParam(), tree->numBytes());
|
||||
EXPECT_EQ(2 + 2*nodeStore->layout().maxChildrenPerInnerNode(), tree->numLeaves());
|
||||
EXPECT_EQ(6 + 2*nodeStore->layout().maxChildrenPerInnerNode(), tree->numNodes());
|
||||
}
|
||||
|
||||
TEST_P(DataTreeTest_NumStoredBytes_P, FullThreeLevelTree) {
|
||||
BlockId blockId = CreateFullThreeLevelWithLastLeafSize(GetParam())->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
EXPECT_EQ(nodeStore->layout().maxBytesPerLeaf()*nodeStore->layout().maxChildrenPerInnerNode()*(nodeStore->layout().maxChildrenPerInnerNode()-1) + nodeStore->layout().maxBytesPerLeaf()*(nodeStore->layout().maxChildrenPerInnerNode()-1) + GetParam(), tree->numBytes());
|
||||
EXPECT_EQ(nodeStore->layout().maxChildrenPerInnerNode()*nodeStore->layout().maxChildrenPerInnerNode(), tree->numLeaves());
|
||||
EXPECT_EQ(1 + nodeStore->layout().maxChildrenPerInnerNode() + nodeStore->layout().maxChildrenPerInnerNode()*nodeStore->layout().maxChildrenPerInnerNode(), tree->numNodes());
|
||||
}
|
||||
|
||||
TEST_P(DataTreeTest_NumStoredBytes_P, FourLevelMinDataTree) {
|
||||
BlockId blockId = CreateFourLevelMinDataWithLastLeafSize(GetParam())->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
EXPECT_EQ(nodeStore->layout().maxBytesPerLeaf()*nodeStore->layout().maxChildrenPerInnerNode()*nodeStore->layout().maxChildrenPerInnerNode() + GetParam(), tree->numBytes());
|
||||
EXPECT_EQ(1 + nodeStore->layout().maxChildrenPerInnerNode()*nodeStore->layout().maxChildrenPerInnerNode(), tree->numLeaves());
|
||||
EXPECT_EQ(5 + nodeStore->layout().maxChildrenPerInnerNode() + nodeStore->layout().maxChildrenPerInnerNode()*nodeStore->layout().maxChildrenPerInnerNode(), tree->numNodes());
|
||||
}
|
@ -1,558 +0,0 @@
|
||||
#include "testutils/DataTreeTest.h"
|
||||
|
||||
#include <gmock/gmock.h>
|
||||
|
||||
using blobstore::onblocks::datatreestore::DataTree;
|
||||
using blockstore::BlockId;
|
||||
using cpputils::Data;
|
||||
|
||||
class DataTreeTest_Performance: public DataTreeTest {
|
||||
public:
|
||||
void TraverseByWriting(DataTree *tree, uint64_t beginIndex, uint64_t endIndex) {
|
||||
uint64_t offset = beginIndex * maxBytesPerLeaf;
|
||||
uint64_t count = endIndex * maxBytesPerLeaf - offset;
|
||||
Data data(count);
|
||||
data.FillWithZeroes();
|
||||
tree->writeBytes(data.data(), offset, count);
|
||||
}
|
||||
|
||||
void TraverseByReading(DataTree *tree, uint64_t beginIndex, uint64_t endIndex) {
|
||||
uint64_t offset = beginIndex * maxBytesPerLeaf;
|
||||
uint64_t count = endIndex * maxBytesPerLeaf - offset;
|
||||
Data data(count);
|
||||
tree->readBytes(data.data(), offset, count);
|
||||
}
|
||||
|
||||
uint64_t maxChildrenPerInnerNode = nodeStore->layout().maxChildrenPerInnerNode();
|
||||
uint64_t maxBytesPerLeaf = nodeStore->layout().maxBytesPerLeaf();
|
||||
};
|
||||
|
||||
TEST_F(DataTreeTest_Performance, DeletingDoesntLoadLeaves_Twolevel_DeleteByTree) {
|
||||
auto blockId = CreateFullTwoLevel()->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
treeStore.remove(std::move(tree));
|
||||
|
||||
EXPECT_EQ(0u, blockStore->loadedBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(1u + maxChildrenPerInnerNode, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->distinctWrittenBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, DeletingDoesntLoadLeaves_Twolevel_DeleteByKey) {
|
||||
auto blockId = CreateFullTwoLevel()->blockId();
|
||||
blockStore->resetCounters();
|
||||
|
||||
treeStore.remove(blockId);
|
||||
|
||||
EXPECT_EQ(1u, blockStore->loadedBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(1u + maxChildrenPerInnerNode, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->distinctWrittenBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, DeletingDoesntLoadLeaves_Threelevel_DeleteByTree) {
|
||||
auto blockId = CreateFullThreeLevel()->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
treeStore.remove(std::move(tree));
|
||||
|
||||
EXPECT_EQ(maxChildrenPerInnerNode, blockStore->loadedBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(1u + maxChildrenPerInnerNode + maxChildrenPerInnerNode*maxChildrenPerInnerNode, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->distinctWrittenBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, DeletingDoesntLoadLeaves_Threelevel_DeleteByKey) {
|
||||
auto blockId = CreateFullThreeLevel()->blockId();
|
||||
blockStore->resetCounters();
|
||||
|
||||
treeStore.remove(blockId);
|
||||
|
||||
EXPECT_EQ(1u + maxChildrenPerInnerNode, blockStore->loadedBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(1u + maxChildrenPerInnerNode + maxChildrenPerInnerNode*maxChildrenPerInnerNode, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->distinctWrittenBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, TraverseLeaves_Twolevel_All_ByWriting) {
|
||||
auto blockId = CreateFullTwoLevel()->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
TraverseByWriting(tree.get(), 0, maxChildrenPerInnerNode);
|
||||
|
||||
EXPECT_EQ(1u, blockStore->loadedBlocks().size()); // Has to load the rightmost leaf once to adapt its size, rest of the leaves aren't loaded but just overwritten
|
||||
EXPECT_EQ(0u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(maxChildrenPerInnerNode, blockStore->distinctWrittenBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, TraverseLeaves_Twolevel_All_ByReading) {
|
||||
auto blockId = CreateFullTwoLevel()->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
TraverseByReading(tree.get(), 0, maxChildrenPerInnerNode);
|
||||
|
||||
EXPECT_EQ(1u + maxChildrenPerInnerNode, blockStore->loadedBlocks().size()); // Has to read the rightmost leaf an additional time in the beginning to determine size.
|
||||
EXPECT_EQ(0u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->distinctWrittenBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, TraverseLeaves_Twolevel_Some_ByWriting) {
|
||||
auto blockId = CreateFullTwoLevel()->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
TraverseByWriting(tree.get(), 3, 5);
|
||||
|
||||
EXPECT_EQ(0u, blockStore->loadedBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(2u, blockStore->distinctWrittenBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, TraverseLeaves_Twolevel_Some_ByReading) {
|
||||
auto blockId = CreateFullTwoLevel()->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
TraverseByReading(tree.get(), 3, 5);
|
||||
|
||||
EXPECT_EQ(3u, blockStore->loadedBlocks().size()); // reads 2 leaves and the rightmost leaf to determine size
|
||||
EXPECT_EQ(0u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->distinctWrittenBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, TraverseLeaves_Threelevel_All_ByWriting) {
|
||||
auto blockId = CreateFullThreeLevel()->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
TraverseByWriting(tree.get(), 0, maxChildrenPerInnerNode * maxChildrenPerInnerNode);
|
||||
|
||||
EXPECT_EQ(maxChildrenPerInnerNode + 1, blockStore->loadedBlocks().size()); // Loads inner nodes and has to load the rightmost leaf once to adapt its size, rest of the leaves aren't loaded but just overwritten.
|
||||
EXPECT_EQ(0u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(maxChildrenPerInnerNode*maxChildrenPerInnerNode, blockStore->distinctWrittenBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, TraverseLeaves_Threelevel_All_ByReading) {
|
||||
auto blockId = CreateFullThreeLevel()->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
TraverseByReading(tree.get(), 0, maxChildrenPerInnerNode * maxChildrenPerInnerNode);
|
||||
|
||||
EXPECT_EQ(maxChildrenPerInnerNode*maxChildrenPerInnerNode + maxChildrenPerInnerNode + 2, blockStore->loadedBlocks().size()); // Loads inner nodes and leaves. Has to load the rightmost inner node and leaf an additional time at the beginning to compute size
|
||||
EXPECT_EQ(0u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->distinctWrittenBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, TraverseLeaves_Threelevel_InOneInner_ByWriting) {
|
||||
auto blockId = CreateFullThreeLevel()->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
TraverseByWriting(tree.get(), 3, 5);
|
||||
|
||||
EXPECT_EQ(1u, blockStore->loadedBlocks().size()); // Loads inner node. Doesn't load the leaves, they're just overwritten.
|
||||
EXPECT_EQ(0u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(2u, blockStore->distinctWrittenBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, TraverseLeaves_Threelevel_InOneInner_ByReading) {
|
||||
auto blockId = CreateFullThreeLevel()->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
TraverseByReading(tree.get(), 3, 5);
|
||||
|
||||
EXPECT_EQ(5u, blockStore->loadedBlocks().size()); // reads 2 leaves and the inner node, also has to read the rightmost inner node and leaf additionally at the beginning to determine size
|
||||
EXPECT_EQ(0u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->distinctWrittenBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, TraverseLeaves_Threelevel_InTwoInner_ByWriting) {
|
||||
auto blockId = CreateFullThreeLevel()->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
TraverseByWriting(tree.get(), 3, 3 + maxChildrenPerInnerNode);
|
||||
|
||||
EXPECT_EQ(2u, blockStore->loadedBlocks().size()); // Loads both inner node
|
||||
EXPECT_EQ(0u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(maxChildrenPerInnerNode, blockStore->distinctWrittenBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, TraverseLeaves_Threelevel_InTwoInner_ByReading) {
|
||||
auto blockId = CreateFullThreeLevel()->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
TraverseByReading(tree.get(), 3, 3 + maxChildrenPerInnerNode);
|
||||
|
||||
EXPECT_EQ(4u + maxChildrenPerInnerNode, blockStore->loadedBlocks().size()); // Loads both inner nodes and the requested leaves. Also has to load rightmost inner node and leaf additionally in the beginning to determine size.
|
||||
EXPECT_EQ(0u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->distinctWrittenBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, TraverseLeaves_Threelevel_WholeInner_ByWriting) {
|
||||
auto blockId = CreateFullThreeLevel()->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
TraverseByWriting(tree.get(), maxChildrenPerInnerNode, 2*maxChildrenPerInnerNode);
|
||||
|
||||
EXPECT_EQ(1u, blockStore->loadedBlocks().size()); // Loads inner node. Doesn't load the leaves, they're just overwritten.
|
||||
EXPECT_EQ(0u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(maxChildrenPerInnerNode, blockStore->distinctWrittenBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, TraverseLeaves_Threelevel_WholeInner_ByReading) {
|
||||
auto blockId = CreateFullThreeLevel()->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
TraverseByReading(tree.get(), maxChildrenPerInnerNode, 2*maxChildrenPerInnerNode);
|
||||
|
||||
EXPECT_EQ(3u + maxChildrenPerInnerNode, blockStore->loadedBlocks().size()); // Loads inner node and all requested leaves. Also has to load rightmost inner node and leaf additionally in the beginning to determine size.
|
||||
EXPECT_EQ(0u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->distinctWrittenBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, TraverseLeaves_GrowingTree_StartingInside) {
|
||||
auto blockId = CreateInner({CreateLeaf(), CreateLeaf()})->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
TraverseByWriting(tree.get(), 1, 4);
|
||||
|
||||
EXPECT_EQ(1u, blockStore->loadedBlocks().size()); // Loads last old child (for growing it)
|
||||
EXPECT_EQ(2u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(2u, blockStore->distinctWrittenBlocks().size()); // write the data and add children to inner node
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, TraverseLeaves_GrowingTree_StartingOutside_TwoLevel) {
|
||||
auto blockId = CreateInner({CreateLeaf(), CreateLeaf()})->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
TraverseByWriting(tree.get(), 4, 5);
|
||||
|
||||
EXPECT_EQ(1u, blockStore->loadedBlocks().size()); // Loads last old leaf for growing it
|
||||
EXPECT_EQ(3u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(1u, blockStore->distinctWrittenBlocks().size()); // add child to inner node
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, TraverseLeaves_GrowingTree_StartingOutside_ThreeLevel) {
|
||||
auto blockId = CreateInner({CreateFullTwoLevel(), CreateFullTwoLevel()})->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
TraverseByWriting(tree.get(), 2*maxChildrenPerInnerNode+1, 2*maxChildrenPerInnerNode+2);
|
||||
|
||||
EXPECT_EQ(2u, blockStore->loadedBlocks().size()); // Loads last old leaf (and its inner node) for growing it
|
||||
EXPECT_EQ(3u, blockStore->createdBlocks()); // inner node and two leaves
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(1u, blockStore->distinctWrittenBlocks().size()); // add children to existing inner node
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, TraverseLeaves_GrowingTree_StartingAtBeginOfChild) {
|
||||
auto blockId = CreateInner({CreateFullTwoLevel(), CreateFullTwoLevel()})->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
TraverseByWriting(tree.get(), maxChildrenPerInnerNode, 3*maxChildrenPerInnerNode);
|
||||
|
||||
EXPECT_EQ(2u, blockStore->loadedBlocks().size()); // Loads inner node and one leaf to check whether we have to grow it. Doesn't load the leaves, but returns the keys of the leaves to the callback.
|
||||
EXPECT_EQ(1u + maxChildrenPerInnerNode, blockStore->createdBlocks()); // Creates an inner node and its leaves
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(maxChildrenPerInnerNode + 1u, blockStore->distinctWrittenBlocks().size()); // write data and add children to existing inner node
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, TraverseLeaves_GrowingTreeDepth_StartingInOldDepth) {
|
||||
auto blockId = CreateInner({CreateLeaf(), CreateLeaf()})->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
TraverseByWriting(tree.get(), 4, maxChildrenPerInnerNode+2);
|
||||
|
||||
EXPECT_EQ(1u, blockStore->loadedBlocks().size()); // Loads last old leaf for growing it
|
||||
EXPECT_EQ(2u + maxChildrenPerInnerNode, blockStore->createdBlocks()); // 2x new inner node + leaves
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(1u, blockStore->distinctWrittenBlocks().size()); // Add children to existing inner node
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, TraverseLeaves_GrowingTreeDepth_StartingInOldDepth_ResizeLastLeaf) {
|
||||
auto blockId = CreateInner({CreateLeaf(), CreateLeafWithSize(5)})->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
TraverseByWriting(tree.get(), 4, maxChildrenPerInnerNode+2);
|
||||
|
||||
EXPECT_EQ(1u, blockStore->loadedBlocks().size()); // Loads last old leaf for growing it
|
||||
EXPECT_EQ(2u + maxChildrenPerInnerNode, blockStore->createdBlocks()); // 2x new inner node + leaves
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(2u, blockStore->distinctWrittenBlocks().size()); // Resize last leaf and add children to existing inner node
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, TraverseLeaves_GrowingTreeDepth_StartingInNewDepth) {
|
||||
auto blockId = CreateInner({CreateLeaf(), CreateLeaf()})->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
TraverseByWriting(tree.get(), maxChildrenPerInnerNode, maxChildrenPerInnerNode+2);
|
||||
|
||||
EXPECT_EQ(1u, blockStore->loadedBlocks().size()); // Loads last old leaf for growing it
|
||||
EXPECT_EQ(2u + maxChildrenPerInnerNode, blockStore->createdBlocks()); // 2x new inner node + leaves
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(1u, blockStore->distinctWrittenBlocks().size()); // Add children to existing inner node
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, TraverseLeaves_GrowingTreeDepth_StartingInNewDepth_ResizeLastLeaf) {
|
||||
auto blockId = CreateInner({CreateLeaf(), CreateLeafWithSize(5)})->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
TraverseByWriting(tree.get(), maxChildrenPerInnerNode, maxChildrenPerInnerNode+2);
|
||||
|
||||
EXPECT_EQ(1u, blockStore->loadedBlocks().size()); // Loads last old leaf for growing it
|
||||
EXPECT_EQ(2u + maxChildrenPerInnerNode, blockStore->createdBlocks()); // 2x new inner node + leaves
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(2u, blockStore->distinctWrittenBlocks().size()); // Resize last leaf and add children to existing inner node
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, ResizeNumBytes_ZeroToZero) {
|
||||
auto blockId = CreateLeafWithSize(0)->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
tree->resizeNumBytes(0);
|
||||
|
||||
EXPECT_EQ(0u, blockStore->loadedBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->distinctWrittenBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, ResizeNumBytes_GrowOneLeaf) {
|
||||
auto blockId = CreateLeafWithSize(0)->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
tree->resizeNumBytes(5);
|
||||
|
||||
EXPECT_EQ(0u, blockStore->loadedBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(1u, blockStore->distinctWrittenBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, ResizeNumBytes_ShrinkOneLeaf) {
|
||||
auto blockId = CreateLeafWithSize(5)->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
tree->resizeNumBytes(2);
|
||||
|
||||
EXPECT_EQ(0u, blockStore->loadedBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(1u, blockStore->distinctWrittenBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, ResizeNumBytes_ShrinkOneLeafToZero) {
|
||||
auto blockId = CreateLeafWithSize(5)->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
tree->resizeNumBytes(0);
|
||||
|
||||
EXPECT_EQ(0u, blockStore->loadedBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(1u, blockStore->distinctWrittenBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, ResizeNumBytes_GrowOneLeafInLargerTree) {
|
||||
auto blockId = CreateInner({CreateFullTwoLevel(), CreateInner({CreateLeaf(), CreateLeafWithSize(5)})})->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
tree->resizeNumBytes(maxBytesPerLeaf*(maxChildrenPerInnerNode+1)+6); // Grow by one byte
|
||||
|
||||
EXPECT_EQ(2u, blockStore->loadedBlocks().size()); // Load inner node and leaf
|
||||
EXPECT_EQ(0u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(1u, blockStore->distinctWrittenBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, ResizeNumBytes_GrowByOneLeaf) {
|
||||
auto blockId = CreateInner({CreateLeaf(), CreateLeaf()})->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
tree->resizeNumBytes(maxBytesPerLeaf*2+1); // Grow by one byte
|
||||
|
||||
EXPECT_EQ(1u, blockStore->loadedBlocks().size());
|
||||
EXPECT_EQ(1u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(1u, blockStore->distinctWrittenBlocks().size()); // add child to inner node
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, ResizeNumBytes_GrowByOneLeaf_GrowLastLeaf) {
|
||||
auto blockId = CreateInner({CreateLeaf(), CreateLeafWithSize(5)})->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
tree->resizeNumBytes(maxBytesPerLeaf*2+1); // Grow by one byte
|
||||
|
||||
EXPECT_EQ(1u, blockStore->loadedBlocks().size());
|
||||
EXPECT_EQ(1u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(2u, blockStore->distinctWrittenBlocks().size()); // add child to inner node and resize old last leaf
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, ResizeNumBytes_ShrinkByOneLeaf) {
|
||||
auto blockId = CreateInner({CreateLeaf(), CreateLeaf(), CreateLeaf()})->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
tree->resizeNumBytes(2*maxBytesPerLeaf-1);
|
||||
|
||||
EXPECT_EQ(1u, blockStore->loadedBlocks().size());
|
||||
EXPECT_EQ(0u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(1u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(2u, blockStore->distinctWrittenBlocks().size()); // resize new last leaf and remove leaf from inner node
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, ResizeNumBytes_IncreaseTreeDepth_0to1) {
|
||||
auto blockId = CreateLeaf()->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
tree->resizeNumBytes(maxBytesPerLeaf+1);
|
||||
|
||||
EXPECT_EQ(0u, blockStore->loadedBlocks().size());
|
||||
EXPECT_EQ(2u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(1u, blockStore->distinctWrittenBlocks().size()); // rewrite root node to be an inner node
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, ResizeNumBytes_IncreaseTreeDepth_1to2) {
|
||||
auto blockId = CreateFullTwoLevel()->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
tree->resizeNumBytes(maxBytesPerLeaf*maxChildrenPerInnerNode+1);
|
||||
|
||||
EXPECT_EQ(1u, blockStore->loadedBlocks().size()); // check whether we have to grow last leaf
|
||||
EXPECT_EQ(3u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(1u, blockStore->distinctWrittenBlocks().size()); // rewrite root node to be an inner node
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, ResizeNumBytes_IncreaseTreeDepth_0to2) {
|
||||
auto blockId = CreateLeaf()->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
tree->resizeNumBytes(maxBytesPerLeaf*maxChildrenPerInnerNode+1);
|
||||
|
||||
EXPECT_EQ(0u, blockStore->loadedBlocks().size());
|
||||
EXPECT_EQ(3u + maxChildrenPerInnerNode, blockStore->createdBlocks());
|
||||
EXPECT_EQ(0u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(1u, blockStore->distinctWrittenBlocks().size()); // rewrite root node to be an inner node
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, ResizeNumBytes_DecreaseTreeDepth_1to0) {
|
||||
auto blockId = CreateInner({CreateLeaf(), CreateLeaf()})->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
tree->resizeNumBytes(maxBytesPerLeaf);
|
||||
|
||||
EXPECT_EQ(2u, blockStore->loadedBlocks().size()); // read content of first leaf and load first leaf to replace root with it
|
||||
EXPECT_EQ(0u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(2u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(1u, blockStore->distinctWrittenBlocks().size()); // rewrite root node to be a leaf
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, ResizeNumBytes_DecreaseTreeDepth_2to1) {
|
||||
auto blockId = CreateInner({CreateFullTwoLevel(), CreateInner({CreateLeaf()})})->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
tree->resizeNumBytes(maxBytesPerLeaf*maxChildrenPerInnerNode);
|
||||
|
||||
EXPECT_EQ(4u, blockStore->loadedBlocks().size()); // load new last leaf (+inner node), load second inner node to remove its subtree, then load first child of root to replace root with its child.
|
||||
EXPECT_EQ(0u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(3u, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(1u, blockStore->distinctWrittenBlocks().size()); // rewrite root node to be a leaf
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_Performance, ResizeNumBytes_DecreaseTreeDepth_2to0) {
|
||||
auto blockId = CreateInner({CreateFullTwoLevel(), CreateInner({CreateLeaf()})})->blockId();
|
||||
auto tree = treeStore.load(blockId).value();
|
||||
blockStore->resetCounters();
|
||||
|
||||
tree->resizeNumBytes(maxBytesPerLeaf);
|
||||
|
||||
EXPECT_EQ(5u, blockStore->loadedBlocks().size()); // load new last leaf (+inner node), load second inner node to remove its subtree, then 2x load first child of root to replace root with its child.
|
||||
EXPECT_EQ(0u, blockStore->createdBlocks());
|
||||
EXPECT_EQ(3u + maxChildrenPerInnerNode, blockStore->removedBlocks().size());
|
||||
EXPECT_EQ(2u, blockStore->distinctWrittenBlocks().size()); // remove children from inner node and rewrite root node to be a leaf
|
||||
EXPECT_EQ(0u, blockStore->resizedBlocks().size());
|
||||
}
|
@ -1,234 +0,0 @@
|
||||
#include "testutils/DataTreeTest.h"
|
||||
#include "testutils/TwoLevelDataFixture.h"
|
||||
#include "blobstore/implementations/onblocks/utils/Math.h"
|
||||
#include <cpp-utils/data/Data.h>
|
||||
|
||||
#include <tuple>
|
||||
|
||||
using ::testing::WithParamInterface;
|
||||
using ::testing::Values;
|
||||
using ::testing::Combine;
|
||||
using std::tuple;
|
||||
using std::get;
|
||||
using std::function;
|
||||
using std::mem_fn;
|
||||
using cpputils::dynamic_pointer_move;
|
||||
|
||||
using blobstore::onblocks::datanodestore::DataLeafNode;
|
||||
using blobstore::onblocks::datanodestore::DataInnerNode;
|
||||
using blobstore::onblocks::datanodestore::DataNode;
|
||||
using blobstore::onblocks::datanodestore::DataNodeLayout;
|
||||
using blobstore::onblocks::datatreestore::DataTree;
|
||||
using blobstore::onblocks::utils::ceilDivision;
|
||||
using blockstore::BlockId;
|
||||
using cpputils::Data;
|
||||
using boost::none;
|
||||
|
||||
using cpputils::unique_ref;
|
||||
|
||||
class DataTreeTest_ResizeByTraversing: public DataTreeTest {
|
||||
public:
|
||||
static constexpr DataNodeLayout LAYOUT = DataNodeLayout(BLOCKSIZE_BYTES);
|
||||
|
||||
unique_ref<DataTree> CreateTree(unique_ref<DataNode> root) {
|
||||
BlockId blockId = root->blockId();
|
||||
cpputils::destruct(std::move(root));
|
||||
return treeStore.load(blockId).value();
|
||||
}
|
||||
|
||||
unique_ref<DataTree> CreateLeafTreeWithSize(uint32_t size) {
|
||||
return CreateTree(CreateLeafWithSize(size));
|
||||
}
|
||||
|
||||
unique_ref<DataTree> CreateTwoLeafTreeWithSecondLeafSize(uint32_t size) {
|
||||
return CreateTree(CreateTwoLeafWithSecondLeafSize(size));
|
||||
}
|
||||
|
||||
unique_ref<DataTree> CreateFullTwoLevelTreeWithLastLeafSize(uint32_t size) {
|
||||
return CreateTree(CreateFullTwoLevelWithLastLeafSize(size));
|
||||
}
|
||||
|
||||
unique_ref<DataTree> CreateThreeLevelTreeWithTwoChildrenAndLastLeafSize(uint32_t size) {
|
||||
return CreateTree(CreateThreeLevelWithTwoChildrenAndLastLeafSize(size));
|
||||
}
|
||||
|
||||
unique_ref<DataTree> CreateThreeLevelTreeWithThreeChildrenAndLastLeafSize(uint32_t size) {
|
||||
return CreateTree(CreateThreeLevelWithThreeChildrenAndLastLeafSize(size));
|
||||
}
|
||||
|
||||
unique_ref<DataTree> CreateFullThreeLevelTreeWithLastLeafSize(uint32_t size) {
|
||||
return CreateTree(CreateFullThreeLevelWithLastLeafSize(size));
|
||||
}
|
||||
|
||||
unique_ref<DataTree> CreateFourLevelMinDataTreeWithLastLeafSize(uint32_t size) {
|
||||
return CreateTree(CreateFourLevelMinDataWithLastLeafSize(size));
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(misc-no-recursion)
|
||||
void EXPECT_IS_LEFTMAXDATA_TREE(const BlockId &blockId) {
|
||||
auto root = nodeStore->load(blockId).value();
|
||||
DataInnerNode *inner = dynamic_cast<DataInnerNode*>(root.get());
|
||||
if (inner != nullptr) {
|
||||
for (uint32_t i = 0; i < inner->numChildren()-1; ++i) {
|
||||
EXPECT_IS_MAXDATA_TREE(inner->readChild(i).blockId());
|
||||
}
|
||||
EXPECT_IS_LEFTMAXDATA_TREE(inner->readLastChild().blockId());
|
||||
}
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(misc-no-recursion)
|
||||
void EXPECT_IS_MAXDATA_TREE(const BlockId &blockId) {
|
||||
auto root = nodeStore->load(blockId).value();
|
||||
DataInnerNode *inner = dynamic_cast<DataInnerNode*>(root.get());
|
||||
if (inner != nullptr) {
|
||||
for (uint32_t i = 0; i < inner->numChildren(); ++i) {
|
||||
EXPECT_IS_MAXDATA_TREE(inner->readChild(i).blockId());
|
||||
}
|
||||
} else {
|
||||
DataLeafNode *leaf = dynamic_cast<DataLeafNode*>(root.get());
|
||||
EXPECT_EQ(nodeStore->layout().maxBytesPerLeaf(), leaf->numBytes());
|
||||
}
|
||||
}
|
||||
};
|
||||
constexpr DataNodeLayout DataTreeTest_ResizeByTraversing::LAYOUT;
|
||||
|
||||
class DataTreeTest_ResizeByTraversing_P: public DataTreeTest_ResizeByTraversing, public WithParamInterface<tuple<function<unique_ref<DataTree>(DataTreeTest_ResizeByTraversing*, uint32_t)>, uint32_t, uint32_t, std::function<uint32_t (uint32_t oldNumberOfLeaves, uint32_t newNumberOfLeaves)>>> {
|
||||
public:
|
||||
DataTreeTest_ResizeByTraversing_P()
|
||||
: oldLastLeafSize(get<1>(GetParam())),
|
||||
tree(get<0>(GetParam())(this, oldLastLeafSize)),
|
||||
numberOfLeavesToAdd(get<2>(GetParam())),
|
||||
newNumberOfLeaves(tree->numLeaves()+numberOfLeavesToAdd),
|
||||
traversalBeginIndex(get<3>(GetParam())(tree->numLeaves(), newNumberOfLeaves)),
|
||||
ZEROES(LAYOUT.maxBytesPerLeaf())
|
||||
{
|
||||
ZEROES.FillWithZeroes();
|
||||
}
|
||||
|
||||
void GrowTree(const BlockId &blockId) {
|
||||
auto tree = treeStore.load(blockId);
|
||||
GrowTree(tree.get().get());
|
||||
}
|
||||
|
||||
void GrowTree(DataTree *tree) {
|
||||
uint64_t maxBytesPerLeaf = tree->maxBytesPerLeaf();
|
||||
uint64_t offset = traversalBeginIndex * maxBytesPerLeaf;
|
||||
uint64_t count = newNumberOfLeaves * maxBytesPerLeaf - offset;
|
||||
Data data(count);
|
||||
data.FillWithZeroes();
|
||||
tree->writeBytes(data.data(), offset, count);
|
||||
tree->flush();
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(misc-no-recursion)
|
||||
unique_ref<DataLeafNode> LastLeaf(const BlockId &blockId) {
|
||||
auto root = nodeStore->load(blockId).value();
|
||||
auto leaf = dynamic_pointer_move<DataLeafNode>(root);
|
||||
if (leaf != none) {
|
||||
return std::move(*leaf);
|
||||
}
|
||||
auto inner = dynamic_pointer_move<DataInnerNode>(root).value();
|
||||
return LastLeaf(inner->readLastChild().blockId());
|
||||
}
|
||||
|
||||
uint32_t oldLastLeafSize;
|
||||
unique_ref<DataTree> tree;
|
||||
uint32_t numberOfLeavesToAdd;
|
||||
uint32_t newNumberOfLeaves;
|
||||
uint32_t traversalBeginIndex;
|
||||
Data ZEROES;
|
||||
};
|
||||
INSTANTIATE_TEST_SUITE_P(DataTreeTest_ResizeByTraversing_P, DataTreeTest_ResizeByTraversing_P,
|
||||
Combine(
|
||||
//Tree we're starting with
|
||||
Values<function<unique_ref<DataTree>(DataTreeTest_ResizeByTraversing*, uint32_t)>>(
|
||||
mem_fn(&DataTreeTest_ResizeByTraversing::CreateLeafTreeWithSize),
|
||||
mem_fn(&DataTreeTest_ResizeByTraversing::CreateTwoLeafTreeWithSecondLeafSize),
|
||||
mem_fn(&DataTreeTest_ResizeByTraversing::CreateFullTwoLevelTreeWithLastLeafSize),
|
||||
mem_fn(&DataTreeTest_ResizeByTraversing::CreateThreeLevelTreeWithTwoChildrenAndLastLeafSize),
|
||||
mem_fn(&DataTreeTest_ResizeByTraversing::CreateThreeLevelTreeWithThreeChildrenAndLastLeafSize),
|
||||
mem_fn(&DataTreeTest_ResizeByTraversing::CreateFullThreeLevelTreeWithLastLeafSize),
|
||||
mem_fn(&DataTreeTest_ResizeByTraversing::CreateFourLevelMinDataTreeWithLastLeafSize)
|
||||
),
|
||||
//Last leaf size of the start tree
|
||||
Values(
|
||||
0u,
|
||||
1u,
|
||||
10u,
|
||||
DataTreeTest_ResizeByTraversing::LAYOUT.maxBytesPerLeaf()
|
||||
),
|
||||
//Number of leaves we're adding
|
||||
Values(
|
||||
1u,
|
||||
2u,
|
||||
DataTreeTest_ResizeByTraversing::LAYOUT.maxChildrenPerInnerNode(), //Full two level tree
|
||||
2* DataTreeTest_ResizeByTraversing::LAYOUT.maxChildrenPerInnerNode(), //Three level tree with two children
|
||||
3* DataTreeTest_ResizeByTraversing::LAYOUT.maxChildrenPerInnerNode(), //Three level tree with three children
|
||||
DataTreeTest_ResizeByTraversing::LAYOUT.maxChildrenPerInnerNode() * DataTreeTest_ResizeByTraversing::LAYOUT.maxChildrenPerInnerNode(), //Full three level tree
|
||||
DataTreeTest_ResizeByTraversing::LAYOUT.maxChildrenPerInnerNode() * DataTreeTest_ResizeByTraversing::LAYOUT.maxChildrenPerInnerNode() + 1 //Four level mindata tree
|
||||
),
|
||||
//Decide the traversal begin index
|
||||
Values(
|
||||
[] (uint32_t /*oldNumberOfLeaves*/, uint32_t newNumberOfLeaves) {return newNumberOfLeaves-1;}, // Traverse last leaf (begin==end-1)
|
||||
[] (uint32_t oldNumberOfLeaves, uint32_t newNumberOfLeaves) {return (oldNumberOfLeaves+newNumberOfLeaves)/2;}, // Start traversal in middle of new leaves
|
||||
[] (uint32_t oldNumberOfLeaves, uint32_t /*newNumberOfLeaves*/) {return oldNumberOfLeaves-1;}, // Start traversal with last old leaf
|
||||
[] (uint32_t oldNumberOfLeaves, uint32_t /*newNumberOfLeaves*/) {return oldNumberOfLeaves;}, // Start traversal with first new leaf
|
||||
[] (uint32_t /*oldNumberOfLeaves*/, uint32_t /*newNumberOfLeaves*/) {return 0;}, // Traverse full tree
|
||||
[] (uint32_t /*oldNumberOfLeaves*/, uint32_t /*newNumberOfLeaves*/) {return 1;} // Traverse full tree except first leaf
|
||||
)
|
||||
)
|
||||
);
|
||||
|
||||
TEST_P(DataTreeTest_ResizeByTraversing_P, StructureIsValid) {
|
||||
GrowTree(tree.get());
|
||||
EXPECT_IS_LEFTMAXDATA_TREE(tree->blockId());
|
||||
}
|
||||
|
||||
TEST_P(DataTreeTest_ResizeByTraversing_P, NumLeavesIsCorrect_FromCache) {
|
||||
tree->numLeaves(); // fill cache with old value
|
||||
GrowTree(tree.get());
|
||||
// tree->numLeaves() only goes down the right border nodes and expects the tree to be a left max data tree.
|
||||
// This is what the StructureIsValid test case is for.
|
||||
EXPECT_EQ(newNumberOfLeaves, tree->numLeaves());
|
||||
}
|
||||
|
||||
TEST_P(DataTreeTest_ResizeByTraversing_P, NumLeavesIsCorrect) {
|
||||
GrowTree(tree.get());
|
||||
// tree->forceComputeNumLeaves() only goes down the right border nodes and expects the tree to be a left max data tree.
|
||||
// This is what the StructureIsValid test case is for.
|
||||
EXPECT_EQ(newNumberOfLeaves, tree->forceComputeNumLeaves());
|
||||
}
|
||||
|
||||
TEST_P(DataTreeTest_ResizeByTraversing_P, DepthFlagsAreCorrect) {
|
||||
GrowTree(tree.get());
|
||||
uint32_t depth = ceil(log(newNumberOfLeaves)/log(DataTreeTest_ResizeByTraversing::LAYOUT.maxChildrenPerInnerNode()));
|
||||
CHECK_DEPTH(depth, tree->blockId());
|
||||
}
|
||||
|
||||
TEST_P(DataTreeTest_ResizeByTraversing_P, KeyDoesntChange) {
|
||||
BlockId blockId = tree->blockId();
|
||||
tree->flush();
|
||||
GrowTree(tree.get());
|
||||
EXPECT_EQ(blockId, tree->blockId());
|
||||
}
|
||||
|
||||
TEST_P(DataTreeTest_ResizeByTraversing_P, DataStaysIntact) {
|
||||
uint32_t oldNumberOfLeaves = std::max(UINT64_C(1), ceilDivision(tree->numBytes(), static_cast<uint64_t>(nodeStore->layout().maxBytesPerLeaf())));
|
||||
|
||||
TwoLevelDataFixture data(nodeStore, TwoLevelDataFixture::SizePolicy::Unchanged);
|
||||
BlockId blockId = tree->blockId();
|
||||
cpputils::destruct(std::move(tree));
|
||||
data.FillInto(nodeStore->load(blockId).get().get());
|
||||
|
||||
GrowTree(blockId);
|
||||
|
||||
if (traversalBeginIndex < oldNumberOfLeaves) {
|
||||
// Traversal wrote over part of the pre-existing data, we can only check the data before it.
|
||||
if (traversalBeginIndex != 0) {
|
||||
data.EXPECT_DATA_CORRECT(nodeStore->load(blockId).get().get(), static_cast<int>(traversalBeginIndex - 1));
|
||||
}
|
||||
} else {
|
||||
// Here, traversal was entirely outside the preexisting data, we can check all preexisting data.
|
||||
data.EXPECT_DATA_CORRECT(nodeStore->load(blockId).get().get(), oldNumberOfLeaves, oldLastLeafSize);
|
||||
}
|
||||
}
|
@ -1,266 +0,0 @@
|
||||
#include "testutils/DataTreeTest.h"
|
||||
#include "testutils/TwoLevelDataFixture.h"
|
||||
#include "blobstore/implementations/onblocks/utils/Math.h"
|
||||
#include <cpp-utils/data/Data.h>
|
||||
|
||||
#include <tuple>
|
||||
|
||||
using ::testing::WithParamInterface;
|
||||
using ::testing::Values;
|
||||
using ::testing::Combine;
|
||||
using std::tuple;
|
||||
using std::get;
|
||||
using std::function;
|
||||
using std::mem_fn;
|
||||
using cpputils::dynamic_pointer_move;
|
||||
|
||||
using blobstore::onblocks::datanodestore::DataLeafNode;
|
||||
using blobstore::onblocks::datanodestore::DataInnerNode;
|
||||
using blobstore::onblocks::datanodestore::DataNode;
|
||||
using blobstore::onblocks::datanodestore::DataNodeLayout;
|
||||
using blobstore::onblocks::datatreestore::DataTree;
|
||||
using blobstore::onblocks::utils::ceilDivision;
|
||||
using blockstore::BlockId;
|
||||
using cpputils::Data;
|
||||
using boost::none;
|
||||
|
||||
using cpputils::unique_ref;
|
||||
|
||||
class DataTreeTest_ResizeNumBytes: public DataTreeTest {
|
||||
public:
|
||||
static constexpr DataNodeLayout LAYOUT = DataNodeLayout(BLOCKSIZE_BYTES);
|
||||
|
||||
unique_ref<DataTree> CreateTree(unique_ref<DataNode> root) {
|
||||
BlockId blockId = root->blockId();
|
||||
cpputils::destruct(std::move(root));
|
||||
return treeStore.load(blockId).value();
|
||||
}
|
||||
|
||||
unique_ref<DataTree> CreateLeafTreeWithSize(uint32_t size) {
|
||||
return CreateTree(CreateLeafWithSize(size));
|
||||
}
|
||||
|
||||
unique_ref<DataTree> CreateTwoLeafTreeWithSecondLeafSize(uint32_t size) {
|
||||
return CreateTree(CreateTwoLeafWithSecondLeafSize(size));
|
||||
}
|
||||
|
||||
unique_ref<DataTree> CreateFullTwoLevelTreeWithLastLeafSize(uint32_t size) {
|
||||
return CreateTree(CreateFullTwoLevelWithLastLeafSize(size));
|
||||
}
|
||||
|
||||
unique_ref<DataTree> CreateThreeLevelTreeWithTwoChildrenAndLastLeafSize(uint32_t size) {
|
||||
return CreateTree(CreateThreeLevelWithTwoChildrenAndLastLeafSize(size));
|
||||
}
|
||||
|
||||
unique_ref<DataTree> CreateThreeLevelTreeWithThreeChildrenAndLastLeafSize(uint32_t size) {
|
||||
return CreateTree(CreateThreeLevelWithThreeChildrenAndLastLeafSize(size));
|
||||
}
|
||||
|
||||
unique_ref<DataTree> CreateFullThreeLevelTreeWithLastLeafSize(uint32_t size) {
|
||||
return CreateTree(CreateFullThreeLevelWithLastLeafSize(size));
|
||||
}
|
||||
|
||||
unique_ref<DataTree> CreateFourLevelMinDataTreeWithLastLeafSize(uint32_t size) {
|
||||
return CreateTree(CreateFourLevelMinDataWithLastLeafSize(size));
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(misc-no-recursion)
|
||||
void EXPECT_IS_LEFTMAXDATA_TREE(const BlockId &blockId) {
|
||||
auto root = nodeStore->load(blockId).value();
|
||||
DataInnerNode *inner = dynamic_cast<DataInnerNode*>(root.get());
|
||||
if (inner != nullptr) {
|
||||
for (uint32_t i = 0; i < inner->numChildren()-1; ++i) {
|
||||
EXPECT_IS_MAXDATA_TREE(inner->readChild(i).blockId());
|
||||
}
|
||||
EXPECT_IS_LEFTMAXDATA_TREE(inner->readLastChild().blockId());
|
||||
}
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(misc-no-recursion)
|
||||
void EXPECT_IS_MAXDATA_TREE(const BlockId &blockId) {
|
||||
auto root = nodeStore->load(blockId).value();
|
||||
DataInnerNode *inner = dynamic_cast<DataInnerNode*>(root.get());
|
||||
if (inner != nullptr) {
|
||||
for (uint32_t i = 0; i < inner->numChildren(); ++i) {
|
||||
EXPECT_IS_MAXDATA_TREE(inner->readChild(i).blockId());
|
||||
}
|
||||
} else {
|
||||
DataLeafNode *leaf = dynamic_cast<DataLeafNode*>(root.get());
|
||||
EXPECT_EQ(nodeStore->layout().maxBytesPerLeaf(), leaf->numBytes());
|
||||
}
|
||||
}
|
||||
};
|
||||
constexpr DataNodeLayout DataTreeTest_ResizeNumBytes::LAYOUT;
|
||||
|
||||
class DataTreeTest_ResizeNumBytes_P: public DataTreeTest_ResizeNumBytes, public WithParamInterface<tuple<function<unique_ref<DataTree>(DataTreeTest_ResizeNumBytes*, uint32_t)>, uint32_t, uint32_t, uint32_t>> {
|
||||
public:
|
||||
DataTreeTest_ResizeNumBytes_P()
|
||||
: oldLastLeafSize(get<1>(GetParam())),
|
||||
tree(get<0>(GetParam())(this, oldLastLeafSize)),
|
||||
newNumberOfLeaves(get<2>(GetParam())),
|
||||
newLastLeafSize(get<3>(GetParam())),
|
||||
newSize((newNumberOfLeaves-1) * LAYOUT.maxBytesPerLeaf() + newLastLeafSize),
|
||||
ZEROES(LAYOUT.maxBytesPerLeaf())
|
||||
{
|
||||
ZEROES.FillWithZeroes();
|
||||
}
|
||||
|
||||
void ResizeTree(const BlockId &blockId, uint64_t size) {
|
||||
treeStore.load(blockId).get()->resizeNumBytes(size);
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(misc-no-recursion)
|
||||
unique_ref<DataLeafNode> LastLeaf(const BlockId &blockId) {
|
||||
auto root = nodeStore->load(blockId).value();
|
||||
auto leaf = dynamic_pointer_move<DataLeafNode>(root);
|
||||
if (leaf != none) {
|
||||
return std::move(*leaf);
|
||||
}
|
||||
auto inner = dynamic_pointer_move<DataInnerNode>(root).value();
|
||||
return LastLeaf(inner->readLastChild().blockId());
|
||||
}
|
||||
|
||||
uint32_t oldLastLeafSize;
|
||||
unique_ref<DataTree> tree;
|
||||
uint32_t newNumberOfLeaves;
|
||||
uint32_t newLastLeafSize;
|
||||
uint64_t newSize;
|
||||
Data ZEROES;
|
||||
};
|
||||
INSTANTIATE_TEST_SUITE_P(DataTreeTest_ResizeNumBytes_P, DataTreeTest_ResizeNumBytes_P,
|
||||
Combine(
|
||||
//Tree we're starting with
|
||||
Values<function<unique_ref<DataTree>(DataTreeTest_ResizeNumBytes*, uint32_t)>>(
|
||||
mem_fn(&DataTreeTest_ResizeNumBytes::CreateLeafTreeWithSize),
|
||||
mem_fn(&DataTreeTest_ResizeNumBytes::CreateTwoLeafTreeWithSecondLeafSize),
|
||||
mem_fn(&DataTreeTest_ResizeNumBytes::CreateFullTwoLevelTreeWithLastLeafSize),
|
||||
mem_fn(&DataTreeTest_ResizeNumBytes::CreateThreeLevelTreeWithTwoChildrenAndLastLeafSize),
|
||||
mem_fn(&DataTreeTest_ResizeNumBytes::CreateThreeLevelTreeWithThreeChildrenAndLastLeafSize),
|
||||
mem_fn(&DataTreeTest_ResizeNumBytes::CreateFullThreeLevelTreeWithLastLeafSize),
|
||||
mem_fn(&DataTreeTest_ResizeNumBytes::CreateFourLevelMinDataTreeWithLastLeafSize)
|
||||
),
|
||||
//Last leaf size of the start tree
|
||||
Values(
|
||||
0u,
|
||||
1u,
|
||||
10u,
|
||||
DataTreeTest_ResizeNumBytes::LAYOUT.maxBytesPerLeaf()
|
||||
),
|
||||
//Number of leaves we're resizing to
|
||||
Values(
|
||||
1u,
|
||||
2u,
|
||||
DataTreeTest_ResizeNumBytes::LAYOUT.maxChildrenPerInnerNode(), //Full two level tree
|
||||
2* DataTreeTest_ResizeNumBytes::LAYOUT.maxChildrenPerInnerNode(), //Three level tree with two children
|
||||
3* DataTreeTest_ResizeNumBytes::LAYOUT.maxChildrenPerInnerNode(), //Three level tree with three children
|
||||
DataTreeTest_ResizeNumBytes::LAYOUT.maxChildrenPerInnerNode() * DataTreeTest_ResizeNumBytes::LAYOUT.maxChildrenPerInnerNode(), //Full three level tree
|
||||
DataTreeTest_ResizeNumBytes::LAYOUT.maxChildrenPerInnerNode() * DataTreeTest_ResizeNumBytes::LAYOUT.maxChildrenPerInnerNode() + 1 //Four level mindata tree
|
||||
),
|
||||
//Last leaf size of the resized tree
|
||||
Values(
|
||||
1u,
|
||||
10u,
|
||||
DataTreeTest_ResizeNumBytes::LAYOUT.maxBytesPerLeaf()
|
||||
)
|
||||
)
|
||||
);
|
||||
|
||||
TEST_P(DataTreeTest_ResizeNumBytes_P, StructureIsValid) {
|
||||
tree->resizeNumBytes(newSize);
|
||||
tree->flush();
|
||||
EXPECT_IS_LEFTMAXDATA_TREE(tree->blockId());
|
||||
}
|
||||
|
||||
TEST_P(DataTreeTest_ResizeNumBytes_P, NumBytesIsCorrect) {
|
||||
tree->resizeNumBytes(newSize);
|
||||
tree->flush();
|
||||
// tree->numBytes() only goes down the right border nodes and expects the tree to be a left max data tree.
|
||||
// This is what the StructureIsValid test case is for.
|
||||
EXPECT_EQ(newSize, tree->numBytes());
|
||||
}
|
||||
|
||||
TEST_P(DataTreeTest_ResizeNumBytes_P, NumLeavesIsCorrect) {
|
||||
tree->resizeNumBytes(newSize);
|
||||
tree->flush();
|
||||
// tree->numLeaves() only goes down the right border nodes and expects the tree to be a left max data tree.
|
||||
// This is what the StructureIsValid test case is for.
|
||||
EXPECT_EQ(newNumberOfLeaves, tree->forceComputeNumLeaves());
|
||||
}
|
||||
|
||||
TEST_P(DataTreeTest_ResizeNumBytes_P, NumLeavesIsCorrect_FromCache) {
|
||||
tree->numLeaves(); // fill cache with old value
|
||||
tree->resizeNumBytes(newSize);
|
||||
tree->flush();
|
||||
// tree->numLeaves() only goes down the right border nodes and expects the tree to be a left max data tree.
|
||||
// This is what the StructureIsValid test case is for.
|
||||
EXPECT_EQ(newNumberOfLeaves, tree->numLeaves());
|
||||
}
|
||||
|
||||
TEST_P(DataTreeTest_ResizeNumBytes_P, DepthFlagsAreCorrect) {
|
||||
tree->resizeNumBytes(newSize);
|
||||
tree->flush();
|
||||
uint32_t depth = ceil(log(newNumberOfLeaves)/log(DataTreeTest_ResizeNumBytes::LAYOUT.maxChildrenPerInnerNode()) - 0.00000000001); // The subtraction takes care of double inaccuracies if newNumberOfLeaves == maxChildrenPerInnerNode
|
||||
CHECK_DEPTH(depth, tree->blockId());
|
||||
}
|
||||
|
||||
TEST_P(DataTreeTest_ResizeNumBytes_P, KeyDoesntChange) {
|
||||
BlockId blockId = tree->blockId();
|
||||
tree->flush();
|
||||
tree->resizeNumBytes(newSize);
|
||||
EXPECT_EQ(blockId, tree->blockId());
|
||||
}
|
||||
|
||||
TEST_P(DataTreeTest_ResizeNumBytes_P, DataStaysIntact) {
|
||||
uint32_t oldNumberOfLeaves = std::max(UINT64_C(1), ceilDivision(tree->numBytes(), static_cast<uint64_t>(nodeStore->layout().maxBytesPerLeaf())));
|
||||
TwoLevelDataFixture data(nodeStore, TwoLevelDataFixture::SizePolicy::Unchanged);
|
||||
BlockId blockId = tree->blockId();
|
||||
cpputils::destruct(std::move(tree));
|
||||
data.FillInto(nodeStore->load(blockId).get().get());
|
||||
|
||||
ResizeTree(blockId, newSize);
|
||||
|
||||
if (oldNumberOfLeaves < newNumberOfLeaves || (oldNumberOfLeaves == newNumberOfLeaves && oldLastLeafSize < newLastLeafSize)) {
|
||||
data.EXPECT_DATA_CORRECT(nodeStore->load(blockId).get().get(), oldNumberOfLeaves, oldLastLeafSize);
|
||||
} else {
|
||||
data.EXPECT_DATA_CORRECT(nodeStore->load(blockId).get().get(), newNumberOfLeaves, newLastLeafSize);
|
||||
}
|
||||
}
|
||||
|
||||
TEST_P(DataTreeTest_ResizeNumBytes_P, UnneededBlocksGetDeletedWhenShrinking) {
|
||||
tree->resizeNumBytes(newSize);
|
||||
tree->flush();
|
||||
|
||||
uint64_t expectedNumNodes = 1; // 1 for the root node
|
||||
uint64_t nodesOnCurrentLevel = newNumberOfLeaves;
|
||||
while (nodesOnCurrentLevel > 1) {
|
||||
expectedNumNodes += nodesOnCurrentLevel;
|
||||
nodesOnCurrentLevel = ceilDivision(nodesOnCurrentLevel, nodeStore->layout().maxChildrenPerInnerNode());
|
||||
}
|
||||
EXPECT_EQ(expectedNumNodes, nodeStore->numNodes());
|
||||
}
|
||||
|
||||
//Resize to zero is not caught in the parametrized test above, in the following, we test it separately.
|
||||
|
||||
TEST_F(DataTreeTest_ResizeNumBytes, ResizeToZero_NumBytesIsCorrect) {
|
||||
auto tree = CreateThreeLevelTreeWithThreeChildrenAndLastLeafSize(10u);
|
||||
tree->resizeNumBytes(0);
|
||||
BlockId blockId = tree->blockId();
|
||||
cpputils::destruct(std::move(tree));
|
||||
auto leaf = LoadLeafNode(blockId);
|
||||
EXPECT_EQ(0u, leaf->numBytes());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_ResizeNumBytes, ResizeToZero_blockIdDoesntChange) {
|
||||
auto tree = CreateThreeLevelTreeWithThreeChildrenAndLastLeafSize(10u);
|
||||
BlockId blockId = tree->blockId();
|
||||
tree->resizeNumBytes(0);
|
||||
tree->flush();
|
||||
EXPECT_EQ(blockId, tree->blockId());
|
||||
}
|
||||
|
||||
TEST_F(DataTreeTest_ResizeNumBytes, ResizeToZero_UnneededBlocksGetDeletedWhenShrinking) {
|
||||
auto tree = CreateThreeLevelTreeWithThreeChildrenAndLastLeafSize(10u);
|
||||
tree->resizeNumBytes(0);
|
||||
tree->flush();
|
||||
EXPECT_EQ(1u, nodeStore->numNodes());
|
||||
}
|
@ -1,449 +0,0 @@
|
||||
#include "testutils/DataTreeTest.h"
|
||||
#include <blobstore/implementations/onblocks/datatreestore/impl/LeafTraverser.h>
|
||||
#include <gmock/gmock.h>
|
||||
|
||||
using ::testing::Invoke;
|
||||
using ::testing::Eq;
|
||||
|
||||
using blobstore::onblocks::datanodestore::DataLeafNode;
|
||||
using blobstore::onblocks::datanodestore::DataInnerNode;
|
||||
using blobstore::onblocks::datanodestore::DataNode;
|
||||
using blobstore::onblocks::datatreestore::LeafHandle;
|
||||
using blobstore::onblocks::datatreestore::LeafTraverser;
|
||||
using blockstore::BlockId;
|
||||
|
||||
using cpputils::unique_ref;
|
||||
using cpputils::Data;
|
||||
using std::shared_ptr;
|
||||
using std::make_shared;
|
||||
|
||||
class TraversorMock {
|
||||
public:
|
||||
MOCK_METHOD(void, calledExistingLeaf, (DataLeafNode*, bool, uint32_t));
|
||||
MOCK_METHOD(shared_ptr<Data>, calledCreateLeaf, (uint32_t));
|
||||
};
|
||||
|
||||
MATCHER_P(KeyEq, expected, "node blockId equals") {
|
||||
return arg->blockId() == expected;
|
||||
}
|
||||
|
||||
class LeafTraverserTest: public DataTreeTest {
|
||||
public:
|
||||
LeafTraverserTest() :traversor() {}
|
||||
|
||||
unique_ref<DataInnerNode> CreateThreeLevel() {
|
||||
return CreateInner({
|
||||
CreateFullTwoLevel(),
|
||||
CreateFullTwoLevel(),
|
||||
CreateFullTwoLevel(),
|
||||
CreateFullTwoLevel(),
|
||||
CreateFullTwoLevel(),
|
||||
CreateInner({CreateLeaf(), CreateLeaf(), CreateLeaf()})});
|
||||
}
|
||||
|
||||
unique_ref<DataInnerNode> CreateFourLevel() {
|
||||
return CreateInner({
|
||||
CreateFullThreeLevel(),
|
||||
CreateFullThreeLevel(),
|
||||
CreateInner({CreateFullTwoLevel(), CreateInner({CreateLeaf()})})
|
||||
});
|
||||
}
|
||||
|
||||
void EXPECT_CREATE_LEAF(uint32_t leafIndex) {
|
||||
uint64_t maxBytesPerLeaf = nodeStore->layout().maxBytesPerLeaf();
|
||||
EXPECT_CALL(traversor, calledCreateLeaf(Eq(leafIndex))).Times(1).WillOnce(Invoke([maxBytesPerLeaf] (uint32_t) {
|
||||
return make_shared<Data>(maxBytesPerLeaf);
|
||||
}));
|
||||
}
|
||||
|
||||
void EXPECT_TRAVERSE_LEAF(const BlockId &blockId, bool isRightBorderLeaf, uint32_t leafIndex) {
|
||||
EXPECT_CALL(traversor, calledExistingLeaf(KeyEq(blockId), isRightBorderLeaf, leafIndex)).Times(1);
|
||||
}
|
||||
|
||||
void EXPECT_TRAVERSE_ALL_CHILDREN_OF(const DataInnerNode &node, bool isRightBorderNode, uint32_t firstLeafIndex) {
|
||||
for (unsigned int i = 0; i < node.numChildren(); ++i) {
|
||||
EXPECT_TRAVERSE_LEAF(node.readChild(i).blockId(), isRightBorderNode && i == node.numChildren()-1, firstLeafIndex+i);
|
||||
}
|
||||
}
|
||||
|
||||
void EXPECT_DONT_TRAVERSE_ANY_LEAVES() {
|
||||
EXPECT_CALL(traversor, calledExistingLeaf(testing::_, testing::_, testing::_)).Times(0);
|
||||
EXPECT_CALL(traversor, calledCreateLeaf(testing::_)).Times(0);
|
||||
}
|
||||
|
||||
void TraverseLeaves(unique_ref<DataNode> root, uint32_t beginIndex, uint32_t endIndex, bool expectReadOnly) {
|
||||
root->flush();
|
||||
auto tree = treeStore.load(root->blockId()).value();
|
||||
auto* old_root = root.get();
|
||||
LeafTraverser(nodeStore, expectReadOnly).traverseAndUpdateRoot(&root, beginIndex, endIndex, [this] (uint32_t nodeIndex, bool isRightBorderNode,LeafHandle leaf) {
|
||||
traversor.calledExistingLeaf(leaf.node(), isRightBorderNode, nodeIndex);
|
||||
}, [this] (uint32_t nodeIndex) -> Data {
|
||||
return traversor.calledCreateLeaf(nodeIndex)->copy();
|
||||
}, [] (auto) {});
|
||||
if (expectReadOnly) {
|
||||
EXPECT_EQ(old_root, root.get());
|
||||
} else {
|
||||
EXPECT_NE(old_root, root.get());
|
||||
}
|
||||
}
|
||||
|
||||
TraversorMock traversor;
|
||||
};
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseSingleLeafTree) {
|
||||
unique_ref<DataNode> root = CreateLeaf();
|
||||
EXPECT_TRAVERSE_LEAF(root->blockId(), true, 0);
|
||||
|
||||
TraverseLeaves(std::move(root), 0, 1, true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseNothingInSingleLeafTree1) {
|
||||
unique_ref<DataNode> root = CreateLeaf();
|
||||
EXPECT_DONT_TRAVERSE_ANY_LEAVES();
|
||||
|
||||
TraverseLeaves(std::move(root), 0, 0, true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseNothingInSingleLeafTree2) {
|
||||
unique_ref<DataNode> root = CreateLeaf();
|
||||
EXPECT_DONT_TRAVERSE_ANY_LEAVES();
|
||||
|
||||
TraverseLeaves(std::move(root), 1, 1, true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseFirstLeafOfFullTwolevelTree) {
|
||||
auto root = CreateFullTwoLevel();
|
||||
EXPECT_TRAVERSE_LEAF(root->readChild(0).blockId(), false, 0);
|
||||
|
||||
TraverseLeaves(std::move(root), 0, 1, true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseMiddleLeafOfFullTwolevelTree) {
|
||||
auto root = CreateFullTwoLevel();
|
||||
EXPECT_TRAVERSE_LEAF(root->readChild(5).blockId(), false, 5);
|
||||
|
||||
TraverseLeaves(std::move(root), 5, 6, true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseLastLeafOfFullTwolevelTree) {
|
||||
auto root = CreateFullTwoLevel();
|
||||
EXPECT_TRAVERSE_LEAF(root->readChild(nodeStore->layout().maxChildrenPerInnerNode()-1).blockId(), true, nodeStore->layout().maxChildrenPerInnerNode()-1);
|
||||
|
||||
TraverseLeaves(std::move(root), nodeStore->layout().maxChildrenPerInnerNode()-1, nodeStore->layout().maxChildrenPerInnerNode(), true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseNothingInFullTwolevelTree1) {
|
||||
auto root = CreateFullTwoLevel();
|
||||
EXPECT_DONT_TRAVERSE_ANY_LEAVES();
|
||||
|
||||
TraverseLeaves(std::move(root), 0, 0, true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseNothingInFullTwolevelTree2) {
|
||||
auto root = CreateFullTwoLevel();
|
||||
EXPECT_DONT_TRAVERSE_ANY_LEAVES();
|
||||
|
||||
TraverseLeaves(std::move(root), nodeStore->layout().maxChildrenPerInnerNode(), nodeStore->layout().maxChildrenPerInnerNode(), true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseFirstLeafOfThreeLevelMinDataTree) {
|
||||
auto root = CreateThreeLevelMinData();
|
||||
EXPECT_TRAVERSE_LEAF(LoadInnerNode(root->readChild(0).blockId())->readChild(0).blockId(), false, 0);
|
||||
|
||||
TraverseLeaves(std::move(root), 0, 1, true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseMiddleLeafOfThreeLevelMinDataTree) {
|
||||
auto root = CreateThreeLevelMinData();
|
||||
EXPECT_TRAVERSE_LEAF(LoadInnerNode(root->readChild(0).blockId())->readChild(5).blockId(), false, 5);
|
||||
|
||||
TraverseLeaves(std::move(root), 5, 6, true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseLastLeafOfThreeLevelMinDataTree) {
|
||||
auto root = CreateThreeLevelMinData();
|
||||
EXPECT_TRAVERSE_LEAF(LoadInnerNode(root->readChild(1).blockId())->readChild(0).blockId(), true, nodeStore->layout().maxChildrenPerInnerNode());
|
||||
|
||||
TraverseLeaves(std::move(root), nodeStore->layout().maxChildrenPerInnerNode(), nodeStore->layout().maxChildrenPerInnerNode()+1, true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseAllLeavesOfFullTwolevelTree) {
|
||||
auto root = CreateFullTwoLevel();
|
||||
EXPECT_TRAVERSE_ALL_CHILDREN_OF(*root, true, 0);
|
||||
|
||||
TraverseLeaves(std::move(root), 0, nodeStore->layout().maxChildrenPerInnerNode(), true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseAllLeavesOfThreelevelMinDataTree) {
|
||||
auto root = CreateThreeLevelMinData();
|
||||
EXPECT_TRAVERSE_ALL_CHILDREN_OF(*LoadInnerNode(root->readChild(0).blockId()), false, 0);
|
||||
EXPECT_TRAVERSE_LEAF(LoadInnerNode(root->readChild(1).blockId())->readChild(0).blockId(), true, nodeStore->layout().maxChildrenPerInnerNode());
|
||||
|
||||
TraverseLeaves(std::move(root), 0, nodeStore->layout().maxChildrenPerInnerNode()+1, true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseFirstChildOfThreelevelMinDataTree) {
|
||||
auto root = CreateThreeLevelMinData();
|
||||
EXPECT_TRAVERSE_ALL_CHILDREN_OF(*LoadInnerNode(root->readChild(0).blockId()), false, 0);
|
||||
|
||||
TraverseLeaves(std::move(root), 0, nodeStore->layout().maxChildrenPerInnerNode(), true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseFirstPartOfFullTwolevelTree) {
|
||||
auto root = CreateFullTwoLevel();
|
||||
for (unsigned int i = 0; i < 5; ++i) {
|
||||
EXPECT_TRAVERSE_LEAF(root->readChild(i).blockId(), false, i);
|
||||
}
|
||||
|
||||
TraverseLeaves(std::move(root), 0, 5, true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseInnerPartOfFullTwolevelTree) {
|
||||
auto root = CreateFullTwoLevel();
|
||||
for (unsigned int i = 5; i < 10; ++i) {
|
||||
EXPECT_TRAVERSE_LEAF(root->readChild(i).blockId(), false, i);
|
||||
}
|
||||
|
||||
TraverseLeaves(std::move(root), 5, 10, true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseLastPartOfFullTwolevelTree) {
|
||||
auto root = CreateFullTwoLevel();
|
||||
for (unsigned int i = 5; i < nodeStore->layout().maxChildrenPerInnerNode(); ++i) {
|
||||
EXPECT_TRAVERSE_LEAF(root->readChild(i).blockId(), i==nodeStore->layout().maxChildrenPerInnerNode()-1, i);
|
||||
}
|
||||
|
||||
TraverseLeaves(std::move(root), 5, nodeStore->layout().maxChildrenPerInnerNode(), true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseFirstPartOfThreelevelMinDataTree) {
|
||||
auto root = CreateThreeLevelMinData();
|
||||
auto node = LoadInnerNode(root->readChild(0).blockId());
|
||||
for (unsigned int i = 0; i < 5; ++i) {
|
||||
EXPECT_TRAVERSE_LEAF(node->readChild(i).blockId(), false, i);
|
||||
}
|
||||
|
||||
TraverseLeaves(std::move(root), 0, 5, true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseInnerPartOfThreelevelMinDataTree) {
|
||||
auto root = CreateThreeLevelMinData();
|
||||
auto node = LoadInnerNode(root->readChild(0).blockId());
|
||||
for (unsigned int i = 5; i < 10; ++i) {
|
||||
EXPECT_TRAVERSE_LEAF(node->readChild(i).blockId(), false, i);
|
||||
}
|
||||
|
||||
TraverseLeaves(std::move(root), 5, 10, true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseLastPartOfThreelevelMinDataTree) {
|
||||
auto root = CreateThreeLevelMinData();
|
||||
auto node = LoadInnerNode(root->readChild(0).blockId());
|
||||
for (unsigned int i = 5; i < nodeStore->layout().maxChildrenPerInnerNode(); ++i) {
|
||||
EXPECT_TRAVERSE_LEAF(node->readChild(i).blockId(), false, i);
|
||||
}
|
||||
EXPECT_TRAVERSE_LEAF(LoadInnerNode(root->readChild(1).blockId())->readChild(0).blockId(), true, nodeStore->layout().maxChildrenPerInnerNode());
|
||||
|
||||
TraverseLeaves(std::move(root), 5, nodeStore->layout().maxChildrenPerInnerNode()+1, true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseFirstLeafOfThreelevelTree) {
|
||||
auto root = CreateThreeLevel();
|
||||
EXPECT_TRAVERSE_LEAF(LoadInnerNode(root->readChild(0).blockId())->readChild(0).blockId(), false, 0);
|
||||
|
||||
TraverseLeaves(std::move(root), 0, 1, true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseLastLeafOfThreelevelTree) {
|
||||
auto root = CreateThreeLevel();
|
||||
uint32_t numLeaves = nodeStore->layout().maxChildrenPerInnerNode() * 5 + 3;
|
||||
EXPECT_TRAVERSE_LEAF(LoadInnerNode(root->readLastChild().blockId())->readLastChild().blockId(), true, numLeaves-1);
|
||||
|
||||
TraverseLeaves(std::move(root), numLeaves-1, numLeaves, true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseMiddleLeafOfThreelevelTree) {
|
||||
auto root = CreateThreeLevel();
|
||||
uint32_t wantedLeafIndex = nodeStore->layout().maxChildrenPerInnerNode() * 2 + 5;
|
||||
EXPECT_TRAVERSE_LEAF(LoadInnerNode(root->readChild(2).blockId())->readChild(5).blockId(), false, wantedLeafIndex);
|
||||
|
||||
TraverseLeaves(std::move(root), wantedLeafIndex, wantedLeafIndex+1, true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseFirstPartOfThreelevelTree) {
|
||||
auto root = CreateThreeLevel();
|
||||
//Traverse all leaves in the first two children of the root
|
||||
for(unsigned int i = 0; i < 2; ++i) {
|
||||
EXPECT_TRAVERSE_ALL_CHILDREN_OF(*LoadInnerNode(root->readChild(i).blockId()), false, i * nodeStore->layout().maxChildrenPerInnerNode());
|
||||
}
|
||||
//Traverse some of the leaves in the third child of the root
|
||||
auto child = LoadInnerNode(root->readChild(2).blockId());
|
||||
for(unsigned int i = 0; i < 5; ++i) {
|
||||
EXPECT_TRAVERSE_LEAF(child->readChild(i).blockId(), false, 2 * nodeStore->layout().maxChildrenPerInnerNode() + i);
|
||||
}
|
||||
|
||||
TraverseLeaves(std::move(root), 0, 2 * nodeStore->layout().maxChildrenPerInnerNode() + 5, true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseMiddlePartOfThreelevelTree_OnlyFullChildren) {
|
||||
auto root = CreateThreeLevel();
|
||||
//Traverse some of the leaves in the second child of the root
|
||||
auto child = LoadInnerNode(root->readChild(1).blockId());
|
||||
for(unsigned int i = 5; i < nodeStore->layout().maxChildrenPerInnerNode(); ++i) {
|
||||
EXPECT_TRAVERSE_LEAF(child->readChild(i).blockId(), false, nodeStore->layout().maxChildrenPerInnerNode() + i);
|
||||
}
|
||||
//Traverse all leaves in the third and fourth child of the root
|
||||
for(unsigned int i = 2; i < 4; ++i) {
|
||||
EXPECT_TRAVERSE_ALL_CHILDREN_OF(*LoadInnerNode(root->readChild(i).blockId()),false, i * nodeStore->layout().maxChildrenPerInnerNode());
|
||||
}
|
||||
//Traverse some of the leaves in the fifth child of the root
|
||||
child = LoadInnerNode(root->readChild(4).blockId());
|
||||
for(unsigned int i = 0; i < 5; ++i) {
|
||||
EXPECT_TRAVERSE_LEAF(child->readChild(i).blockId(), false, 4 * nodeStore->layout().maxChildrenPerInnerNode() + i);
|
||||
}
|
||||
|
||||
TraverseLeaves(std::move(root), nodeStore->layout().maxChildrenPerInnerNode() + 5, 4 * nodeStore->layout().maxChildrenPerInnerNode() + 5, true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseMiddlePartOfThreelevelTree_AlsoLastNonfullChild) {
|
||||
auto root = CreateThreeLevel();
|
||||
//Traverse some of the leaves in the second child of the root
|
||||
auto child = LoadInnerNode(root->readChild(1).blockId());
|
||||
for(unsigned int i = 5; i < nodeStore->layout().maxChildrenPerInnerNode(); ++i) {
|
||||
EXPECT_TRAVERSE_LEAF(child->readChild(i).blockId(), false, nodeStore->layout().maxChildrenPerInnerNode() + i);
|
||||
}
|
||||
//Traverse all leaves in the third, fourth and fifth child of the root
|
||||
for(unsigned int i = 2; i < 5; ++i) {
|
||||
EXPECT_TRAVERSE_ALL_CHILDREN_OF(*LoadInnerNode(root->readChild(i).blockId()), false, i * nodeStore->layout().maxChildrenPerInnerNode());
|
||||
}
|
||||
//Traverse some of the leaves in the sixth child of the root
|
||||
child = LoadInnerNode(root->readChild(5).blockId());
|
||||
for(unsigned int i = 0; i < 2; ++i) {
|
||||
EXPECT_TRAVERSE_LEAF(child->readChild(i).blockId(), false, 5 * nodeStore->layout().maxChildrenPerInnerNode() + i);
|
||||
}
|
||||
|
||||
TraverseLeaves(std::move(root), nodeStore->layout().maxChildrenPerInnerNode() + 5, 5 * nodeStore->layout().maxChildrenPerInnerNode() + 2, true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseLastPartOfThreelevelTree) {
|
||||
auto root = CreateThreeLevel();
|
||||
//Traverse some of the leaves in the second child of the root
|
||||
auto child = LoadInnerNode(root->readChild(1).blockId());
|
||||
for(unsigned int i = 5; i < nodeStore->layout().maxChildrenPerInnerNode(); ++i) {
|
||||
EXPECT_TRAVERSE_LEAF(child->readChild(i).blockId(), false, nodeStore->layout().maxChildrenPerInnerNode() + i);
|
||||
}
|
||||
//Traverse all leaves in the third, fourth and fifth child of the root
|
||||
for(unsigned int i = 2; i < 5; ++i) {
|
||||
EXPECT_TRAVERSE_ALL_CHILDREN_OF(*LoadInnerNode(root->readChild(i).blockId()), false, i * nodeStore->layout().maxChildrenPerInnerNode());
|
||||
}
|
||||
//Traverse all of the leaves in the sixth child of the root
|
||||
child = LoadInnerNode(root->readChild(5).blockId());
|
||||
for(unsigned int i = 0; i < child->numChildren(); ++i) {
|
||||
EXPECT_TRAVERSE_LEAF(child->readChild(i).blockId(), i == child->numChildren()-1, 5 * nodeStore->layout().maxChildrenPerInnerNode() + i);
|
||||
}
|
||||
|
||||
TraverseLeaves(std::move(root), nodeStore->layout().maxChildrenPerInnerNode() + 5, 5 * nodeStore->layout().maxChildrenPerInnerNode() + child->numChildren(), true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseAllLeavesOfThreelevelTree) {
|
||||
auto root = CreateThreeLevel();
|
||||
//Traverse all leaves in the third, fourth and fifth child of the root
|
||||
for(unsigned int i = 0; i < 5; ++i) {
|
||||
EXPECT_TRAVERSE_ALL_CHILDREN_OF(*LoadInnerNode(root->readChild(i).blockId()), false, i * nodeStore->layout().maxChildrenPerInnerNode());
|
||||
}
|
||||
//Traverse all of the leaves in the sixth child of the root
|
||||
auto child = LoadInnerNode(root->readChild(5).blockId());
|
||||
for(unsigned int i = 0; i < child->numChildren(); ++i) {
|
||||
EXPECT_TRAVERSE_LEAF(child->readChild(i).blockId(), i==child->numChildren()-1, 5 * nodeStore->layout().maxChildrenPerInnerNode() + i);
|
||||
}
|
||||
|
||||
TraverseLeaves(std::move(root), 0, 5 * nodeStore->layout().maxChildrenPerInnerNode() + child->numChildren(), true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseAllLeavesOfFourLevelTree) {
|
||||
auto root = CreateFourLevel();
|
||||
//Traverse all leaves of the full threelevel tree in the first child
|
||||
auto firstChild = LoadInnerNode(root->readChild(0).blockId());
|
||||
for(unsigned int i = 0; i < firstChild->numChildren(); ++i) {
|
||||
EXPECT_TRAVERSE_ALL_CHILDREN_OF(*LoadInnerNode(firstChild->readChild(i).blockId()), false, i * nodeStore->layout().maxChildrenPerInnerNode());
|
||||
}
|
||||
//Traverse all leaves of the full threelevel tree in the second child
|
||||
auto secondChild = LoadInnerNode(root->readChild(1).blockId());
|
||||
for(unsigned int i = 0; i < secondChild->numChildren(); ++i) {
|
||||
EXPECT_TRAVERSE_ALL_CHILDREN_OF(*LoadInnerNode(secondChild->readChild(i).blockId()), false, (nodeStore->layout().maxChildrenPerInnerNode() + i) * nodeStore->layout().maxChildrenPerInnerNode());
|
||||
}
|
||||
//Traverse all leaves of the non-full threelevel tree in the third child
|
||||
auto thirdChild = LoadInnerNode(root->readChild(2).blockId());
|
||||
EXPECT_TRAVERSE_ALL_CHILDREN_OF(*LoadInnerNode(thirdChild->readChild(0).blockId()), false, 2 * nodeStore->layout().maxChildrenPerInnerNode() * nodeStore->layout().maxChildrenPerInnerNode());
|
||||
EXPECT_TRAVERSE_LEAF(LoadInnerNode(thirdChild->readChild(1).blockId())->readChild(0).blockId(), true, 2 * nodeStore->layout().maxChildrenPerInnerNode() * nodeStore->layout().maxChildrenPerInnerNode() + nodeStore->layout().maxChildrenPerInnerNode());
|
||||
|
||||
TraverseLeaves(std::move(root), 0, 2*nodeStore->layout().maxChildrenPerInnerNode()*nodeStore->layout().maxChildrenPerInnerNode() + nodeStore->layout().maxChildrenPerInnerNode() + 1, true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, TraverseMiddlePartOfFourLevelTree) {
|
||||
auto root = CreateFourLevel();
|
||||
//Traverse some leaves of the full threelevel tree in the first child
|
||||
auto firstChild = LoadInnerNode(root->readChild(0).blockId());
|
||||
auto secondChildOfFirstChild = LoadInnerNode(firstChild->readChild(1).blockId());
|
||||
for(unsigned int i = 5; i < secondChildOfFirstChild->numChildren(); ++i) {
|
||||
EXPECT_TRAVERSE_LEAF(secondChildOfFirstChild->readChild(i).blockId(), false, nodeStore->layout().maxChildrenPerInnerNode()+i);
|
||||
}
|
||||
for(unsigned int i = 2; i < firstChild->numChildren(); ++i) {
|
||||
EXPECT_TRAVERSE_ALL_CHILDREN_OF(*LoadInnerNode(firstChild->readChild(i).blockId()), false, i * nodeStore->layout().maxChildrenPerInnerNode());
|
||||
}
|
||||
//Traverse all leaves of the full threelevel tree in the second child
|
||||
auto secondChild = LoadInnerNode(root->readChild(1).blockId());
|
||||
for(unsigned int i = 0; i < secondChild->numChildren(); ++i) {
|
||||
EXPECT_TRAVERSE_ALL_CHILDREN_OF(*LoadInnerNode(secondChild->readChild(i).blockId()), false, (nodeStore->layout().maxChildrenPerInnerNode() + i) * nodeStore->layout().maxChildrenPerInnerNode());
|
||||
}
|
||||
//Traverse some leaves of the non-full threelevel tree in the third child
|
||||
auto thirdChild = LoadInnerNode(root->readChild(2).blockId());
|
||||
auto firstChildOfThirdChild = LoadInnerNode(thirdChild->readChild(0).blockId());
|
||||
for(unsigned int i = 0; i < firstChildOfThirdChild->numChildren()-1; ++i) {
|
||||
EXPECT_TRAVERSE_LEAF(firstChildOfThirdChild->readChild(i).blockId(), false, 2 * nodeStore->layout().maxChildrenPerInnerNode()*nodeStore->layout().maxChildrenPerInnerNode()+i);
|
||||
}
|
||||
|
||||
TraverseLeaves(std::move(root), nodeStore->layout().maxChildrenPerInnerNode()+5, 2*nodeStore->layout().maxChildrenPerInnerNode()*nodeStore->layout().maxChildrenPerInnerNode() + nodeStore->layout().maxChildrenPerInnerNode() -1, true);
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, LastLeafIsAlreadyResizedInCallback) {
|
||||
unique_ref<DataNode> root = CreateLeaf();
|
||||
root->flush();
|
||||
auto* old_root = root.get();
|
||||
auto tree = treeStore.load(root->blockId()).value();
|
||||
LeafTraverser(nodeStore, false).traverseAndUpdateRoot(&root, 0, 2, [this] (uint32_t leafIndex, bool /*isRightBorderNode*/, LeafHandle leaf) {
|
||||
if (leafIndex == 0) {
|
||||
EXPECT_EQ(nodeStore->layout().maxBytesPerLeaf(), leaf.node()->numBytes());
|
||||
} else {
|
||||
EXPECT_TRUE(false) << "only two nodes";
|
||||
}
|
||||
}, [] (uint32_t /*nodeIndex*/) -> Data {
|
||||
return Data(1);
|
||||
}, [] (auto) {});
|
||||
EXPECT_NE(old_root, root.get()); // expect that we grew the tree
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, LastLeafIsAlreadyResizedInCallback_TwoLevel) {
|
||||
unique_ref<DataNode> root = CreateFullTwoLevelWithLastLeafSize(5);
|
||||
root->flush();
|
||||
auto* old_root = root.get();
|
||||
auto tree = treeStore.load(root->blockId()).value();
|
||||
LeafTraverser(nodeStore, false).traverseAndUpdateRoot(&root, 0, nodeStore->layout().maxChildrenPerInnerNode()+1, [this] (uint32_t /*leafIndex*/, bool /*isRightBorderNode*/, LeafHandle leaf) {
|
||||
EXPECT_EQ(nodeStore->layout().maxBytesPerLeaf(), leaf.node()->numBytes());
|
||||
}, [] (uint32_t /*nodeIndex*/) -> Data {
|
||||
return Data(1);
|
||||
}, [] (auto) {});
|
||||
EXPECT_NE(old_root, root.get()); // expect that we grew the tree
|
||||
}
|
||||
|
||||
TEST_F(LeafTraverserTest, ResizeFromOneLeafToMultipleLeaves) {
|
||||
auto root = CreateLeaf();
|
||||
EXPECT_TRAVERSE_LEAF(root->blockId(), false, 0);
|
||||
//EXPECT_CALL(traversor, calledExistingLeaf(_, false, 0)).Times(1);
|
||||
for (uint32_t i = 1; i < 10; ++i) {
|
||||
EXPECT_CREATE_LEAF(i);
|
||||
}
|
||||
TraverseLeaves(std::move(root), 0, 10, false);
|
||||
}
|
||||
|
||||
////TODO Refactor the test cases that are too long
|
@ -1,87 +0,0 @@
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include "../testutils/DataTreeTest.h"
|
||||
#include "blobstore/implementations/onblocks/datatreestore/DataTree.h"
|
||||
#include "blobstore/implementations/onblocks/datanodestore/DataLeafNode.h"
|
||||
#include "blobstore/implementations/onblocks/datanodestore/DataInnerNode.h"
|
||||
#include <blockstore/implementations/testfake/FakeBlockStore.h>
|
||||
#include "blobstore/implementations/onblocks/datatreestore/impl/algorithms.h"
|
||||
|
||||
|
||||
using blockstore::BlockId;
|
||||
using cpputils::Data;
|
||||
using namespace blobstore::onblocks::datatreestore::algorithms;
|
||||
|
||||
class GetLowestInnerRightBorderNodeWithLessThanKChildrenOrNullTest: public DataTreeTest {
|
||||
public:
|
||||
struct TestData {
|
||||
BlockId rootNode;
|
||||
BlockId expectedResult;
|
||||
};
|
||||
|
||||
void check(const TestData &testData) {
|
||||
auto root = nodeStore->load(testData.rootNode).value();
|
||||
auto result = GetLowestInnerRightBorderNodeWithLessThanKChildrenOrNull(nodeStore, root.get());
|
||||
EXPECT_EQ(testData.expectedResult, result->blockId());
|
||||
}
|
||||
|
||||
TestData CreateTwoRightBorderNodes() {
|
||||
auto node = CreateInner({CreateLeaf()});
|
||||
return TestData{node->blockId(), node->blockId()};
|
||||
}
|
||||
|
||||
TestData CreateThreeRightBorderNodes() {
|
||||
auto node = CreateInner({CreateLeaf()});
|
||||
auto root = CreateInner({node.get()});
|
||||
return TestData{root->blockId(), node->blockId()};
|
||||
}
|
||||
|
||||
TestData CreateThreeRightBorderNodes_LastFull() {
|
||||
auto root = CreateInner({CreateFullTwoLevel()});
|
||||
return TestData{root->blockId(), root->blockId()};
|
||||
}
|
||||
|
||||
TestData CreateLargerTree() {
|
||||
auto node = CreateInner({CreateLeaf(), CreateLeaf()});
|
||||
auto root = CreateInner({CreateFullTwoLevel().get(), node.get()});
|
||||
return TestData{root->blockId(), node->blockId()};
|
||||
}
|
||||
};
|
||||
|
||||
TEST_F(GetLowestInnerRightBorderNodeWithLessThanKChildrenOrNullTest, Leaf) {
|
||||
auto leaf = nodeStore->createNewLeafNode(Data(0));
|
||||
auto result = GetLowestInnerRightBorderNodeWithLessThanKChildrenOrNull(nodeStore, leaf.get());
|
||||
EXPECT_EQ(nullptr, result.get());
|
||||
}
|
||||
|
||||
TEST_F(GetLowestInnerRightBorderNodeWithLessThanKChildrenOrNullTest, TwoRightBorderNodes) {
|
||||
auto testData = CreateTwoRightBorderNodes();
|
||||
check(testData);
|
||||
}
|
||||
|
||||
TEST_F(GetLowestInnerRightBorderNodeWithLessThanKChildrenOrNullTest, ThreeRightBorderNodes) {
|
||||
auto testData = CreateThreeRightBorderNodes();
|
||||
check(testData);
|
||||
}
|
||||
|
||||
TEST_F(GetLowestInnerRightBorderNodeWithLessThanKChildrenOrNullTest, ThreeRightBorderNodes_LastFull) {
|
||||
auto testData = CreateThreeRightBorderNodes_LastFull();
|
||||
check(testData);
|
||||
}
|
||||
|
||||
TEST_F(GetLowestInnerRightBorderNodeWithLessThanKChildrenOrNullTest, LargerTree) {
|
||||
auto testData = CreateLargerTree();
|
||||
check(testData);
|
||||
}
|
||||
|
||||
TEST_F(GetLowestInnerRightBorderNodeWithLessThanKChildrenOrNullTest, FullTwoLevelTree) {
|
||||
auto root = CreateFullTwoLevel();
|
||||
auto result = GetLowestInnerRightBorderNodeWithLessThanKChildrenOrNull(nodeStore, root.get());
|
||||
EXPECT_EQ(nullptr, result.get());
|
||||
}
|
||||
|
||||
TEST_F(GetLowestInnerRightBorderNodeWithLessThanKChildrenOrNullTest, FullThreeLevelTree) {
|
||||
auto root = CreateFullThreeLevel();
|
||||
auto result = GetLowestInnerRightBorderNodeWithLessThanKChildrenOrNull(nodeStore, root.get());
|
||||
EXPECT_EQ(nullptr, result.get());
|
||||
}
|
@ -1,119 +0,0 @@
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include "../testutils/DataTreeTest.h"
|
||||
#include "blobstore/implementations/onblocks/datatreestore/DataTree.h"
|
||||
#include "blobstore/implementations/onblocks/datanodestore/DataLeafNode.h"
|
||||
#include "blobstore/implementations/onblocks/datanodestore/DataInnerNode.h"
|
||||
#include <blockstore/implementations/testfake/FakeBlockStore.h>
|
||||
#include "blobstore/implementations/onblocks/datatreestore/impl/algorithms.h"
|
||||
|
||||
|
||||
using blockstore::BlockId;
|
||||
using namespace blobstore::onblocks::datatreestore::algorithms;
|
||||
|
||||
class GetLowestRightBorderNodeWithMoreThanOneChildOrNullTest: public DataTreeTest {
|
||||
public:
|
||||
struct TestData {
|
||||
BlockId rootNode;
|
||||
BlockId expectedResult;
|
||||
};
|
||||
|
||||
void check(const TestData &testData) {
|
||||
auto root = nodeStore->load(testData.rootNode).value();
|
||||
auto result = GetLowestRightBorderNodeWithMoreThanOneChildOrNull(nodeStore, root.get());
|
||||
EXPECT_EQ(testData.expectedResult, result->blockId());
|
||||
}
|
||||
|
||||
BlockId CreateLeafOnlyTree() {
|
||||
return CreateLeaf()->blockId();
|
||||
}
|
||||
|
||||
BlockId CreateTwoRightBorderNodes() {
|
||||
return CreateInner({CreateLeaf()})->blockId();
|
||||
}
|
||||
|
||||
BlockId CreateThreeRightBorderNodes() {
|
||||
return CreateInner({CreateInner({CreateLeaf()})})->blockId();
|
||||
}
|
||||
|
||||
TestData CreateThreeRightBorderNodes_LastFull() {
|
||||
auto node = CreateFullTwoLevel();
|
||||
auto root = CreateInner({node.get()});
|
||||
return TestData{root->blockId(), node->blockId()};
|
||||
}
|
||||
|
||||
TestData CreateLargerTree() {
|
||||
auto node = CreateInner({CreateLeaf(), CreateLeaf()});
|
||||
auto root = CreateInner({CreateFullTwoLevel().get(), node.get()});
|
||||
return TestData{root->blockId(), node->blockId()};
|
||||
}
|
||||
|
||||
TestData CreateThreeLevelTreeWithRightBorderSingleNodeChain() {
|
||||
auto root = CreateInner({CreateFullTwoLevel(), CreateInner({CreateLeaf()})});
|
||||
return TestData{root->blockId(), root->blockId()};
|
||||
}
|
||||
|
||||
TestData CreateThreeLevelTree() {
|
||||
auto node = CreateInner({CreateLeaf(), CreateLeaf()});
|
||||
auto root = CreateInner({CreateFullTwoLevel().get(), node.get()});
|
||||
return TestData{root->blockId(), node->blockId()};
|
||||
}
|
||||
|
||||
TestData CreateFullTwoLevelTree() {
|
||||
auto node = CreateFullTwoLevel();
|
||||
return TestData{node->blockId(), node->blockId()};
|
||||
}
|
||||
|
||||
TestData CreateFullThreeLevelTree() {
|
||||
auto root = CreateFullThreeLevel();
|
||||
return TestData{root->blockId(), root->readLastChild().blockId()};
|
||||
}
|
||||
};
|
||||
|
||||
TEST_F(GetLowestRightBorderNodeWithMoreThanOneChildOrNullTest, Leaf) {
|
||||
auto leaf = nodeStore->load(CreateLeafOnlyTree()).value();
|
||||
auto result = GetLowestRightBorderNodeWithMoreThanOneChildOrNull(nodeStore, leaf.get());
|
||||
EXPECT_EQ(nullptr, result.get());
|
||||
}
|
||||
|
||||
TEST_F(GetLowestRightBorderNodeWithMoreThanOneChildOrNullTest, TwoRightBorderNodes) {
|
||||
auto node = nodeStore->load(CreateTwoRightBorderNodes()).value();
|
||||
auto result = GetLowestRightBorderNodeWithMoreThanOneChildOrNull(nodeStore, node.get());
|
||||
EXPECT_EQ(nullptr, result.get());
|
||||
}
|
||||
|
||||
TEST_F(GetLowestRightBorderNodeWithMoreThanOneChildOrNullTest, ThreeRightBorderNodes) {
|
||||
auto node = nodeStore->load(CreateThreeRightBorderNodes()).value();
|
||||
auto result = GetLowestRightBorderNodeWithMoreThanOneChildOrNull(nodeStore, node.get());
|
||||
EXPECT_EQ(nullptr, result.get());
|
||||
}
|
||||
|
||||
TEST_F(GetLowestRightBorderNodeWithMoreThanOneChildOrNullTest, ThreeRightBorderNodes_LastFull) {
|
||||
auto testData = CreateThreeRightBorderNodes_LastFull();
|
||||
check(testData);
|
||||
}
|
||||
|
||||
TEST_F(GetLowestRightBorderNodeWithMoreThanOneChildOrNullTest, LargerTree) {
|
||||
auto testData = CreateLargerTree();
|
||||
check(testData);
|
||||
}
|
||||
|
||||
TEST_F(GetLowestRightBorderNodeWithMoreThanOneChildOrNullTest, FullTwoLevelTree) {
|
||||
auto testData = CreateFullTwoLevelTree();
|
||||
check(testData);
|
||||
}
|
||||
|
||||
TEST_F(GetLowestRightBorderNodeWithMoreThanOneChildOrNullTest, FullThreeLevelTree) {
|
||||
auto testData = CreateFullThreeLevelTree();
|
||||
check(testData);
|
||||
}
|
||||
|
||||
TEST_F(GetLowestRightBorderNodeWithMoreThanOneChildOrNullTest, ThreeLevelTreeWithRightBorderSingleNodeChain) {
|
||||
auto testData = CreateThreeLevelTreeWithRightBorderSingleNodeChain();
|
||||
check(testData);
|
||||
}
|
||||
|
||||
TEST_F(GetLowestRightBorderNodeWithMoreThanOneChildOrNullTest, ThreeLevelTree) {
|
||||
auto testData = CreateThreeLevelTree();
|
||||
check(testData);
|
||||
}
|
@ -1,242 +0,0 @@
|
||||
#include "DataTreeTest.h"
|
||||
|
||||
#include <blockstore/implementations/testfake/FakeBlockStore.h>
|
||||
#include <cpp-utils/pointer/cast.h>
|
||||
#include <cpp-utils/pointer/unique_ref_boost_optional_gtest_workaround.h>
|
||||
|
||||
using blobstore::onblocks::datanodestore::DataNodeStore;
|
||||
using blobstore::onblocks::datanodestore::DataNode;
|
||||
using blobstore::onblocks::datanodestore::DataInnerNode;
|
||||
using blobstore::onblocks::datanodestore::DataLeafNode;
|
||||
using blobstore::onblocks::datatreestore::DataTree;
|
||||
using blockstore::mock::MockBlockStore;
|
||||
using blockstore::BlockId;
|
||||
using cpputils::unique_ref;
|
||||
using cpputils::make_unique_ref;
|
||||
using std::initializer_list;
|
||||
using std::vector;
|
||||
using boost::none;
|
||||
using cpputils::dynamic_pointer_move;
|
||||
using cpputils::Data;
|
||||
|
||||
constexpr uint32_t DataTreeTest::BLOCKSIZE_BYTES;
|
||||
|
||||
DataTreeTest::DataTreeTest()
|
||||
:_blockStore(make_unique_ref<MockBlockStore>()),
|
||||
blockStore(_blockStore.get()),
|
||||
_nodeStore(make_unique_ref<DataNodeStore>(std::move(_blockStore), BLOCKSIZE_BYTES)),
|
||||
nodeStore(_nodeStore.get()),
|
||||
treeStore(std::move(_nodeStore)) {
|
||||
}
|
||||
|
||||
unique_ref<DataLeafNode> DataTreeTest::CreateLeaf() {
|
||||
return nodeStore->createNewLeafNode(Data(nodeStore->layout().maxBytesPerLeaf()));
|
||||
}
|
||||
|
||||
unique_ref<DataInnerNode> DataTreeTest::CreateInner(initializer_list<unique_ref<DataNode>> children) {
|
||||
vector<const DataNode*> childrenVector(children.size());
|
||||
std::transform(children.begin(), children.end(), childrenVector.begin(), [](const unique_ref<DataNode> &ptr) {return ptr.get();});
|
||||
return CreateInner(childrenVector);
|
||||
}
|
||||
|
||||
unique_ref<DataInnerNode> DataTreeTest::CreateInner(initializer_list<const DataNode*> children) {
|
||||
return CreateInner(vector<const DataNode*>(children));
|
||||
}
|
||||
|
||||
unique_ref<DataInnerNode> DataTreeTest::CreateInner(vector<const DataNode*> children) {
|
||||
ASSERT(children.size() >= 1, "An inner node must have at least one child");
|
||||
vector<BlockId> childrenKeys;
|
||||
childrenKeys.reserve(children.size());
|
||||
for (const DataNode *child : children) {
|
||||
ASSERT(child->depth() == (*children.begin())->depth(), "Children with different depth");
|
||||
childrenKeys.push_back(child->blockId());
|
||||
}
|
||||
auto node = nodeStore->createNewInnerNode((*children.begin())->depth()+1, childrenKeys);
|
||||
return node;
|
||||
}
|
||||
|
||||
unique_ref<DataTree> DataTreeTest::CreateLeafOnlyTree() {
|
||||
auto blockId = CreateLeaf()->blockId();
|
||||
return treeStore.load(blockId).value();
|
||||
}
|
||||
|
||||
void DataTreeTest::FillNode(DataInnerNode *node) {
|
||||
for(unsigned int i=node->numChildren(); i < nodeStore->layout().maxChildrenPerInnerNode(); ++i) {
|
||||
node->addChild(*CreateLeaf());
|
||||
}
|
||||
}
|
||||
|
||||
void DataTreeTest::FillNodeTwoLevel(DataInnerNode *node) {
|
||||
for(unsigned int i=node->numChildren(); i < nodeStore->layout().maxChildrenPerInnerNode(); ++i) {
|
||||
node->addChild(*CreateFullTwoLevel());
|
||||
}
|
||||
}
|
||||
|
||||
unique_ref<DataInnerNode> DataTreeTest::CreateFullTwoLevel() {
|
||||
auto root = CreateInner({CreateLeaf().get()});
|
||||
FillNode(root.get());
|
||||
return root;
|
||||
}
|
||||
|
||||
unique_ref<DataInnerNode> DataTreeTest::CreateThreeLevelMinData() {
|
||||
return CreateInner({
|
||||
CreateFullTwoLevel(),
|
||||
CreateInner({CreateLeaf()})
|
||||
});
|
||||
}
|
||||
|
||||
unique_ref<DataInnerNode> DataTreeTest::CreateFourLevelMinData() {
|
||||
return CreateInner({
|
||||
CreateFullThreeLevel(),
|
||||
CreateInner({CreateInner({CreateLeaf()})})
|
||||
});
|
||||
}
|
||||
|
||||
unique_ref<DataInnerNode> DataTreeTest::CreateFullThreeLevel() {
|
||||
auto root = CreateInner({CreateFullTwoLevel().get()});
|
||||
FillNodeTwoLevel(root.get());
|
||||
return root;
|
||||
}
|
||||
|
||||
unique_ref<DataInnerNode> DataTreeTest::LoadInnerNode(const BlockId &blockId) {
|
||||
auto node = nodeStore->load(blockId).value();
|
||||
auto casted = dynamic_pointer_move<DataInnerNode>(node);
|
||||
EXPECT_NE(none, casted) << "Is not an inner node";
|
||||
return std::move(*casted);
|
||||
}
|
||||
|
||||
unique_ref<DataLeafNode> DataTreeTest::LoadLeafNode(const BlockId &blockId) {
|
||||
auto node = nodeStore->load(blockId).value();
|
||||
auto casted = dynamic_pointer_move<DataLeafNode>(node);
|
||||
EXPECT_NE(none, casted) << "Is not a leaf node";
|
||||
return std::move(*casted);
|
||||
}
|
||||
|
||||
unique_ref<DataInnerNode> DataTreeTest::CreateTwoLeaf() {
|
||||
return CreateInner({CreateLeaf().get(), CreateLeaf().get()});
|
||||
}
|
||||
|
||||
unique_ref<DataTree> DataTreeTest::CreateTwoLeafTree() {
|
||||
auto blockId = CreateTwoLeaf()->blockId();
|
||||
return treeStore.load(blockId).value();
|
||||
}
|
||||
|
||||
unique_ref<DataLeafNode> DataTreeTest::CreateLeafWithSize(uint32_t size) {
|
||||
auto leaf = CreateLeaf();
|
||||
leaf->resize(size);
|
||||
return leaf;
|
||||
}
|
||||
|
||||
unique_ref<DataInnerNode> DataTreeTest::CreateTwoLeafWithSecondLeafSize(uint32_t size) {
|
||||
return CreateInner({
|
||||
CreateLeafWithSize(nodeStore->layout().maxBytesPerLeaf()),
|
||||
CreateLeafWithSize(size)
|
||||
});
|
||||
}
|
||||
|
||||
unique_ref<DataInnerNode> DataTreeTest::CreateFullTwoLevelWithLastLeafSize(uint32_t size) {
|
||||
auto root = CreateFullTwoLevel();
|
||||
for (uint32_t i = 0; i < root->numChildren()-1; ++i) {
|
||||
LoadLeafNode(root->readChild(i).blockId())->resize(nodeStore->layout().maxBytesPerLeaf());
|
||||
}
|
||||
LoadLeafNode(root->readLastChild().blockId())->resize(size);
|
||||
return root;
|
||||
}
|
||||
|
||||
unique_ref<DataInnerNode> DataTreeTest::CreateThreeLevelWithOneChildAndLastLeafSize(uint32_t size) {
|
||||
return CreateInner({
|
||||
CreateInner({
|
||||
CreateLeafWithSize(nodeStore->layout().maxBytesPerLeaf()),
|
||||
CreateLeafWithSize(size)
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
unique_ref<DataInnerNode> DataTreeTest::CreateThreeLevelWithTwoChildrenAndLastLeafSize(uint32_t size) {
|
||||
return CreateInner({
|
||||
CreateFullTwoLevelWithLastLeafSize(nodeStore->layout().maxBytesPerLeaf()),
|
||||
CreateInner({
|
||||
CreateLeafWithSize(nodeStore->layout().maxBytesPerLeaf()),
|
||||
CreateLeafWithSize(size)
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
unique_ref<DataInnerNode> DataTreeTest::CreateThreeLevelWithThreeChildrenAndLastLeafSize(uint32_t size) {
|
||||
return CreateInner({
|
||||
CreateFullTwoLevelWithLastLeafSize(nodeStore->layout().maxBytesPerLeaf()),
|
||||
CreateFullTwoLevelWithLastLeafSize(nodeStore->layout().maxBytesPerLeaf()),
|
||||
CreateInner({
|
||||
CreateLeafWithSize(nodeStore->layout().maxBytesPerLeaf()),
|
||||
CreateLeafWithSize(size)
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
unique_ref<DataInnerNode> DataTreeTest::CreateFullThreeLevelWithLastLeafSize(uint32_t size) {
|
||||
auto root = CreateFullThreeLevel();
|
||||
for (uint32_t i = 0; i < root->numChildren(); ++i) {
|
||||
auto node = LoadInnerNode(root->readChild(i).blockId());
|
||||
for (uint32_t j = 0; j < node->numChildren(); ++j) {
|
||||
LoadLeafNode(node->readChild(j).blockId())->resize(nodeStore->layout().maxBytesPerLeaf());
|
||||
}
|
||||
}
|
||||
LoadLeafNode(LoadInnerNode(root->readLastChild().blockId())->readLastChild().blockId())->resize(size);
|
||||
return root;
|
||||
}
|
||||
|
||||
unique_ref<DataInnerNode> DataTreeTest::CreateFourLevelMinDataWithLastLeafSize(uint32_t size) {
|
||||
return CreateInner({
|
||||
CreateFullThreeLevelWithLastLeafSize(nodeStore->layout().maxBytesPerLeaf()),
|
||||
CreateInner({CreateInner({CreateLeafWithSize(size)})})
|
||||
});
|
||||
}
|
||||
|
||||
void DataTreeTest::EXPECT_IS_LEAF_NODE(const BlockId &blockId) {
|
||||
auto node = LoadLeafNode(blockId);
|
||||
EXPECT_NE(nullptr, node.get());
|
||||
}
|
||||
|
||||
void DataTreeTest::EXPECT_IS_INNER_NODE(const BlockId &blockId) {
|
||||
auto node = LoadInnerNode(blockId);
|
||||
EXPECT_NE(nullptr, node.get());
|
||||
}
|
||||
|
||||
void DataTreeTest::EXPECT_IS_TWONODE_CHAIN(const BlockId &blockId) {
|
||||
auto node = LoadInnerNode(blockId);
|
||||
EXPECT_EQ(1u, node->numChildren());
|
||||
EXPECT_IS_LEAF_NODE(node->readChild(0).blockId());
|
||||
}
|
||||
|
||||
void DataTreeTest::EXPECT_IS_FULL_TWOLEVEL_TREE(const BlockId &blockId) {
|
||||
auto node = LoadInnerNode(blockId);
|
||||
EXPECT_EQ(nodeStore->layout().maxChildrenPerInnerNode(), node->numChildren());
|
||||
for (unsigned int i = 0; i < node->numChildren(); ++i) {
|
||||
EXPECT_IS_LEAF_NODE(node->readChild(i).blockId());
|
||||
}
|
||||
}
|
||||
|
||||
void DataTreeTest::EXPECT_IS_FULL_THREELEVEL_TREE(const BlockId &blockId) {
|
||||
auto root = LoadInnerNode(blockId);
|
||||
EXPECT_EQ(nodeStore->layout().maxChildrenPerInnerNode(), root->numChildren());
|
||||
for (unsigned int i = 0; i < root->numChildren(); ++i) {
|
||||
auto node = LoadInnerNode(root->readChild(i).blockId());
|
||||
EXPECT_EQ(nodeStore->layout().maxChildrenPerInnerNode(), node->numChildren());
|
||||
for (unsigned int j = 0; j < node->numChildren(); ++j) {
|
||||
EXPECT_IS_LEAF_NODE(node->readChild(j).blockId());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(misc-no-recursion)
|
||||
void DataTreeTest::CHECK_DEPTH(int depth, const BlockId &blockId) {
|
||||
if (depth == 0) {
|
||||
EXPECT_IS_LEAF_NODE(blockId);
|
||||
} else {
|
||||
auto node = LoadInnerNode(blockId);
|
||||
EXPECT_EQ(depth, node->depth());
|
||||
for (uint32_t i = 0; i < node->numChildren(); ++i) {
|
||||
CHECK_DEPTH(depth-1, node->readChild(i).blockId());
|
||||
}
|
||||
}
|
||||
}
|
@ -1,68 +0,0 @@
|
||||
#pragma once
|
||||
#ifndef MESSMER_BLOBSTORE_TEST_IMPLEMENTATIONS_ONBLOCKS_DATATREESTORE_DATATREETEST_H_
|
||||
#define MESSMER_BLOBSTORE_TEST_IMPLEMENTATIONS_ONBLOCKS_DATATREESTORE_DATATREETEST_H_
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include <blockstore/implementations/testfake/FakeBlockStore.h>
|
||||
|
||||
#include "blobstore/implementations/onblocks/datanodestore/DataNodeStore.h"
|
||||
#include "blobstore/implementations/onblocks/datanodestore/DataInnerNode.h"
|
||||
#include "blobstore/implementations/onblocks/datanodestore/DataLeafNode.h"
|
||||
#include "blobstore/implementations/onblocks/datatreestore/DataTree.h"
|
||||
#include "blobstore/implementations/onblocks/datatreestore/DataTreeStore.h"
|
||||
#include "blockstore/implementations/mock/MockBlockStore.h"
|
||||
|
||||
class DataTreeTest: public ::testing::Test {
|
||||
public:
|
||||
DataTreeTest();
|
||||
|
||||
static constexpr uint32_t BLOCKSIZE_BYTES = 256;
|
||||
|
||||
cpputils::unique_ref<blobstore::onblocks::datanodestore::DataLeafNode> CreateLeaf();
|
||||
cpputils::unique_ref<blobstore::onblocks::datanodestore::DataInnerNode> CreateInner(std::vector<const blobstore::onblocks::datanodestore::DataNode *> children);
|
||||
cpputils::unique_ref<blobstore::onblocks::datanodestore::DataInnerNode> CreateInner(std::initializer_list<const blobstore::onblocks::datanodestore::DataNode *> children);
|
||||
cpputils::unique_ref<blobstore::onblocks::datanodestore::DataInnerNode> CreateInner(std::initializer_list<cpputils::unique_ref<blobstore::onblocks::datanodestore::DataNode>> children);
|
||||
|
||||
cpputils::unique_ref<blobstore::onblocks::datatreestore::DataTree> CreateLeafOnlyTree();
|
||||
cpputils::unique_ref<blobstore::onblocks::datanodestore::DataInnerNode> CreateTwoLeaf();
|
||||
cpputils::unique_ref<blobstore::onblocks::datatreestore::DataTree> CreateTwoLeafTree();
|
||||
void FillNode(blobstore::onblocks::datanodestore::DataInnerNode *node);
|
||||
void FillNodeTwoLevel(blobstore::onblocks::datanodestore::DataInnerNode *node);
|
||||
cpputils::unique_ref<blobstore::onblocks::datanodestore::DataInnerNode> CreateFullTwoLevel();
|
||||
cpputils::unique_ref<blobstore::onblocks::datanodestore::DataInnerNode> CreateFullThreeLevel();
|
||||
|
||||
cpputils::unique_ref<blobstore::onblocks::datanodestore::DataInnerNode> CreateThreeLevelMinData();
|
||||
cpputils::unique_ref<blobstore::onblocks::datanodestore::DataInnerNode> CreateFourLevelMinData();
|
||||
|
||||
cpputils::unique_ref<blobstore::onblocks::datanodestore::DataInnerNode> LoadInnerNode(const blockstore::BlockId &blockId);
|
||||
cpputils::unique_ref<blobstore::onblocks::datanodestore::DataLeafNode> LoadLeafNode(const blockstore::BlockId &blockId);
|
||||
|
||||
cpputils::unique_ref<blobstore::onblocks::datanodestore::DataLeafNode> CreateLeafWithSize(uint32_t size);
|
||||
cpputils::unique_ref<blobstore::onblocks::datanodestore::DataInnerNode> CreateTwoLeafWithSecondLeafSize(uint32_t size);
|
||||
cpputils::unique_ref<blobstore::onblocks::datanodestore::DataInnerNode> CreateFullTwoLevelWithLastLeafSize(uint32_t size);
|
||||
cpputils::unique_ref<blobstore::onblocks::datanodestore::DataInnerNode> CreateThreeLevelWithOneChildAndLastLeafSize(uint32_t size);
|
||||
cpputils::unique_ref<blobstore::onblocks::datanodestore::DataInnerNode> CreateThreeLevelWithTwoChildrenAndLastLeafSize(uint32_t size);
|
||||
cpputils::unique_ref<blobstore::onblocks::datanodestore::DataInnerNode> CreateThreeLevelWithThreeChildrenAndLastLeafSize(uint32_t size);
|
||||
cpputils::unique_ref<blobstore::onblocks::datanodestore::DataInnerNode> CreateFullThreeLevelWithLastLeafSize(uint32_t size);
|
||||
cpputils::unique_ref<blobstore::onblocks::datanodestore::DataInnerNode> CreateFourLevelMinDataWithLastLeafSize(uint32_t size);
|
||||
|
||||
cpputils::unique_ref<blockstore::mock::MockBlockStore> _blockStore;
|
||||
blockstore::mock::MockBlockStore *blockStore;
|
||||
cpputils::unique_ref<blobstore::onblocks::datanodestore::DataNodeStore> _nodeStore;
|
||||
blobstore::onblocks::datanodestore::DataNodeStore *nodeStore;
|
||||
blobstore::onblocks::datatreestore::DataTreeStore treeStore;
|
||||
|
||||
void EXPECT_IS_LEAF_NODE(const blockstore::BlockId &blockId);
|
||||
void EXPECT_IS_INNER_NODE(const blockstore::BlockId &blockId);
|
||||
void EXPECT_IS_TWONODE_CHAIN(const blockstore::BlockId &blockId);
|
||||
void EXPECT_IS_FULL_TWOLEVEL_TREE(const blockstore::BlockId &blockId);
|
||||
void EXPECT_IS_FULL_THREELEVEL_TREE(const blockstore::BlockId &blockId);
|
||||
|
||||
void CHECK_DEPTH(int depth, const blockstore::BlockId &blockId);
|
||||
|
||||
private:
|
||||
DISALLOW_COPY_AND_ASSIGN(DataTreeTest);
|
||||
};
|
||||
|
||||
|
||||
#endif
|
@ -1,40 +0,0 @@
|
||||
#pragma once
|
||||
#ifndef MESSMER_BLOBSTORE_TEST_IMPLEMENTATIONS_ONBLOCKS_DATATREESTORE_GROWING_TESTUTILS_LEAFDATAFIXTURE_H_
|
||||
#define MESSMER_BLOBSTORE_TEST_IMPLEMENTATIONS_ONBLOCKS_DATATREESTORE_GROWING_TESTUTILS_LEAFDATAFIXTURE_H_
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include <cpp-utils/data/DataFixture.h>
|
||||
|
||||
// A data fixture containing data for a leaf.
|
||||
// The class can fill this data into a given leaf
|
||||
// and check, whether the data stored in a given leaf is correct.
|
||||
class LeafDataFixture {
|
||||
public:
|
||||
LeafDataFixture(int size, int iv = 0): _data(cpputils::DataFixture::generate(size, iv)) {}
|
||||
|
||||
void FillInto(blobstore::onblocks::datanodestore::DataLeafNode *leaf) const {
|
||||
leaf->resize(_data.size());
|
||||
leaf->write(_data.data(), 0, _data.size());
|
||||
}
|
||||
|
||||
void EXPECT_DATA_CORRECT(const blobstore::onblocks::datanodestore::DataLeafNode &leaf, int onlyCheckNumBytes = -1) const {
|
||||
if (onlyCheckNumBytes == -1) {
|
||||
EXPECT_EQ(_data.size(), leaf.numBytes());
|
||||
EXPECT_EQ(0, std::memcmp(_data.data(), loadData(leaf).data(), _data.size()));
|
||||
} else {
|
||||
EXPECT_LE(onlyCheckNumBytes, static_cast<int>(leaf.numBytes()));
|
||||
EXPECT_EQ(0, std::memcmp(_data.data(), loadData(leaf).data(), onlyCheckNumBytes));
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
static cpputils::Data loadData(const blobstore::onblocks::datanodestore::DataLeafNode &leaf) {
|
||||
cpputils::Data data(leaf.numBytes());
|
||||
leaf.read(data.data(), 0, leaf.numBytes());
|
||||
return data;
|
||||
}
|
||||
cpputils::Data _data;
|
||||
};
|
||||
|
||||
#endif
|
@ -1,87 +0,0 @@
|
||||
#pragma once
|
||||
#ifndef MESSMER_BLOBSTORE_TEST_IMPLEMENTATIONS_ONBLOCKS_DATATREESTORE_GROWING_TESTUTILS_TWOLEVELDATAFIXTURE_H_
|
||||
#define MESSMER_BLOBSTORE_TEST_IMPLEMENTATIONS_ONBLOCKS_DATATREESTORE_GROWING_TESTUTILS_TWOLEVELDATAFIXTURE_H_
|
||||
|
||||
#include <cpp-utils/macros.h>
|
||||
#include <cpp-utils/pointer/cast.h>
|
||||
#include "LeafDataFixture.h"
|
||||
#include <cpp-utils/assert/assert.h>
|
||||
|
||||
//TODO Rename, since we now allow any number of levels
|
||||
// A data fixture containing data for a two-level tree (one inner node with leaf children).
|
||||
// The class can fill this data into the leaf children of a given inner node
|
||||
// and given an inner node can check, whether the data stored is correct.
|
||||
class TwoLevelDataFixture {
|
||||
public:
|
||||
enum class SizePolicy {
|
||||
Random,
|
||||
Full,
|
||||
Unchanged
|
||||
};
|
||||
TwoLevelDataFixture(blobstore::onblocks::datanodestore::DataNodeStore *dataNodeStore, SizePolicy sizePolicy, int iv=0): _dataNodeStore(dataNodeStore), _iv(iv), _sizePolicy(sizePolicy) {}
|
||||
|
||||
void FillInto(blobstore::onblocks::datanodestore::DataNode *node) {
|
||||
// _iv-1 means there is no endLeafIndex - we fill all leaves.
|
||||
ForEachLeaf(node, _iv, _iv-1, [this] (blobstore::onblocks::datanodestore::DataLeafNode *leaf, int leafIndex) {
|
||||
LeafDataFixture(size(leafIndex, leaf), leafIndex).FillInto(leaf);
|
||||
});
|
||||
}
|
||||
|
||||
void EXPECT_DATA_CORRECT(blobstore::onblocks::datanodestore::DataNode *node, int maxCheckedLeaves = 0, int lastLeafMaxCheckedBytes = -1) {
|
||||
ForEachLeaf(node, _iv, _iv+maxCheckedLeaves, [this, maxCheckedLeaves, lastLeafMaxCheckedBytes] (blobstore::onblocks::datanodestore::DataLeafNode *leaf, int leafIndex) {
|
||||
if (leafIndex == _iv+maxCheckedLeaves-1) {
|
||||
// It is the last leaf
|
||||
LeafDataFixture(size(leafIndex, leaf), leafIndex).EXPECT_DATA_CORRECT(*leaf, lastLeafMaxCheckedBytes);
|
||||
} else {
|
||||
LeafDataFixture(size(leafIndex, leaf), leafIndex).EXPECT_DATA_CORRECT(*leaf);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private:
|
||||
// NOLINTNEXTLINE(misc-no-recursion)
|
||||
int ForEachLeaf(blobstore::onblocks::datanodestore::DataNode *node, int firstLeafIndex, int endLeafIndex, std::function<void (blobstore::onblocks::datanodestore::DataLeafNode*, int)> action) {
|
||||
if (firstLeafIndex == endLeafIndex) {
|
||||
return firstLeafIndex;
|
||||
}
|
||||
auto leaf = dynamic_cast<blobstore::onblocks::datanodestore::DataLeafNode*>(node);
|
||||
if (leaf != nullptr) {
|
||||
action(leaf, firstLeafIndex);
|
||||
return firstLeafIndex + 1;
|
||||
} else {
|
||||
auto inner = dynamic_cast<blobstore::onblocks::datanodestore::DataInnerNode*>(node);
|
||||
int leafIndex = firstLeafIndex;
|
||||
for (uint32_t i = 0; i < inner->numChildren(); ++i) {
|
||||
auto child = _dataNodeStore->load(inner->readChild(i).blockId()).value();
|
||||
leafIndex = ForEachLeaf(child.get(), leafIndex, endLeafIndex, action);
|
||||
}
|
||||
return leafIndex;
|
||||
}
|
||||
}
|
||||
|
||||
blobstore::onblocks::datanodestore::DataNodeStore *_dataNodeStore;
|
||||
int _iv;
|
||||
SizePolicy _sizePolicy;
|
||||
|
||||
int size(int childIndex, blobstore::onblocks::datanodestore::DataLeafNode *leaf) {
|
||||
switch (_sizePolicy) {
|
||||
case SizePolicy::Full:
|
||||
return _dataNodeStore->layout().maxBytesPerLeaf();
|
||||
case SizePolicy::Random:
|
||||
return mod(static_cast<int>(_dataNodeStore->layout().maxBytesPerLeaf() - childIndex), static_cast<int>(_dataNodeStore->layout().maxBytesPerLeaf()));
|
||||
case SizePolicy::Unchanged:
|
||||
return leaf->numBytes();
|
||||
default:
|
||||
ASSERT(false, "Unknown size policy");
|
||||
}
|
||||
}
|
||||
|
||||
int mod(int value, int mod) {
|
||||
while(value < 0) {
|
||||
value += mod;
|
||||
}
|
||||
return value;
|
||||
}
|
||||
};
|
||||
|
||||
#endif
|
@ -1,15 +0,0 @@
|
||||
#include "BlobStoreTest.h"
|
||||
|
||||
#include <blockstore/implementations/testfake/FakeBlockStore.h>
|
||||
#include "blobstore/implementations/onblocks/BlobStoreOnBlocks.h"
|
||||
#include <cpp-utils/pointer/gcc_4_8_compatibility.h>
|
||||
|
||||
using blobstore::onblocks::BlobStoreOnBlocks;
|
||||
using blockstore::testfake::FakeBlockStore;
|
||||
using cpputils::make_unique_ref;
|
||||
|
||||
constexpr uint32_t BlobStoreTest::BLOCKSIZE_BYTES;
|
||||
|
||||
BlobStoreTest::BlobStoreTest()
|
||||
: blobStore(make_unique_ref<BlobStoreOnBlocks>(make_unique_ref<FakeBlockStore>(), BLOCKSIZE_BYTES)) {
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user