diff --git a/.circleci/cat_ignore_eagain b/.circleci/cat_ignore_eagain deleted file mode 100755 index fc0f00d09..000000000 --- a/.circleci/cat_ignore_eagain +++ /dev/null @@ -1,54 +0,0 @@ -#! /bin/bash - -# Work around issue with parallel make output causing random error, as in -# make[1]: write error: stdout -# Probably due to a kernel bug: -# https://bugs.launchpad.net/ubuntu/+source/linux-signed/+bug/1814393 -# Seems to affect image ubuntu-1604:201903-01 and ubuntu-1604:202004-01 - -cd "$(dirname $0)" - -if [ ! -x cat_ignore_eagain.out ]; then - cc -x c -o cat_ignore_eagain.out - << EOF -#include <errno.h> -#include <stdio.h> -#include <unistd.h> -int main() { - int n, m, p; - char buf[1024]; - for (;;) { - n = read(STDIN_FILENO, buf, 1024); - if (n > 0 && n <= 1024) { - for (m = 0; m < n;) { - p = write(STDOUT_FILENO, buf + m, n - m); - if (p < 0) { - if (errno == EAGAIN) { - // ignore but pause a bit - usleep(100); - } else { - perror("write failed"); - return 42; - } - } else { - m += p; - } - } - } else if (n < 0) { - if (errno == EAGAIN) { - // ignore but pause a bit - usleep(100); - } else { - // Some non-ignorable error - perror("read failed"); - return 43; - } - } else { - // EOF - return 0; - } - } -} -EOF -fi - -exec ./cat_ignore_eagain.out diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index ce33da9ef..000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,872 +0,0 @@ -version: 2.1 - -orbs: - win: circleci/windows@2.4.0 - slack: circleci/slack@3.4.2 - -aliases: - - &notify-on-main-failure - fail_only: true - only_for_branches: main - -commands: - install-cmake-on-macos: - steps: - - run: - name: Install cmake on macos - command: | - HOMEBREW_NO_AUTO_UPDATE=1 brew install cmake - - install-jdk8-on-macos: - steps: - - run: - name: Install JDK 8 on macos - command: | - brew install --cask adoptopenjdk/openjdk/adoptopenjdk8 - - increase-max-open-files-on-macos: - steps: - - run: - name: Increase max open files - command: | - sudo sysctl -w kern.maxfiles=1048576 - sudo sysctl -w kern.maxfilesperproc=1048576 - sudo launchctl limit maxfiles 1048576 - - pre-steps: - steps: - - checkout - - run: - name: Setup Environment Variables - command: | - echo "export GTEST_THROW_ON_FAILURE=0" >> $BASH_ENV - echo "export GTEST_OUTPUT=\"xml:/tmp/test-results/\"" >> $BASH_ENV - echo "export SKIP_FORMAT_BUCK_CHECKS=1" >> $BASH_ENV - echo "export GTEST_COLOR=1" >> $BASH_ENV - echo "export CTEST_OUTPUT_ON_FAILURE=1" >> $BASH_ENV - echo "export CTEST_TEST_TIMEOUT=300" >> $BASH_ENV - echo "export ZLIB_DOWNLOAD_BASE=https://rocksdb-deps.s3.us-west-2.amazonaws.com/pkgs/zlib" >> $BASH_ENV - echo "export BZIP2_DOWNLOAD_BASE=https://rocksdb-deps.s3.us-west-2.amazonaws.com/pkgs/bzip2" >> $BASH_ENV - echo "export SNAPPY_DOWNLOAD_BASE=https://rocksdb-deps.s3.us-west-2.amazonaws.com/pkgs/snappy" >> $BASH_ENV - echo "export LZ4_DOWNLOAD_BASE=https://rocksdb-deps.s3.us-west-2.amazonaws.com/pkgs/lz4" >> $BASH_ENV - echo "export ZSTD_DOWNLOAD_BASE=https://rocksdb-deps.s3.us-west-2.amazonaws.com/pkgs/zstd" >> $BASH_ENV - - pre-steps-macos: - steps: - - pre-steps - - post-steps: - steps: - - slack/status: *notify-on-main-failure - - store_test_results: # store test result if there's any - path: /tmp/test-results - - store_artifacts: # store LOG for debugging if there's any - path: LOG - - run: # on fail, compress Test Logs for diagnosing the issue - name: Compress Test Logs - command: tar -cvzf t.tar.gz t - when: on_fail - - store_artifacts: # on fail, store Test Logs for diagnosing the issue - path: t.tar.gz - 
destination: test_logs - when: on_fail - - install-clang-10: - steps: - - run: - name: Install Clang 10 - command: | - echo "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-10 main" | sudo tee -a /etc/apt/sources.list - echo "deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-10 main" | sudo tee -a /etc/apt/sources.list - echo "APT::Acquire::Retries \"10\";" | sudo tee -a /etc/apt/apt.conf.d/80-retries # llvm.org unreliable - sudo apt-get update -y && sudo apt-get install -y clang-10 - - install-clang-13: - steps: - - run: - name: Install Clang 13 - command: | - echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-13 main" | sudo tee -a /etc/apt/sources.list - echo "deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-13 main" | sudo tee -a /etc/apt/sources.list - echo "APT::Acquire::Retries \"10\";" | sudo tee -a /etc/apt/apt.conf.d/80-retries # llvm.org unreliable - wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key|sudo apt-key add - - sudo apt-get update -y && sudo apt-get install -y clang-13 - - install-gflags: - steps: - - run: - name: Install gflags - command: | - sudo apt-get update -y && sudo apt-get install -y libgflags-dev - - install-benchmark: - steps: - - run: # currently doesn't support ubuntu-1604, which doesn't have the libbenchmark package; users can still install it by building it themselves - name: Install benchmark - command: | - sudo apt-get update -y && sudo apt-get install -y libbenchmark-dev - - install-librados: - steps: - - run: - name: Install librados - command: | - sudo apt-get update -y && sudo apt-get install -y librados-dev - - upgrade-cmake: - steps: - - run: - name: Upgrade cmake - command: | - sudo apt remove --purge cmake - sudo snap install cmake --classic - - install-gflags-on-macos: - steps: - - run: - name: Install gflags on macos - command: | - HOMEBREW_NO_AUTO_UPDATE=1 brew install gflags - - install-gtest-parallel: - steps: - - run: - name: Install gtest-parallel - command: | - git clone --single-branch --branch master --depth 1 https://github.com/google/gtest-parallel.git ~/gtest-parallel - echo 'export PATH=$HOME/gtest-parallel:$PATH' >> $BASH_ENV - - install-compression-libs: - steps: - - run: - name: Install compression libs - command: | - sudo apt-get update -y && sudo apt-get install -y libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev - -executors: - windows-2xlarge: - machine: - image: 'windows-server-2019-vs2019:stable' - resource_class: windows.2xlarge - shell: bash.exe - -jobs: - build-macos: - macos: - xcode: 12.5.1 - resource_class: large - environment: - ROCKSDB_DISABLE_JEMALLOC: 1 # jemalloc causes env_test to hang, disable it for now - steps: - - increase-max-open-files-on-macos - - install-gflags-on-macos - - pre-steps-macos - - run: ulimit -S -n 1048576 && OPT=-DCIRCLECI make V=1 J=32 -j32 check 2>&1 | .circleci/cat_ignore_eagain - - post-steps - - build-macos-cmake: - macos: - xcode: 12.5.1 - resource_class: large - steps: - - increase-max-open-files-on-macos - - install-cmake-on-macos - - install-gflags-on-macos - - pre-steps-macos - - run: ulimit -S -n 1048576 && (mkdir build && cd build && cmake -DWITH_GFLAGS=1 .. 
&& make V=1 -j32 && ctest -j10) 2>&1 | .circleci/cat_ignore_eagain - - post-steps - - build-linux: - machine: - image: ubuntu-1604:202104-01 - resource_class: 2xlarge - steps: - - pre-steps - - install-gflags - - run: make V=1 J=32 -j32 check 2>&1 | .circleci/cat_ignore_eagain - - post-steps - - build-linux-mem-env-librados: - machine: - image: ubuntu-1604:202104-01 - resource_class: 2xlarge - steps: - - pre-steps - - install-gflags - - install-librados - - run: MEM_ENV=1 ROCKSDB_USE_LIBRADOS=1 make V=1 J=32 -j32 check 2>&1 | .circleci/cat_ignore_eagain - - post-steps - - build-linux-encrypted-env: - machine: - image: ubuntu-1604:202104-01 - resource_class: 2xlarge - steps: - - pre-steps - - install-gflags - - run: ENCRYPTED_ENV=1 make V=1 J=32 -j32 check 2>&1 | .circleci/cat_ignore_eagain - - post-steps - - build-linux-shared_lib-alt_namespace-status_checked: - machine: - image: ubuntu-1604:202104-01 - resource_class: 2xlarge - steps: - - pre-steps - - install-gflags - - run: ASSERT_STATUS_CHECKED=1 TEST_UINT128_COMPAT=1 ROCKSDB_MODIFY_NPHASH=1 LIB_MODE=shared OPT="-DROCKSDB_NAMESPACE=alternative_rocksdb_ns" make V=1 -j32 check 2>&1 | .circleci/cat_ignore_eagain - - post-steps - - build-linux-release: - machine: - image: ubuntu-1604:202104-01 - resource_class: large - steps: - - checkout # check out the code in the project directory - - run: make V=1 -j8 release 2>&1 | .circleci/cat_ignore_eagain - - run: if ./db_stress --version; then false; else true; fi # ensure without gflags - - install-gflags - - run: make V=1 -j8 release 2>&1 | .circleci/cat_ignore_eagain - - run: ./db_stress --version # ensure with gflags - - post-steps - - build-linux-release-rtti: - machine: - image: ubuntu-1604:201903-01 - resource_class: large - steps: - - checkout # check out the code in the project directory - - run: make clean - - run: USE_RTTI=1 DEBUG_LEVEL=0 make V=1 -j8 static_lib tools db_bench 2>&1 | .circleci/cat_ignore_eagain - - run: if ./db_stress --version; then false; else true; fi # ensure without gflags - - run: sudo apt-get update -y && sudo apt-get install -y libgflags-dev - - run: make clean - - run: USE_RTTI=1 DEBUG_LEVEL=0 make V=1 -j8 static_lib tools db_bench 2>&1 | .circleci/cat_ignore_eagain - - run: ./db_stress --version # ensure with gflags - - build-linux-lite: - machine: - image: ubuntu-1604:202104-01 - resource_class: 2xlarge - steps: - - pre-steps - - install-gflags - - run: LITE=1 make V=1 J=32 -j32 check 2>&1 | .circleci/cat_ignore_eagain - - post-steps - - build-linux-lite-release: - machine: - image: ubuntu-1604:202104-01 - resource_class: large - steps: - - checkout # check out the code in the project directory - - run: LITE=1 make V=1 -j8 release 2>&1 | .circleci/cat_ignore_eagain - - run: if ./db_stress --version; then false; else true; fi # ensure without gflags - - install-gflags - - run: LITE=1 make V=1 -j8 release 2>&1 | .circleci/cat_ignore_eagain - - run: ./db_stress --version # ensure with gflags - - post-steps - - build-linux-clang-no_test_run: - machine: - image: ubuntu-1604:202104-01 - resource_class: xlarge - steps: - - checkout # check out the code in the project directory - - run: sudo apt-get update -y && sudo apt-get install -y clang libgflags-dev libtbb-dev - - run: CC=clang CXX=clang++ USE_CLANG=1 PORTABLE=1 make V=1 -j16 all 2>&1 | .circleci/cat_ignore_eagain - - post-steps - - build-linux-clang10-asan: - machine: - image: ubuntu-1604:202104-01 - resource_class: 2xlarge - steps: - - pre-steps - - install-gflags - - install-clang-10 - - run: 
COMPILE_WITH_ASAN=1 CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 check 2>&1 | .circleci/cat_ignore_eagain # aligned new doesn't work for a reason we haven't figured out - - post-steps - - build-linux-clang10-mini-tsan: - machine: - image: ubuntu-1604:202104-01 - resource_class: 2xlarge - steps: - - pre-steps - - install-gflags - - install-clang-10 - - run: COMPILE_WITH_TSAN=1 CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 check 2>&1 | .circleci/cat_ignore_eagain # aligned new doesn't work for a reason we haven't figured out. - - post-steps - - build-linux-clang10-ubsan: - machine: - image: ubuntu-1604:202104-01 - resource_class: 2xlarge - steps: - - pre-steps - - install-gflags - - install-clang-10 - - run: COMPILE_WITH_UBSAN=1 OPT="-fsanitize-blacklist=.circleci/ubsan_suppression_list.txt" CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 USE_CLANG=1 make V=1 -j32 ubsan_check 2>&1 | .circleci/cat_ignore_eagain # aligned new doesn't work for a reason we haven't figured out - - post-steps - - build-linux-clang10-clang-analyze: - machine: - image: ubuntu-1604:202104-01 - resource_class: 2xlarge - steps: - - pre-steps - - install-gflags - - install-clang-10 - - run: sudo apt-get update -y && sudo apt-get install -y clang-tools-10 - - run: CC=clang-10 CXX=clang++-10 ROCKSDB_DISABLE_ALIGNED_NEW=1 CLANG_ANALYZER="/usr/bin/clang++-10" CLANG_SCAN_BUILD=scan-build-10 USE_CLANG=1 make V=1 -j32 analyze 2>&1 | .circleci/cat_ignore_eagain # aligned new doesn't work for a reason we haven't figured out. For an unknown reason, passing "clang++-10" as CLANG_ANALYZER doesn't work, and we need a full path. - - post-steps - - build-linux-cmake: - machine: - image: ubuntu-1604:202104-01 - resource_class: 2xlarge - steps: - - pre-steps - - install-gflags - - upgrade-cmake - - run: (mkdir build && cd build && cmake -DWITH_GFLAGS=1 .. && make V=1 -j20 && ctest -j20) 2>&1 | .circleci/cat_ignore_eagain - - post-steps - - build-linux-cmake-ubuntu-20: - machine: - image: ubuntu-2004:202104-01 - resource_class: 2xlarge - steps: - - pre-steps - - install-gflags - - install-benchmark - - run: (mkdir build && cd build && cmake -DWITH_GFLAGS=1 -DWITH_BENCHMARK=1 .. 
&& make V=1 -j20 && ctest -j20 && make microbench) 2>&1 | .circleci/cat_ignore_eagain - - post-steps - - build-linux-unity-and-headers: - docker: # executor type - - image: gcc:latest - resource_class: large - steps: - - checkout # check out the code in the project directory - - run: apt-get update -y && apt-get install -y libgflags-dev - - run: TEST_TMPDIR=/dev/shm && make V=1 -j8 unity_test 2>&1 | .circleci/cat_ignore_eagain - - run: make V=1 -j8 -k check-headers 2>&1 | .circleci/cat_ignore_eagain # could be moved to a different build - - post-steps - - build-linux-gcc-4_8-no_test_run: - machine: - image: ubuntu-1604:202104-01 - resource_class: large - steps: - - pre-steps - - run: sudo apt-get update -y && sudo apt-get install gcc-4.8 g++-4.8 libgflags-dev - - run: CC=gcc-4.8 CXX=g++-4.8 V=1 SKIP_LINK=1 make -j8 all 2>&1 | .circleci/cat_ignore_eagain # Linking broken because libgflags compiled with newer ABI - - post-steps - - build-linux-gcc-8-no_test_run: - machine: - image: ubuntu-2004:202010-01 - resource_class: large - steps: - - pre-steps - - run: sudo apt-get update -y && sudo apt-get install gcc-8 g++-8 libgflags-dev - - run: CC=gcc-8 CXX=g++-8 V=1 SKIP_LINK=1 make -j8 all 2>&1 | .circleci/cat_ignore_eagain # Linking broken because libgflags compiled with newer ABI - - post-steps - - build-linux-gcc-9-no_test_run: - machine: - image: ubuntu-2004:202010-01 - resource_class: large - steps: - - pre-steps - - run: sudo apt-get update -y && sudo apt-get install gcc-9 g++-9 libgflags-dev - - run: CC=gcc-9 CXX=g++-9 V=1 SKIP_LINK=1 make -j8 all 2>&1 | .circleci/cat_ignore_eagain # Linking broken because libgflags compiled with newer ABI - - post-steps - - build-linux-gcc-10-cxx20-no_test_run: - machine: - image: ubuntu-2004:202010-01 - resource_class: xlarge - steps: - - pre-steps - - run: sudo apt-get update -y && sudo apt-get install gcc-10 g++-10 libgflags-dev - - run: CC=gcc-10 CXX=g++-10 V=1 SKIP_LINK=1 ROCKSDB_CXX_STANDARD=c++20 make -j16 all 2>&1 | .circleci/cat_ignore_eagain # Linking broken because libgflags compiled with newer ABI - - post-steps - - build-linux-gcc-11-no_test_run: - machine: - image: ubuntu-2004:202010-01 - resource_class: xlarge - steps: - - pre-steps - - run: sudo apt-get update -y && sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test && sudo apt-get install gcc-11 g++-11 libgflags-dev - - run: CC=gcc-11 CXX=g++-11 V=1 SKIP_LINK=1 make -j16 all 2>&1 | .circleci/cat_ignore_eagain # Linking broken because libgflags compiled with newer ABI - - post-steps - - build-linux-clang-13-no_test_run: - machine: - image: ubuntu-2004:202010-01 - resource_class: xlarge - steps: - - pre-steps - - install-clang-13 - - run: CC=clang-13 CXX=clang++-13 USE_CLANG=1 make -j16 all 2>&1 | .circleci/cat_ignore_eagain - - post-steps - - # This job is only to make sure the microbench tests are able to run; the benchmark result is not meaningful as the CI host changes. 
- build-linux-microbench: - machine: - image: ubuntu-2004:202010-01 - resource_class: xlarge - steps: - - pre-steps - - install-benchmark - - run: DEBUG_LEVEL=0 make microbench 2>&1 | .circleci/cat_ignore_eagain - - post-steps - - build-windows: - executor: windows-2xlarge - parameters: - extra_cmake_opt: - default: "" - type: string - vs_year: - default: "2019" - type: string - cmake_generator: - default: "Visual Studio 16 2019" - type: string - environment: - THIRDPARTY_HOME: C:/Users/circleci/thirdparty - CMAKE_HOME: C:/Users/circleci/thirdparty/cmake-3.16.4-win64-x64 - CMAKE_BIN: C:/Users/circleci/thirdparty/cmake-3.16.4-win64-x64/bin/cmake.exe - SNAPPY_HOME: C:/Users/circleci/thirdparty/snappy-1.1.7 - SNAPPY_INCLUDE: C:/Users/circleci/thirdparty/snappy-1.1.7;C:/Users/circleci/thirdparty/snappy-1.1.7/build - SNAPPY_LIB_DEBUG: C:/Users/circleci/thirdparty/snappy-1.1.7/build/Debug/snappy.lib - VS_YEAR: <<parameters.vs_year>> - CMAKE_GENERATOR: <<parameters.cmake_generator>> - steps: - - checkout - - run: - name: "Setup VS" - command: | - if [[ "${VS_YEAR}" == "2019" ]]; then - echo "VS2019 already present." - elif [[ "${VS_YEAR}" == "2017" ]]; then - echo "Installing VS2017..." - powershell .circleci/vs2017_install.ps1 - elif [[ "${VS_YEAR}" == "2015" ]]; then - echo "Installing VS2015..." - powershell .circleci/vs2015_install.ps1 - fi - - store_artifacts: - path: \Users\circleci\AppData\Local\Temp\vslogs.zip - - run: - name: "Install thirdparty dependencies" - command: | - mkdir ${THIRDPARTY_HOME} - cd ${THIRDPARTY_HOME} - echo "Installing CMake..." - curl --fail --silent --show-error --output cmake-3.16.4-win64-x64.zip --location https://github.com/Kitware/CMake/releases/download/v3.16.4/cmake-3.16.4-win64-x64.zip - unzip -q cmake-3.16.4-win64-x64.zip - echo "Building Snappy dependency..." - curl --fail --silent --show-error --output snappy-1.1.7.zip --location https://github.com/google/snappy/archive/1.1.7.zip - unzip -q snappy-1.1.7.zip - cd snappy-1.1.7 - mkdir build - cd build - ${CMAKE_BIN} -G "${CMAKE_GENERATOR}" .. - msbuild.exe Snappy.sln -maxCpuCount -property:Configuration=Debug -property:Platform=x64 - - run: - name: "Build RocksDB" - command: | - mkdir build - cd build - ${CMAKE_BIN} -G "${CMAKE_GENERATOR}" -DCMAKE_BUILD_TYPE=Debug -DOPTDBG=1 -DPORTABLE=1 -DSNAPPY=1 -DJNI=1 << parameters.extra_cmake_opt >> .. - cd .. 
- echo "Building with VS version: ${CMAKE_GENERATOR}" - msbuild.exe build/rocksdb.sln -maxCpuCount -property:Configuration=Debug -property:Platform=x64 - - run: - name: "Test RocksDB" - shell: powershell.exe - command: | - build_tools\run_ci_db_test.ps1 -SuiteRun db_basic_test,db_test,db_test2,db_merge_operand_test,bloom_test,c_test,coding_test,crc32c_test,dynamic_bloom_test,env_basic_test,env_test,hash_test,random_test -Concurrency 16 - - build-linux-java: - machine: - image: ubuntu-1604:202104-01 - resource_class: large - environment: - JAVA_HOME: /usr/lib/jvm/java-1.8.0-openjdk-amd64 - steps: - - pre-steps - - install-gflags - - run: - name: "Set Java Environment" - command: | - echo "JAVA_HOME=${JAVA_HOME}" - echo 'export PATH=$JAVA_HOME/bin:$PATH' >> $BASH_ENV - which java && java -version - which javac && javac -version - - run: - name: "Build RocksDBJava Shared Library" - command: make V=1 J=8 -j8 rocksdbjava 2>&1 | .circleci/cat_ignore_eagain - - run: - name: "Test RocksDBJava" - command: make V=1 J=8 -j8 jtest 2>&1 | .circleci/cat_ignore_eagain - - post-steps - - build-linux-java-static: - machine: - image: ubuntu-1604:202104-01 - resource_class: large - environment: - JAVA_HOME: /usr/lib/jvm/java-1.8.0-openjdk-amd64 - steps: - - pre-steps - - install-gflags - - run: - name: "Set Java Environment" - command: | - echo "JAVA_HOME=${JAVA_HOME}" - echo 'export PATH=$JAVA_HOME/bin:$PATH' >> $BASH_ENV - which java && java -version - which javac && javac -version - - run: - name: "Build RocksDBJava Static Library" - command: make V=1 J=8 -j8 rocksdbjavastatic 2>&1 | .circleci/cat_ignore_eagain - - post-steps - - build-macos-java: - macos: - xcode: 12.5.1 - resource_class: medium - environment: - JAVA_HOME: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home - ROCKSDB_DISABLE_JEMALLOC: 1 # jemalloc causes java 8 crash - steps: - - increase-max-open-files-on-macos - - install-gflags-on-macos - - install-jdk8-on-macos - - pre-steps-macos - - run: - name: "Set Java Environment" - command: | - echo "JAVA_HOME=${JAVA_HOME}" - echo 'export PATH=$JAVA_HOME/bin:$PATH' >> $BASH_ENV - which java && java -version - which javac && javac -version - - run: - name: "Build RocksDBJava Shared Library" - command: make V=1 J=8 -j8 rocksdbjava 2>&1 | .circleci/cat_ignore_eagain - - run: - name: "Test RocksDBJava" - command: make V=1 J=8 -j8 jtest 2>&1 | .circleci/cat_ignore_eagain - - post-steps - - build-macos-java-static: - macos: - xcode: 12.5.1 - resource_class: medium - environment: - JAVA_HOME: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home - steps: - - increase-max-open-files-on-macos - - install-gflags-on-macos - - install-cmake-on-macos - - install-jdk8-on-macos - - pre-steps-macos - - run: - name: "Set Java Environment" - command: | - echo "JAVA_HOME=${JAVA_HOME}" - echo 'export PATH=$JAVA_HOME/bin:$PATH' >> $BASH_ENV - which java && java -version - which javac && javac -version - - run: - name: "Build RocksDBJava x86 and ARM Static Libraries" - command: make V=1 J=8 -j8 rocksdbjavastaticosx 2>&1 | .circleci/cat_ignore_eagain - - post-steps - - build-macos-java-static-universal: - macos: - xcode: 12.5.1 - resource_class: medium - environment: - JAVA_HOME: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home - steps: - - increase-max-open-files-on-macos - - install-gflags-on-macos - - install-cmake-on-macos - - install-jdk8-on-macos - - pre-steps-macos - - run: - name: "Set Java Environment" - command: | - echo "JAVA_HOME=${JAVA_HOME}" - echo 'export 
PATH=$JAVA_HOME/bin:$PATH' >> $BASH_ENV - which java && java -version - which javac && javac -version - - run: - name: "Build RocksDBJava Universal Binary Static Library" - command: make V=1 J=8 -j8 rocksdbjavastaticosx_ub 2>&1 | .circleci/cat_ignore_eagain - - post-steps - - build-examples: - machine: - image: ubuntu-1604:202104-01 - resource_class: large - steps: - - pre-steps - - install-gflags - - run: - name: "Build examples" - command: | - OPT=-DTRAVIS V=1 make -j4 static_lib && cd examples && make -j4 | ../.circleci/cat_ignore_eagain - - post-steps - - build-cmake-mingw: - machine: - image: ubuntu-1604:202104-01 - resource_class: 2xlarge - steps: - - pre-steps - - install-gflags - - run: sudo apt-get update -y && sudo apt-get install -y mingw-w64 - - run: sudo update-alternatives --set x86_64-w64-mingw32-g++ /usr/bin/x86_64-w64-mingw32-g++-posix - - run: - name: "Build cmake-mingw" - command: | - sudo apt-get install snapd && sudo snap install cmake --beta --classic - export PATH=/snap/bin:$PATH - sudo apt-get install -y openjdk-8-jdk - export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64 - export PATH=$JAVA_HOME/bin:$PATH - echo "JAVA_HOME=${JAVA_HOME}" - which java && java -version - which javac && javac -version - mkdir build && cd build && cmake -DJNI=1 -DWITH_GFLAGS=OFF .. -DCMAKE_C_COMPILER=x86_64-w64-mingw32-gcc -DCMAKE_CXX_COMPILER=x86_64-w64-mingw32-g++ -DCMAKE_SYSTEM_NAME=Windows && make -j4 rocksdb rocksdbjni - - post-steps - - build-linux-non-shm: - machine: - image: ubuntu-1604:202104-01 - resource_class: 2xlarge - parameters: - start_test: - default: "" - type: string - end_test: - default: "" - type: string - steps: - - pre-steps - - install-gflags - - install-gtest-parallel - - run: - name: "Build unit tests" - command: | - echo "env: $(env)" - echo "** done env" - ROCKSDBTESTS_START=<<parameters.start_test>> ROCKSDBTESTS_END=<<parameters.end_test>> ROCKSDBTESTS_SUBSET_TESTS_TO_FILE=/tmp/test_list make V=1 -j32 --output-sync=target build_subset_tests - - run: - name: "Run unit tests in parallel" - command: | - sed -i 's/[[:space:]]*$//; s/ / \.\//g; s/.*/.\/&/' /tmp/test_list - cat /tmp/test_list - export TEST_TMPDIR=/tmp/rocksdb_test_tmp - gtest-parallel $(</tmp/test_list) 2>&1 | .circleci/cat_ignore_eagain - - post-steps - - build-linux-arm: - machine: - image: ubuntu-2004:202101-01 - resource_class: arm.large - steps: - - pre-steps - - install-gflags - - run: ROCKSDBTESTS_PLATFORM_DEPENDENT=only make V=1 J=4 -j4 all_but_some_tests check_some 2>&1 | .circleci/cat_ignore_eagain - - post-steps - - build-linux-arm-cmake-no_test_run: - machine: - image: ubuntu-2004:202101-01 - resource_class: arm.large - environment: - JAVA_HOME: /usr/lib/jvm/java-8-openjdk-arm64 - steps: - - pre-steps - - install-gflags - - run: - name: "Set Java Environment" - command: | - echo "JAVA_HOME=${JAVA_HOME}" - echo 'export PATH=$JAVA_HOME/bin:$PATH' >> $BASH_ENV - which java && java -version - which javac && javac -version - - run: - name: "Build with cmake" - command: | - mkdir build - cd build - cmake -DCMAKE_BUILD_TYPE=Release -DWITH_TESTS=0 -DWITH_GFLAGS=1 -DWITH_BENCHMARK_TOOLS=0 -DWITH_TOOLS=0 -DWITH_CORE_TOOLS=1 .. - make -j4 - - run: - name: "Build Java with cmake" - command: | - rm -rf build - mkdir build - cd build - cmake -DJNI=1 -DCMAKE_BUILD_TYPE=Release -DWITH_GFLAGS=1 .. 
- make -j4 rocksdb rocksdbjni - - post-steps - - build-format-compatible: - machine: - image: ubuntu-1604:202104-01 - resource_class: 2xlarge - steps: - - pre-steps - - install-gflags - - install-compression-libs - - run: - name: "test" - command: | - export TEST_TMPDIR=/dev/shm/rocksdb - rm -rf /dev/shm/rocksdb - mkdir /dev/shm/rocksdb - tools/check_format_compatible.sh - - post-steps - -workflows: - version: 2 - build-linux: - jobs: - - build-linux - build-linux-cmake: - jobs: - - build-linux-cmake - - build-linux-cmake-ubuntu-20 - build-linux-mem-env-librados: - jobs: - - build-linux-mem-env-librados - build-linux-encrypted-env: - jobs: - - build-linux-encrypted-env - build-linux-shared_lib-alt_namespace-status_checked: - jobs: - - build-linux-shared_lib-alt_namespace-status_checked - build-linux-lite: - jobs: - - build-linux-lite - build-linux-release: - jobs: - - build-linux-release - build-linux-release-rtti: - jobs: - - build-linux-release-rtti - build-linux-lite-release: - jobs: - - build-linux-lite-release - build-linux-clang10-asan: - jobs: - - build-linux-clang10-asan - build-linux-clang10-mini-tsan: - jobs: - - build-linux-clang10-mini-tsan - build-linux-clang10-ubsan: - jobs: - - build-linux-clang10-ubsan - build-linux-clang10-clang-analyze: - jobs: - - build-linux-clang10-clang-analyze - build-linux-unity-and-headers: - jobs: - - build-linux-unity-and-headers - build-windows-vs2019: - jobs: - - build-windows: - name: "build-windows-vs2019" - build-windows-vs2019-cxx20: - jobs: - - build-windows: - name: "build-windows-vs2019-cxx20" - extra_cmake_opt: -DCMAKE_CXX_STANDARD=20 - build-windows-vs2017: - jobs: - - build-windows: - name: "build-windows-vs2017" - vs_year: "2017" - cmake_generator: "Visual Studio 15 Win64" - build-java: - jobs: - - build-linux-java - - build-linux-java-static - - build-macos-java - - build-macos-java-static - - build-macos-java-static-universal - build-examples: - jobs: - - build-examples - build-linux-non-shm: - jobs: - - build-linux-non-shm: - start_test: "" - end_test: "db_options_test" # make sure unique in src.mk - - build-linux-non-shm: - start_test: "db_options_test" # make sure unique in src.mk - end_test: "filename_test" # make sure unique in src.mk - - build-linux-non-shm: - start_test: "filename_test" # make sure unique in src.mk - end_test: "statistics_test" # make sure unique in src.mk - - build-linux-non-shm: - start_test: "statistics_test" # make sure unique in src.mk - end_test: "" - build-linux-compilers-no_test_run: - jobs: - - build-linux-clang-no_test_run - - build-linux-clang-13-no_test_run - - build-linux-gcc-4_8-no_test_run - - build-linux-gcc-8-no_test_run - - build-linux-gcc-9-no_test_run - - build-linux-gcc-10-cxx20-no_test_run - - build-linux-gcc-11-no_test_run - - build-linux-arm-cmake-no_test_run - build-macos: - jobs: - - build-macos - build-macos-cmake: - jobs: - - build-macos-cmake - build-cmake-mingw: - jobs: - - build-cmake-mingw - build-linux-arm: - jobs: - - build-linux-arm - build-microbench: - jobs: - - build-linux-microbench - nightly: - triggers: - - schedule: - cron: "0 0 * * *" - filters: - branches: - only: - - main - jobs: - - build-format-compatible - - build-linux-arm-test-full diff --git a/.circleci/ubsan_suppression_list.txt b/.circleci/ubsan_suppression_list.txt deleted file mode 100644 index d7db81806..000000000 --- a/.circleci/ubsan_suppression_list.txt +++ /dev/null @@ -1,6 +0,0 @@ -# Suppress UBSAN warnings related to stl_tree.h, e.g. 
-# UndefinedBehaviorSanitizer: undefined-behavior /usr/bin/../lib/gcc/x86_64-linux-gnu/5.4.0/../../../../include/c++/5.4.0/bits/stl_tree.h:1505:43 in -# /usr/bin/../lib/gcc/x86_64-linux-gnu/5.4.0/../../../../include/c++/5.4.0/bits/stl_tree.h:1505:43: -# runtime error: upcast of address 0x000001fa8820 with insufficient space for an object of type -# 'std::_Rb_tree_node, rocksdb::(anonymous namespace)::LockHoldingInfo> >' -src:*bits/stl_tree.h diff --git a/.circleci/vs2015_install.ps1 b/.circleci/vs2015_install.ps1 deleted file mode 100644 index 754af0e57..000000000 --- a/.circleci/vs2015_install.ps1 +++ /dev/null @@ -1,24 +0,0 @@ -$VS_DOWNLOAD_LINK = "https://go.microsoft.com/fwlink/?LinkId=691126" -$COLLECT_DOWNLOAD_LINK = "https://aka.ms/vscollect.exe" -curl.exe --retry 3 -kL $VS_DOWNLOAD_LINK --output vs_installer.exe -if ($LASTEXITCODE -ne 0) { - echo "Download of the VS 2015 installer failed" - exit 1 -} -$VS_INSTALL_ARGS = @("/Quiet", "/NoRestart") -$process = Start-Process "${PWD}\vs_installer.exe" -ArgumentList $VS_INSTALL_ARGS -NoNewWindow -Wait -PassThru -Remove-Item -Path vs_installer.exe -Force -$exitCode = $process.ExitCode -if (($exitCode -ne 0) -and ($exitCode -ne 3010)) { - echo "VS 2015 installer exited with code $exitCode, which should be one of [0, 3010]." - curl.exe --retry 3 -kL $COLLECT_DOWNLOAD_LINK --output Collect.exe - if ($LASTEXITCODE -ne 0) { - echo "Download of the VS Collect tool failed." - exit 1 - } - Start-Process "${PWD}\Collect.exe" -NoNewWindow -Wait -PassThru - New-Item -Path "C:\w\build-results" -ItemType "directory" -Force - Copy-Item -Path "C:\Users\circleci\AppData\Local\Temp\vslogs.zip" -Destination "C:\w\build-results\" - exit 1 -} -echo "VS 2015 installed." diff --git a/.circleci/vs2017_install.ps1 b/.circleci/vs2017_install.ps1 deleted file mode 100644 index 93975fff4..000000000 --- a/.circleci/vs2017_install.ps1 +++ /dev/null @@ -1,35 +0,0 @@ -$VS_DOWNLOAD_LINK = "https://aka.ms/vs/15/release/vs_buildtools.exe" -$COLLECT_DOWNLOAD_LINK = "https://aka.ms/vscollect.exe" -$VS_INSTALL_ARGS = @("--nocache","--quiet","--wait", "--add Microsoft.VisualStudio.Workload.VCTools", - "--add Microsoft.VisualStudio.Component.VC.Tools.14.13", - "--add Microsoft.Component.MSBuild", - "--add Microsoft.VisualStudio.Component.Roslyn.Compiler", - "--add Microsoft.VisualStudio.Component.TextTemplating", - "--add Microsoft.VisualStudio.Component.VC.CoreIde", - "--add Microsoft.VisualStudio.Component.VC.Redist.14.Latest", - "--add Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Core", - "--add Microsoft.VisualStudio.Component.VC.Tools.x86.x64", - "--add Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Win81") - -curl.exe --retry 3 -kL $VS_DOWNLOAD_LINK --output vs_installer.exe -if ($LASTEXITCODE -ne 0) { - echo "Download of the VS 2017 installer failed" - exit 1 -} - -$process = Start-Process "${PWD}\vs_installer.exe" -ArgumentList $VS_INSTALL_ARGS -NoNewWindow -Wait -PassThru -Remove-Item -Path vs_installer.exe -Force -$exitCode = $process.ExitCode -if (($exitCode -ne 0) -and ($exitCode -ne 3010)) { - echo "VS 2017 installer exited with code $exitCode, which should be one of [0, 3010]." - curl.exe --retry 3 -kL $COLLECT_DOWNLOAD_LINK --output Collect.exe - if ($LASTEXITCODE -ne 0) { - echo "Download of the VS Collect tool failed." 
- exit 1 - } - Start-Process "${PWD}\Collect.exe" -NoNewWindow -Wait -PassThru - New-Item -Path "C:\w\build-results" -ItemType "directory" -Force - Copy-Item -Path "C:\Users\circleci\AppData\Local\Temp\vslogs.zip" -Destination "C:\w\build-results\" - exit 1 -} -echo "VS 2017 installed." diff --git a/.github/workflows/sanity_check.yml b/.github/workflows/sanity_check.yml deleted file mode 100644 index e6a5f1591..000000000 --- a/.github/workflows/sanity_check.yml +++ /dev/null @@ -1,44 +0,0 @@ -name: Check buck targets and code format -on: [push, pull_request] -jobs: - check: - name: Check TARGETS file and code format - runs-on: ubuntu-latest - steps: - - name: Checkout feature branch - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: Fetch from upstream - run: | - git remote add upstream https://github.com/facebook/rocksdb.git && git fetch upstream - - - name: Where am I - run: | - echo git status && git status - echo "git remote -v" && git remote -v - echo git branch && git branch - - - name: Setup Python - uses: actions/setup-python@v1 - - - name: Install Dependencies - run: python -m pip install --upgrade pip - - - name: Install argparse - run: pip install argparse - - - name: Download clang-format-diff.py - uses: wei/wget@v1 - with: - args: https://raw.githubusercontent.com/llvm/llvm-project/main/clang/tools/clang-format/clang-format-diff.py - - - name: Check format - run: VERBOSE_CHECK=1 make check-format - - - name: Compare buckify output - run: make check-buck-targets - - - name: Simple source code checks - run: make check-sources diff --git a/.travis.yml b/.travis.yml index 3a71b264b..fbcb8f6a3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,303 +3,33 @@ language: cpp os: - linux arch: - - arm64 - ppc64le - - s390x -compiler: - - clang - - gcc -cache: - - ccache -addons: - apt: - update: true - sources: - - ubuntu-toolchain-r-test - packages: - - libgflags-dev - - libbz2-dev - - liblz4-dev - - libsnappy-dev - - liblzma-dev # xv - - libzstd-dev - - zlib1g-dev +services: + - docker env: - - TEST_GROUP=platform_dependent # 16-18 minutes - - TEST_GROUP=1 # 33-35 minutes - - TEST_GROUP=2 # 18-20 minutes - - TEST_GROUP=3 # 20-22 minutes - - TEST_GROUP=4 # 12-14 minutes - # Run java tests - - JOB_NAME=java_test # 4-11 minutes - # Build ROCKSDB_LITE - - JOB_NAME=lite_build # 3-4 minutes - # Build examples - - JOB_NAME=examples # 5-7 minutes - - JOB_NAME=cmake # 3-5 minutes - - JOB_NAME=cmake-gcc8 # 3-5 minutes - - JOB_NAME=cmake-gcc9 # 3-5 minutes - - JOB_NAME=cmake-gcc9-c++20 # 3-5 minutes - - JOB_NAME=cmake-mingw # 3 minutes - - JOB_NAME=make-gcc4.8 - - JOB_NAME=status_checked + global: + - ARTIFACTS_BUCKET=frocksdb-build-artifacts + - ARTIFACTS_KEY=$AWS_ACCESS_KEY_ID + - ARTIFACTS_SECRET=$AWS_ACCESS_SECRET_KEY + jobs: + - CMD=rocksdbjavastaticdockerppc64le + - CMD=rocksdbjavastaticdockerppc64lemusl -matrix: - exclude: - - os : linux - arch: arm64 - env: JOB_NAME=cmake-mingw - - os : linux - arch: arm64 - env: JOB_NAME=make-gcc4.8 - - os: linux - arch: ppc64le - env: JOB_NAME=cmake-mingw - - os: linux - arch: ppc64le - env: JOB_NAME=make-gcc4.8 - - os: linux - arch: s390x - env: JOB_NAME=cmake-mingw - - os: linux - arch: s390x - env: JOB_NAME=make-gcc4.8 - - os: linux - compiler: clang - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os: linux - arch: arm64 - env: TEST_GROUP=platform_dependent - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os : linux - arch: arm64 - env: TEST_GROUP=1 - - if: type = pull_request AND commit_message !~ /FULL_CI/ - 
os: linux - arch: ppc64le - env: TEST_GROUP=1 - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os: linux - arch: s390x - env: TEST_GROUP=1 - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os : linux - arch: arm64 - env: TEST_GROUP=2 - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os: linux - arch: ppc64le - env: TEST_GROUP=2 - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os: linux - arch: s390x - env: TEST_GROUP=2 - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os : linux - arch: arm64 - env: TEST_GROUP=3 - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os: linux - arch: ppc64le - env: TEST_GROUP=3 - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os: linux - arch: s390x - env: TEST_GROUP=3 - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os : linux - arch: arm64 - env: TEST_GROUP=4 - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os: linux - arch: ppc64le - env: TEST_GROUP=4 - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os: linux - arch: s390x - env: TEST_GROUP=4 - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os : linux - arch: arm64 - env: JOB_NAME=cmake - - if: type = pull_request AND commit_message !~ /FULL_CI/ AND commit_message !~ /java/ - os : linux - arch: arm64 - env: JOB_NAME=java_test - - if: type = pull_request AND commit_message !~ /FULL_CI/ AND commit_message !~ /java/ - os: linux - arch: ppc64le - env: JOB_NAME=java_test - - if: type = pull_request AND commit_message !~ /FULL_CI/ AND commit_message !~ /java/ - os: linux - arch: s390x - env: JOB_NAME=java_test - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os : linux - arch: arm64 - env: JOB_NAME=lite_build - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os: linux - arch: ppc64le - env: JOB_NAME=lite_build - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os: linux - arch: s390x - env: JOB_NAME=lite_build - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os : linux - arch: arm64 - env: JOB_NAME=examples - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os: linux - arch: ppc64le - env: JOB_NAME=examples - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os: linux - arch: s390x - env: JOB_NAME=examples - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os : linux - arch: arm64 - env: JOB_NAME=cmake-gcc8 - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os: linux - arch: ppc64le - env: JOB_NAME=cmake-gcc8 - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os: linux - arch: s390x - env: JOB_NAME=cmake-gcc8 - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os : linux - arch: arm64 - env: JOB_NAME=cmake-gcc9 - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os: linux - arch: ppc64le - env: JOB_NAME=cmake-gcc9 - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os: linux - arch: s390x - env: JOB_NAME=cmake-gcc9 - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os : linux - arch: arm64 - env: JOB_NAME=cmake-gcc9-c++20 - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os: linux - arch: ppc64le - env: JOB_NAME=cmake-gcc9-c++20 - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os: linux - arch: s390x - env: JOB_NAME=cmake-gcc9-c++20 - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os : linux - arch: arm64 - env: JOB_NAME=status_checked - - if: type = pull_request AND commit_message !~ /FULL_CI/ - 
os: linux - arch: ppc64le - env: JOB_NAME=status_checked - - if: type = pull_request AND commit_message !~ /FULL_CI/ - os: linux - arch: s390x - env: JOB_NAME=status_checked +addons: + artifacts: + paths: + - $TRAVIS_BUILD_DIR/java/target/librocksdbjni-linux-ppc64le-musl.so + - $TRAVIS_BUILD_DIR/java/target/librocksdbjni-linux-ppc64le.so install: - - if [ "${JOB_NAME}" == cmake-gcc8 ]; then - sudo apt-get install -y g++-8 || exit $?; - CC=gcc-8 && CXX=g++-8; - fi - - if [ "${JOB_NAME}" == cmake-gcc9 ] || [ "${JOB_NAME}" == cmake-gcc9-c++20 ]; then - sudo apt-get install -y g++-9 || exit $?; - CC=gcc-9 && CXX=g++-9; - fi - - if [ "${JOB_NAME}" == cmake-mingw ]; then - sudo apt-get install -y mingw-w64 || exit $?; - fi - - if [ "${JOB_NAME}" == make-gcc4.8 ]; then - sudo apt-get install -y g++-4.8 || exit $?; - CC=gcc-4.8 && CXX=g++-4.8; - fi - - | - if [[ "${JOB_NAME}" == cmake* ]]; then - sudo apt-get remove -y cmake cmake-data - export CMAKE_DEB="cmake-3.14.5-Linux-$(uname -m).deb" - export CMAKE_DEB_URL="https://rocksdb-deps.s3-us-west-2.amazonaws.com/cmake/${CMAKE_DEB}" - curl --silent --fail --show-error --location --output "${CMAKE_DEB}" "${CMAKE_DEB_URL}" || exit $? - sudo dpkg -i "${CMAKE_DEB}" || exit $? - which cmake && cmake --version - fi - - | - if [[ "${JOB_NAME}" == java_test || "${JOB_NAME}" == cmake* ]]; then - # Ensure JDK 8 - sudo apt-get install -y openjdk-8-jdk || exit $? - export PATH=/usr/lib/jvm/java-8-openjdk-$(dpkg --print-architecture)/bin:$PATH - export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-$(dpkg --print-architecture) - echo "JAVA_HOME=${JAVA_HOME}" - which java && java -version - which javac && javac -version - fi - -before_script: - # Increase the maximum number of open file descriptors, since some tests use - # more FDs than the default limit. - - ulimit -n 8192 + - sudo apt-get install -y openjdk-8-jdk || exit $? 
+ - export PATH=/usr/lib/jvm/java-8-openjdk-$(dpkg --print-architecture)/bin:$PATH + - export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-$(dpkg --print-architecture) + - echo "JAVA_HOME=${JAVA_HOME}" + - which java && java -version + - which javac && javac -version script: - - date; ${CXX} --version - - if [ `command -v ccache` ]; then ccache -C; fi - - case $TEST_GROUP in - platform_dependent) - OPT=-DTRAVIS LIB_MODE=shared V=1 ROCKSDBTESTS_PLATFORM_DEPENDENT=only make -j4 all_but_some_tests check_some - ;; - 1) - OPT=-DTRAVIS LIB_MODE=shared V=1 ROCKSDBTESTS_PLATFORM_DEPENDENT=exclude ROCKSDBTESTS_END=backupable_db_test make -j4 check_some - ;; - 2) - OPT="-DTRAVIS -DROCKSDB_NAMESPACE=alternative_rocksdb_ns" LIB_MODE=shared V=1 make -j4 tools && OPT="-DTRAVIS -DROCKSDB_NAMESPACE=alternative_rocksdb_ns" LIB_MODE=shared V=1 ROCKSDBTESTS_PLATFORM_DEPENDENT=exclude ROCKSDBTESTS_START=backupable_db_test ROCKSDBTESTS_END=db_universal_compaction_test make -j4 check_some - ;; - 3) - OPT=-DTRAVIS LIB_MODE=shared V=1 ROCKSDBTESTS_PLATFORM_DEPENDENT=exclude ROCKSDBTESTS_START=db_universal_compaction_test ROCKSDBTESTS_END=table_properties_collector_test make -j4 check_some - ;; - 4) - OPT=-DTRAVIS LIB_MODE=shared V=1 ROCKSDBTESTS_PLATFORM_DEPENDENT=exclude ROCKSDBTESTS_START=table_properties_collector_test make -j4 check_some - ;; - esac - - case $JOB_NAME in - java_test) - OPT=-DTRAVIS LIB_MODE=shared V=1 make rocksdbjava jtest - ;; - lite_build) - OPT='-DTRAVIS -DROCKSDB_LITE' LIB_MODE=shared V=1 make -j4 all - ;; - examples) - OPT=-DTRAVIS LIB_MODE=shared V=1 make -j4 static_lib && cd examples && make -j4 - ;; - cmake-mingw) - sudo update-alternatives --set x86_64-w64-mingw32-g++ /usr/bin/x86_64-w64-mingw32-g++-posix; - mkdir build && cd build && cmake -DJNI=1 -DWITH_GFLAGS=OFF .. -DCMAKE_C_COMPILER=x86_64-w64-mingw32-gcc -DCMAKE_CXX_COMPILER=x86_64-w64-mingw32-g++ -DCMAKE_SYSTEM_NAME=Windows && make -j4 rocksdb rocksdbjni - ;; - cmake*) - case $JOB_NAME in - *-c++20) - OPT=-DCMAKE_CXX_STANDARD=20 - ;; - esac - - mkdir build && cd build && cmake -DCMAKE_BUILD_TYPE=Release -DWITH_TESTS=0 -DWITH_GFLAGS=0 -DWITH_BENCHMARK_TOOLS=0 -DWITH_TOOLS=0 -DWITH_CORE_TOOLS=1 .. && make -j4 && cd .. && rm -rf build && mkdir build && cd build && cmake -DJNI=1 .. 
-DCMAKE_BUILD_TYPE=Release $OPT && make -j4 rocksdb rocksdbjni - ;; - make-gcc4.8) - OPT=-DTRAVIS LIB_MODE=shared V=1 SKIP_LINK=1 make -j4 all && [ "Linking broken because libgflags compiled with newer ABI" ] - ;; - status_checked) - OPT=-DTRAVIS LIB_MODE=shared V=1 ASSERT_STATUS_CHECKED=1 make -j4 check_some - ;; - esac -notifications: - email: - - leveldb@fb.com + - make jclean clean $CMD diff --git a/CMakeLists.txt b/CMakeLists.txt index ec59d4491..27e5eb76a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -880,6 +880,7 @@ set(SOURCES utilities/fault_injection_env.cc utilities/fault_injection_fs.cc utilities/fault_injection_secondary_cache.cc + utilities/flink/flink_compaction_filter.cc utilities/leveldb_options/leveldb_options.cc utilities/memory/memory_util.cc utilities/merge_operators.cc @@ -1332,6 +1333,7 @@ if(WITH_TESTS) utilities/cassandra/cassandra_row_merge_test.cc utilities/cassandra/cassandra_serialize_test.cc utilities/checkpoint/checkpoint_test.cc + utilities/flink/flink_compaction_filter_test.cc utilities/memory/memory_test.cc utilities/merge_operators/string_append/stringappend_test.cc utilities/object_registry_test.cc diff --git a/FROCKSDB-RELEASE.md b/FROCKSDB-RELEASE.md new file mode 100644 index 000000000..3ec3c2724 --- /dev/null +++ b/FROCKSDB-RELEASE.md @@ -0,0 +1,249 @@ +# FRocksDB Release Process + +## Summary + +A FrocksDB-6.x release is a fat jar file that contains the following binaries: +* .so files for linux32 (glibc and musl-libc) +* .so files for linux64 (glibc and musl-libc) +* .so files for linux [aarch64](https://en.wikipedia.org/wiki/AArch64) (glibc and musl-libc) +* .so files for linux [ppc64le](https://en.wikipedia.org/wiki/Ppc64le) (glibc and musl-libc) +* .jnilib file for Mac OSX +* .dll for Windows x64 + +To build the binaries for a FrocksDB release, building on native architectures is advised. Building the binaries for ppc64le and aarch64 *can* be done using QEMU, but you may run into emulation bugs and the build times will be dramatically slower (up to 20x). + +We recommend building the binaries on environments with at least 4 cores, 16GB RAM and 40GB of storage. The following environments are recommended for use in the build process: +* Windows x64 +* Linux aarch64 +* Linux ppc64le +* Mac OSX + +## Build for Windows + +For the Windows binary build, we recommend using a base [AWS Windows EC2 instance](https://aws.amazon.com/windows/products/ec2/) with 4 cores, 16GB RAM, 40GB storage for the build. + +First, install [chocolatey](https://chocolatey.org/install). Once installed, the following required components can be installed using PowerShell: + + choco install git.install jdk8 maven visualstudio2017community visualstudio2017-workload-nativedesktop + +Open the "Developer Command Prompt for VS 2017" and run the following commands: + + git clone git@github.com:ververica/frocksdb.git + cd frocksdb + git checkout FRocksDB-6.20.3 # release branch + java\crossbuild\build-win.bat + +The resulting native binary will be built and available at `build\java\Release\rocksdbjni-shared.dll`. You can also find it in the project folder under the name `librocksdbjni-win64.dll`. +The resulting Windows jar is `build\java\rocksdbjni_classes.jar`. + +There is also a how-to in CMakeLists.txt. + +**Once finished, extract the `librocksdbjni-win64.dll` from the build environment. 
You will need this .dll in the final crossbuild.** + +## Build for aarch64 + +For the Linux aarch64 binary build, we recommend using a base [AWS Ubuntu Server 20.04 LTS EC2](https://aws.amazon.com/windows/products/ec2/) with a 4 core Arm processor, 16GB RAM, 40GB storage for the build. You can also attempt to build with QEMU on a non-aarch64 processor, but you may run into emulation bugs and very long build times. + +### Building in aarch64 environment + +First, install the required packages such as Java 8 and make: + + sudo apt-get update + sudo apt-get install build-essential openjdk-8-jdk + +Then, install and set up [Docker](https://docs.docker.com/engine/install/ubuntu/): + + sudo apt-get install apt-transport-https ca-certificates curl gnupg lsb-release + + curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg + echo "deb [arch=arm64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + + sudo apt-get update + sudo apt-get install docker-ce docker-ce-cli containerd.io + + sudo groupadd docker + sudo usermod -aG docker $USER + newgrp docker + +Then, clone the FrocksDB repo: + + git clone https://github.com/ververica/frocksdb.git + cd frocksdb + git checkout FRocksDB-6.20.3 # release branch + + +First, build the glibc binary: + + make jclean clean rocksdbjavastaticdockerarm64v8 + +**Once finished, extract the `java/target/librocksdbjni-linux-aarch64.so` from the build environment. You will need this .so in the final crossbuild.** + +Next, build the musl-libc binary: + + make jclean clean rocksdbjavastaticdockerarm64v8musl + +**Once finished, extract the `java/target/librocksdbjni-linux-aarch64-musl.so` from the build environment. You will need this .so in the final crossbuild.** + +### Building via QEMU + +You can use QEMU on, for example, an `x86_64` system to build the aarch64 binaries. To set this up on an Ubuntu environment: + + sudo apt-get install qemu binfmt-support qemu-user-static + docker run --rm --privileged multiarch/qemu-user-static --reset -p yes + +To verify that you can now run aarch64 docker images: + + docker run --rm -t arm64v8/ubuntu uname -m + > aarch64 + +You can now attempt to build the aarch64 binaries as in the previous section. + +## Build for ppc64le + +For the ppc64le binaries, we recommend building on a PowerPC machine if possible, as it can be tricky to spin up a ppc64le cloud environment. However, if a PowerPC machine is not available, [Travis-CI](https://www.travis-ci.com/) offers ppc64le build environments that work perfectly for building these binaries. If neither a machine nor Travis is an option, you can use QEMU, but the build may take a very long time and be prone to emulation errors. + +### Building in ppc64le environment + +As with the aarch64 environment, the ppc64le environment will require Java 8, Docker and build-essentials installed. Once installed, you can build the two binaries: + + make jclean clean rocksdbjavastaticdockerppc64le + +**Once finished, extract the `java/target/librocksdbjni-linux-ppc64le.so` from the build environment. You will need this .so in the final crossbuild.** + + make jclean clean rocksdbjavastaticdockerppc64lemusl + +**Once finished, extract the `java/target/librocksdbjni-linux-ppc64le-musl.so` from the build environment. 
You will need this .so in the final crossbuild.** + +### Building via Travis + +Travis-CI supports ppc64le build environments, and this can be a convenient way of building in the absence of a PowerPC machine. Assuming that you have an S3 bucket called **my-frocksdb-release-artifacts**, the following Travis configuration will build the release artifacts and push them to the S3 bucket: + +``` +dist: xenial +language: cpp +os: + - linux +arch: + - ppc64le + +services: + - docker +addons: + artifacts: + paths: + - $TRAVIS_BUILD_DIR/java/target/librocksdbjni-linux-ppc64le-musl.so + - $TRAVIS_BUILD_DIR/java/target/librocksdbjni-linux-ppc64le.so + +env: + global: + - ARTIFACTS_BUCKET=my-frocksdb-release-artifacts + jobs: + - CMD=rocksdbjavastaticdockerppc64le + - CMD=rocksdbjavastaticdockerppc64lemusl + +install: + - sudo apt-get install -y openjdk-8-jdk || exit $? + - export PATH=/usr/lib/jvm/java-8-openjdk-$(dpkg --print-architecture)/bin:$PATH + - export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-$(dpkg --print-architecture) + - echo "JAVA_HOME=${JAVA_HOME}" + - which java && java -version + - which javac && javac -version + +script: + - make jclean clean $CMD +``` + +**Make sure to set the `ARTIFACTS_KEY` and `ARTIFACTS_SECRET` environment variables in the Travis Job with valid AWS credentials to access the S3 bucket you defined.** + +**Once finished, the `librocksdbjni-linux-ppc64le.so` and `librocksdbjni-linux-ppc64le-musl.so` binaries will be in the S3 bucket. You will need these .so binaries in the final crossbuild.** + + +### Building via QEMU + +You can use QEMU on, for example, an `x86_64` system to build the ppc64le binaries. To set this up on an Ubuntu environment: + + sudo apt-get install qemu binfmt-support qemu-user-static + docker run --rm --privileged multiarch/qemu-user-static --reset -p yes + +To verify that you can now run ppc64le docker images: + + docker run --rm -t ppc64le/ubuntu uname -m + > ppc64le + +You can now attempt to build the ppc64le binaries as in the previous section. + +## Final crossbuild in Mac OSX + +Documentation for the final crossbuild for Mac OSX and Linux is in [java/RELEASE.md](java/RELEASE.md), which also has information on the dependencies that should be installed. As above, this tends to be Java 8, build-essentials and Docker. + +Before you run this step, you should have 5 binaries from the previous build steps: + + 1. `librocksdbjni-win64.dll` from the Windows build step. + 2. `librocksdbjni-linux-aarch64.so` from the aarch64 build step. + 3. `librocksdbjni-linux-aarch64-musl.so` from the aarch64 build step. + 4. `librocksdbjni-linux-ppc64le.so` from the ppc64le build step. + 5. `librocksdbjni-linux-ppc64le-musl.so` from the ppc64le build step. + +To start the crossbuild within a Mac OSX environment: + + make jclean clean + mkdir -p java/target + cp <path-to>/librocksdbjni-win64.dll java/target/librocksdbjni-win64.dll + cp <path-to>/librocksdbjni-linux-ppc64le.so java/target/librocksdbjni-linux-ppc64le.so + cp <path-to>/librocksdbjni-linux-ppc64le-musl.so java/target/librocksdbjni-linux-ppc64le-musl.so + cp <path-to>/librocksdbjni-linux-aarch64.so java/target/librocksdbjni-linux-aarch64.so + cp <path-to>/librocksdbjni-linux-aarch64-musl.so java/target/librocksdbjni-linux-aarch64-musl.so + FROCKSDB_VERSION=1.0 PORTABLE=1 ROCKSDB_DISABLE_JEMALLOC=true DEBUG_LEVEL=0 make frocksdbjavastaticreleasedocker + +*Note: we disable jemalloc on Mac due to https://github.com/facebook/rocksdb/issues/5787*. 
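Before running the make target, it can be worth confirming that all five native binaries are staged where the crossbuild expects them; a quick check along these lines (a hypothetical helper script, not part of the official release process) avoids a long build that only fails at packaging time:

```bash
#!/usr/bin/env bash
# Hypothetical helper: verify the five native binaries from the earlier
# build steps have been staged into java/target before the crossbuild.
missing=0
for f in librocksdbjni-win64.dll \
         librocksdbjni-linux-aarch64.so \
         librocksdbjni-linux-aarch64-musl.so \
         librocksdbjni-linux-ppc64le.so \
         librocksdbjni-linux-ppc64le-musl.so; do
  if [ -f "java/target/$f" ]; then
    echo "OK      $f"
  else
    echo "MISSING $f" >&2
    missing=1
  fi
done
exit $missing
```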
+ +Once finished, there should be a directory at `java/target/frocksdb-release` with the FRocksDB jar, javadoc jar, sources jar and pom in it. You can inspect the jar file and ensure that it contains the binaries, history file, etc: + +``` +$ jar tf frocksdbjni-6.20.3-ververica-1.0.jar +META-INF/ +META-INF/MANIFEST.MF +HISTORY-JAVA.md +HISTORY.md +librocksdbjni-linux-aarch64-musl.so +librocksdbjni-linux-aarch64.so +librocksdbjni-linux-ppc64le-musl.so +librocksdbjni-linux-ppc64le.so +librocksdbjni-linux32-musl.so +librocksdbjni-linux32.so +librocksdbjni-linux64-musl.so +librocksdbjni-linux64.so +librocksdbjni-osx.jnilib +librocksdbjni-win64.dll +... +``` + +*Note that it contains linux32/64.so binaries as well as librocksdbjni-osx.jnilib*. + +## Push to Maven Central + +For this step, you will need the following: + +- The OSX Crossbuild artifacts built in `java/target/frocksdb-release` as above. +- A Sonatype account with access to the staging repository. If you do not have permission, open a ticket with Sonatype, [such as this one](https://issues.sonatype.org/browse/OSSRH-72185). +- A GPG key to sign the release, with your public key available for verification (for example, by uploading it to https://keys.openpgp.org/) + +To upload the release to the Sonatype staging repository: +```bash +VERSION=<release-version> \ +USER=<sonatype-username> \ +PASSWORD=<sonatype-password> \ +KEYNAME=<gpg-key-name> \ +PASSPHRASE=<gpg-key-passphrase> \ +java/publish-frocksdbjni.sh +``` + +Go to the staging repositories on Sonatype: + +https://oss.sonatype.org/#stagingRepositories + +Select the open staging repository and click on "Close". + +The staging repository will look something like `https://oss.sonatype.org/content/repositories/xxxx-1020`. You can use this staged release to test the artifacts and ensure they are correct. + +Once you have verified the artifacts are correct, press the "Release" button. **WARNING: this cannot be undone**. Within 24-48 hours, the artifact will be available on Maven Central for use. diff --git a/HISTORY.md b/HISTORY.md index 155a2ab83..a165d10c6 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,3 +1,9 @@ +# FRocksdb Change Log +## 6.20.2-ververica-1.0 (08/09/2021) +### Improvement +* [Flink TTL] compaction filter for background cleanup of state with time-to-live +* [FLINK-19710] Revert implementation of PerfContext back to __thread to avoid performance regression + # Rocksdb Change Log ## 6.29.5 (03/29/2022) ### Bug Fixes diff --git a/Makefile b/Makefile index b3bb5e9ed..1ba7c6905 100644 --- a/Makefile +++ b/Makefile @@ -6,6 +6,8 @@ #----------------------------------------------- +FROCKSDB_VERSION ?= 1.0 + BASH_EXISTS := $(shell which bash) SHELL := $(shell which bash) # Default to python3. Some distros like CentOS 8 do not have `python`. 
@@ -1441,6 +1443,9 @@ histogram_test: $(OBJ_DIR)/monitoring/histogram_test.o $(TEST_LIBRARY) $(LIBRARY thread_local_test: $(OBJ_DIR)/util/thread_local_test.o $(TEST_LIBRARY) $(LIBRARY) $(AM_LINK) +flink_compaction_filter_test: $(OBJ_DIR)/utilities/flink/flink_compaction_filter_test.o $(TEST_LIBRARY) $(LIBRARY) + $(AM_LINK) + work_queue_test: $(OBJ_DIR)/util/work_queue_test.o $(TEST_LIBRARY) $(LIBRARY) $(AM_LINK) @@ -2086,8 +2091,8 @@ ROCKSDB_JAVADOCS_JAR = rocksdbjni-$(ROCKSDB_JAVA_VERSION)-javadoc.jar ROCKSDB_SOURCES_JAR = rocksdbjni-$(ROCKSDB_JAVA_VERSION)-sources.jar SHA256_CMD = sha256sum -ZLIB_VER ?= 1.2.11 -ZLIB_SHA256 ?= c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1 +ZLIB_VER ?= 1.2.12 +ZLIB_SHA256 ?= 91844808532e5ce316b3c010929493c0244f3d37593afd6de04f71821d5136d9 ZLIB_DOWNLOAD_BASE ?= http://zlib.net BZIP2_VER ?= 1.0.8 BZIP2_SHA256 ?= ab5a03176ee106d3f0fa90e381da478ddae405918153cca248e682cd0c4a2269 @@ -2254,7 +2259,7 @@ rocksdbjavastaticosx_ub: rocksdbjavastaticosx_archs cd java/target/classes; $(JAR_CMD) -uf ../$(ROCKSDB_JAR) org/rocksdb/*.class org/rocksdb/util/*.class openssl sha1 java/target/$(ROCKSDB_JAR) | sed 's/.*= \([0-9a-f]*\)/\1/' > java/target/$(ROCKSDB_JAR).sha1 -rocksdbjavastaticosx_archs: +rocksdbjavastaticosx_archs: $(MAKE) rocksdbjavastaticosx_arch_x86_64 $(MAKE) rocksdbjavastaticosx_arch_arm64 @@ -2313,10 +2318,42 @@ rocksdbjavastaticrelease: rocksdbjavastaticosx rocksdbjava_javadocs_jar rocksdbj rocksdbjavastaticreleasedocker: rocksdbjavastaticosx rocksdbjavastaticdockerx86 rocksdbjavastaticdockerx86_64 rocksdbjavastaticdockerx86musl rocksdbjavastaticdockerx86_64musl rocksdbjava_javadocs_jar rocksdbjava_sources_jar cd java; $(JAR_CMD) -cf target/$(ROCKSDB_JAR_ALL) HISTORY*.md - cd java/target; $(JAR_CMD) -uf $(ROCKSDB_JAR_ALL) librocksdbjni-*.so librocksdbjni-*.jnilib + $(JAR_CMD) -uf java/target/$(ROCKSDB_JAR_ALL) HISTORY*.md + cd java/target; $(JAR_CMD) -uf $(ROCKSDB_JAR_ALL) librocksdbjni-*.so librocksdbjni-*.jnilib librocksdbjni-win64.dll cd java/target/classes; $(JAR_CMD) -uf ../$(ROCKSDB_JAR_ALL) org/rocksdb/*.class org/rocksdb/util/*.class openssl sha1 java/target/$(ROCKSDB_JAR_ALL) | sed 's/.*= \([0-9a-f]*\)/\1/' > java/target/$(ROCKSDB_JAR_ALL).sha1 +frocksdbjavastaticreleasedocker: rocksdbjavastaticreleasedocker + # update apache license + mkdir -p java/target/META-INF + cp LICENSE.Apache java/target/META-INF/LICENSE + cd java/target;jar -uf $(ROCKSDB_JAR_ALL) META-INF/LICENSE + + # jars to be released + $(eval JAR_PREF=rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)) + $(eval JAR_DOCS=$(JAR_PREF)-javadoc.jar) + $(eval JAR_SOURCES=$(JAR_PREF)-sources.jar) + + # update docs and sources jars + cd java/target;jar -uf $(JAR_DOCS) META-INF/LICENSE + cd java/target;jar -uf $(JAR_SOURCES) META-INF/LICENSE + + # prepare frocksdb release + cd java/target;mkdir -p frocksdb-release + + $(eval FROCKSDB_JAVA_VERSION=$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-ververica-$(FROCKSDB_VERSION)) + $(eval FJAR_PREF=frocksdbjni-$(FROCKSDB_JAVA_VERSION)) + $(eval FJAR=$(FJAR_PREF).jar) + $(eval FJAR_DOCS=$(FJAR_PREF)-javadoc.jar) + $(eval FJAR_SOURCES=$(FJAR_PREF)-sources.jar) + + cd java/target;cp $(ROCKSDB_JAR_ALL) frocksdb-release/$(FJAR) + cd java/target;cp $(JAR_DOCS) frocksdb-release/$(FJAR_DOCS) + cd java/target;cp $(JAR_SOURCES) frocksdb-release/$(FJAR_SOURCES) + openssl sha1 java/target/$(ROCKSDB_JAR_ALL) | sed 's/.*= \([0-9a-f]*\)/\1/' > java/target/$(ROCKSDB_JAR_ALL).sha1 + cd java;cat pom.xml.template | sed 
's/\$${FROCKSDB_JAVA_VERSION}/$(FROCKSDB_JAVA_VERSION)/' > pom.xml + cd java;cp pom.xml target/frocksdb-release/$(FJAR_PREF).pom + rocksdbjavastaticdockerx86: mkdir -p java/target docker run --rm --name rocksdb_linux_x86-be --platform linux/386 --attach stdin --attach stdout --attach stderr --volume $(HOME)/.m2:/root/.m2:ro --volume `pwd`:/rocksdb-host:ro --volume /rocksdb-local-build --volume `pwd`/java/target:/rocksdb-java-target --env DEBUG_LEVEL=$(DEBUG_LEVEL) evolvedbinary/rocksjava:centos6_x86-be /rocksdb-host/java/crossbuild/docker-build-linux-centos.sh diff --git a/TARGETS b/TARGETS index 2d13cc8d9..d5bdde36f 100644 --- a/TARGETS +++ b/TARGETS @@ -399,6 +399,7 @@ cpp_library( "utilities/fault_injection_env.cc", "utilities/fault_injection_fs.cc", "utilities/fault_injection_secondary_cache.cc", + "utilities/flink/flink_compaction_filter.cc", "utilities/leveldb_options/leveldb_options.cc", "utilities/memory/memory_util.cc", "utilities/merge_operators.cc", @@ -729,6 +730,7 @@ cpp_library( "utilities/fault_injection_env.cc", "utilities/fault_injection_fs.cc", "utilities/fault_injection_secondary_cache.cc", + "utilities/flink/flink_compaction_filter.cc", "utilities/leveldb_options/leveldb_options.cc", "utilities/memory/memory_util.cc", "utilities/merge_operators.cc", @@ -1656,6 +1658,13 @@ ROCKS_TESTS = [ [], [], ], + [ + "flink_compaction_filter_test", + "utilities/flink/flink_compaction_filter_test.cc", + "parallel", + [], + [], + ], [ "flush_job_test", "db/flush_job_test.cc", diff --git a/Vagrantfile b/Vagrantfile index 07f2e99fd..3dcedaf76 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -2,7 +2,7 @@ Vagrant.configure("2") do |config| config.vm.provider "virtualbox" do |v| - v.memory = 4096 + v.memory = 6096 v.cpus = 2 end diff --git a/db/db_impl/db_impl_write.cc b/db/db_impl/db_impl_write.cc index 5ef279338..015552960 100644 --- a/db/db_impl/db_impl_write.cc +++ b/db/db_impl/db_impl_write.cc @@ -1085,8 +1085,8 @@ WriteBatch* DBImpl::MergeBatch(const WriteThread::WriteGroup& write_group, // write thread. Otherwise this must be called holding log_write_mutex_. IOStatus DBImpl::WriteToWAL(const WriteBatch& merged_batch, log::Writer* log_writer, uint64_t* log_used, - uint64_t* log_size, - bool with_db_mutex, bool with_log_mutex) { + uint64_t* log_size, bool with_db_mutex, + bool with_log_mutex) { assert(log_size != nullptr); // Assert mutex explicitly. 
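For orientation while reading the rest of this patch: the Java half of the new TTL filter (`org/rocksdb/FlinkCompactionFilter.java`, added further below) hooks into RocksDB through a compaction filter factory on the column family options. A rough usage sketch, assuming the value layout exercised by `FlinkCompactionFilterTest` (an 8-byte timestamp at offset 0 for value state); the helper class and the constants here are illustrative only:

```java
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.FlinkCompactionFilter;
import org.rocksdb.FlinkCompactionFilter.FlinkCompactionFilterFactory;

public final class TtlOptions {
  public static ColumnFamilyOptions valueStateWithTtl(long ttlMillis) {
    // TimeProvider is a single-method interface, so a method reference works;
    // it must be thread-safe because compaction threads call it.
    FlinkCompactionFilterFactory factory =
        new FlinkCompactionFilterFactory(System::currentTimeMillis);
    // Value state: the 8-byte expiration timestamp sits at offset 0; re-read
    // the current time after every 100 processed entries.
    factory.configure(FlinkCompactionFilter.Config.createForValue(ttlMillis, 100));
    return new ColumnFamilyOptions().setCompactionFilterFactory(factory);
  }
}
```

The factory owns native resources, so it (and any `Logger` handed to it) should be closed once the column family no longer needs it, as the test's cleanup below does.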
diff --git a/db/perf_context_test.cc b/db/perf_context_test.cc index 908e684f7..c8bf24a86 100644 --- a/db/perf_context_test.cc +++ b/db/perf_context_test.cc @@ -701,89 +701,6 @@ TEST_F(PerfContextTest, MergeOperatorTime) { delete db; } -TEST_F(PerfContextTest, CopyAndMove) { - // Assignment operator - { - get_perf_context()->Reset(); - get_perf_context()->EnablePerLevelPerfContext(); - PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, 5); - ASSERT_EQ( - 1, - (*(get_perf_context()->level_to_perf_context))[5].bloom_filter_useful); - PerfContext perf_context_assign; - perf_context_assign = *get_perf_context(); - ASSERT_EQ( - 1, - (*(perf_context_assign.level_to_perf_context))[5].bloom_filter_useful); - get_perf_context()->ClearPerLevelPerfContext(); - get_perf_context()->Reset(); - ASSERT_EQ( - 1, - (*(perf_context_assign.level_to_perf_context))[5].bloom_filter_useful); - perf_context_assign.ClearPerLevelPerfContext(); - perf_context_assign.Reset(); - } - // Copy constructor - { - get_perf_context()->Reset(); - get_perf_context()->EnablePerLevelPerfContext(); - PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, 5); - ASSERT_EQ( - 1, - (*(get_perf_context()->level_to_perf_context))[5].bloom_filter_useful); - PerfContext perf_context_copy(*get_perf_context()); - ASSERT_EQ( - 1, (*(perf_context_copy.level_to_perf_context))[5].bloom_filter_useful); - get_perf_context()->ClearPerLevelPerfContext(); - get_perf_context()->Reset(); - ASSERT_EQ( - 1, (*(perf_context_copy.level_to_perf_context))[5].bloom_filter_useful); - perf_context_copy.ClearPerLevelPerfContext(); - perf_context_copy.Reset(); - } - // Move constructor - { - get_perf_context()->Reset(); - get_perf_context()->EnablePerLevelPerfContext(); - PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, 5); - ASSERT_EQ( - 1, - (*(get_perf_context()->level_to_perf_context))[5].bloom_filter_useful); - PerfContext perf_context_move = std::move(*get_perf_context()); - ASSERT_EQ( - 1, (*(perf_context_move.level_to_perf_context))[5].bloom_filter_useful); - get_perf_context()->ClearPerLevelPerfContext(); - get_perf_context()->Reset(); - ASSERT_EQ( - 1, (*(perf_context_move.level_to_perf_context))[5].bloom_filter_useful); - perf_context_move.ClearPerLevelPerfContext(); - perf_context_move.Reset(); - } -} - -TEST_F(PerfContextTest, PerfContextDisableEnable) { - get_perf_context()->Reset(); - get_perf_context()->EnablePerLevelPerfContext(); - PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_positive, 1, 0); - get_perf_context()->DisablePerLevelPerfContext(); - PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, 5); - get_perf_context()->EnablePerLevelPerfContext(); - PERF_COUNTER_BY_LEVEL_ADD(block_cache_hit_count, 1, 0); - get_perf_context()->DisablePerLevelPerfContext(); - PerfContext perf_context_copy(*get_perf_context()); - ASSERT_EQ(1, (*(perf_context_copy.level_to_perf_context))[0] - .bloom_filter_full_positive); - // this was set when per level perf context is disabled, should not be copied - ASSERT_NE( - 1, (*(perf_context_copy.level_to_perf_context))[5].bloom_filter_useful); - ASSERT_EQ( - 1, (*(perf_context_copy.level_to_perf_context))[0].block_cache_hit_count); - perf_context_copy.ClearPerLevelPerfContext(); - perf_context_copy.Reset(); - get_perf_context()->ClearPerLevelPerfContext(); - get_perf_context()->Reset(); -} - TEST_F(PerfContextTest, PerfContextByLevelGetSet) { get_perf_context()->Reset(); get_perf_context()->EnablePerLevelPerfContext(); diff --git a/include/rocksdb/perf_context.h b/include/rocksdb/perf_context.h index f3058416e..83de87f60 
100644 --- a/include/rocksdb/perf_context.h +++ b/include/rocksdb/perf_context.h @@ -42,13 +42,6 @@ struct PerfContextByLevel { }; struct PerfContext { - ~PerfContext(); - - PerfContext() {} - - PerfContext(const PerfContext&); - PerfContext& operator=(const PerfContext&); - PerfContext(PerfContext&&) noexcept; void Reset(); // reset all performance counters to zero @@ -63,26 +56,26 @@ struct PerfContext { // free the space for PerfContextByLevel, also disable per level perf context void ClearPerLevelPerfContext(); - uint64_t user_key_comparison_count; // total number of user key comparisons - uint64_t block_cache_hit_count; // total number of block cache hits - uint64_t block_read_count; // total number of block reads (with IO) - uint64_t block_read_byte; // total number of bytes from block reads - uint64_t block_read_time; // total nanos spent on block reads - uint64_t block_cache_index_hit_count; // total number of index block hits - uint64_t index_block_read_count; // total number of index block reads - uint64_t block_cache_filter_hit_count; // total number of filter block hits - uint64_t filter_block_read_count; // total number of filter block reads - uint64_t compression_dict_block_read_count; // total number of compression - // dictionary block reads + uint64_t user_key_comparison_count{}; // total number of user key comparisons + uint64_t block_cache_hit_count{}; // total number of block cache hits + uint64_t block_read_count{}; // total number of block reads (with IO) + uint64_t block_read_byte{}; // total number of bytes from block reads + uint64_t block_read_time{}; // total nanos spent on block reads + uint64_t block_cache_index_hit_count{}; // total number of index block hits + uint64_t index_block_read_count{}; // total number of index block reads + uint64_t block_cache_filter_hit_count{}; // total number of filter block hits + uint64_t filter_block_read_count{}; // total number of filter block reads + uint64_t compression_dict_block_read_count{}; // total number of compression + // dictionary block reads - uint64_t secondary_cache_hit_count; // total number of secondary cache hits + uint64_t secondary_cache_hit_count{}; // total number of secondary cache hits - uint64_t block_checksum_time; // total nanos spent on block checksum - uint64_t block_decompress_time; // total nanos spent on block decompression + uint64_t block_checksum_time{}; // total nanos spent on block checksum + uint64_t block_decompress_time{}; // total nanos spent on block decompression - uint64_t get_read_bytes; // bytes for vals returned by Get - uint64_t multiget_read_bytes; // bytes for vals returned by MultiGet - uint64_t iter_read_bytes; // bytes for keys/vals decoded by iterator + uint64_t get_read_bytes{}; // bytes for vals returned by Get + uint64_t multiget_read_bytes{}; // bytes for vals returned by MultiGet + uint64_t iter_read_bytes{}; // bytes for keys/vals decoded by iterator // total number of internal keys skipped over during iteration. // There are several reasons for it: @@ -101,7 +94,7 @@ struct PerfContext { // 4. symmetric cases for Prev() and SeekToLast() // internal_recent_skipped_count is not included in this counter. // - uint64_t internal_key_skipped_count; + uint64_t internal_key_skipped_count{}; // Total number of deletes and single deletes skipped over during iteration // When calling Next(), Seek() or SeekToFirst(), after previous position // before calling Next(), the seek key in Seek() or the beginning for @@ -109,125 +102,126 @@ struct PerfContext { // key. 
Every deleted key is counted once. We don't recount here if there are // still older updates invalidated by the tombstones. // - uint64_t internal_delete_skipped_count; + uint64_t internal_delete_skipped_count{}; // How many times iterators skipped over internal keys that are more recent // than the snapshot that iterator is using. // - uint64_t internal_recent_skipped_count; + uint64_t internal_recent_skipped_count{}; // How many values were fed into merge operator by iterators. // - uint64_t internal_merge_count; + uint64_t internal_merge_count{}; - uint64_t get_snapshot_time; // total nanos spent on getting snapshot - uint64_t get_from_memtable_time; // total nanos spent on querying memtables - uint64_t get_from_memtable_count; // number of mem tables queried + uint64_t get_snapshot_time{}; // total nanos spent on getting snapshot + uint64_t get_from_memtable_time{}; // total nanos spent on querying memtables + uint64_t get_from_memtable_count{}; // number of mem tables queried // total nanos spent after Get() finds a key - uint64_t get_post_process_time; - uint64_t get_from_output_files_time; // total nanos reading from output files + uint64_t get_post_process_time{}; + uint64_t + get_from_output_files_time{}; // total nanos reading from output files // total nanos spent on seeking memtable - uint64_t seek_on_memtable_time; + uint64_t seek_on_memtable_time{}; // number of seeks issued on memtable // (including SeekForPrev but not SeekToFirst and SeekToLast) - uint64_t seek_on_memtable_count; + uint64_t seek_on_memtable_count{}; // number of Next()s issued on memtable - uint64_t next_on_memtable_count; + uint64_t next_on_memtable_count{}; // number of Prev()s issued on memtable - uint64_t prev_on_memtable_count; + uint64_t prev_on_memtable_count{}; // total nanos spent on seeking child iters - uint64_t seek_child_seek_time; + uint64_t seek_child_seek_time{}; // number of seek issued in child iterators - uint64_t seek_child_seek_count; - uint64_t seek_min_heap_time; // total nanos spent on the merge min heap - uint64_t seek_max_heap_time; // total nanos spent on the merge max heap + uint64_t seek_child_seek_count{}; + uint64_t seek_min_heap_time{}; // total nanos spent on the merge min heap + uint64_t seek_max_heap_time{}; // total nanos spent on the merge max heap // total nanos spent on seeking the internal entries - uint64_t seek_internal_seek_time; + uint64_t seek_internal_seek_time{}; // total nanos spent on iterating internal entries to find the next user entry - uint64_t find_next_user_entry_time; + uint64_t find_next_user_entry_time{}; // This group of stats provide a breakdown of time spent by Write(). // May be inaccurate when 2PC, two_write_queues or enable_pipelined_write // are enabled. // // total nanos spent on writing to WAL - uint64_t write_wal_time; + uint64_t write_wal_time{}; // total nanos spent on writing to mem tables - uint64_t write_memtable_time; + uint64_t write_memtable_time{}; // total nanos spent on delaying or throttling write - uint64_t write_delay_time; + uint64_t write_delay_time{}; // total nanos spent on switching memtable/wal and scheduling // flushes/compactions. 
- uint64_t write_scheduling_flushes_compactions_time; + uint64_t write_scheduling_flushes_compactions_time{}; // total nanos spent on writing a record, excluding the above four things - uint64_t write_pre_and_post_process_time; + uint64_t write_pre_and_post_process_time{}; // time spent waiting for other threads of the batch group - uint64_t write_thread_wait_nanos; + uint64_t write_thread_wait_nanos{}; // time spent on acquiring DB mutex. - uint64_t db_mutex_lock_nanos; + uint64_t db_mutex_lock_nanos{}; // Time spent on waiting with a condition variable created with DB mutex. - uint64_t db_condition_wait_nanos; + uint64_t db_condition_wait_nanos{}; // Time spent on merge operator. - uint64_t merge_operator_time_nanos; + uint64_t merge_operator_time_nanos{}; // Time spent on reading index block from block cache or SST file - uint64_t read_index_block_nanos; + uint64_t read_index_block_nanos{}; // Time spent on reading filter block from block cache or SST file - uint64_t read_filter_block_nanos; + uint64_t read_filter_block_nanos{}; // Time spent on creating data block iterator - uint64_t new_table_block_iter_nanos; + uint64_t new_table_block_iter_nanos{}; // Time spent on creating a iterator of an SST file. - uint64_t new_table_iterator_nanos; + uint64_t new_table_iterator_nanos{}; // Time spent on seeking a key in data/index blocks - uint64_t block_seek_nanos; + uint64_t block_seek_nanos{}; // Time spent on finding or creating a table reader - uint64_t find_table_nanos; + uint64_t find_table_nanos{}; // total number of mem table bloom hits - uint64_t bloom_memtable_hit_count; + uint64_t bloom_memtable_hit_count{}; // total number of mem table bloom misses - uint64_t bloom_memtable_miss_count; + uint64_t bloom_memtable_miss_count{}; // total number of SST table bloom hits - uint64_t bloom_sst_hit_count; + uint64_t bloom_sst_hit_count{}; // total number of SST table bloom misses - uint64_t bloom_sst_miss_count; + uint64_t bloom_sst_miss_count{}; // Time spent waiting on key locks in transaction lock manager. - uint64_t key_lock_wait_time; + uint64_t key_lock_wait_time{}; // number of times acquiring a lock was blocked by another transaction. - uint64_t key_lock_wait_count; + uint64_t key_lock_wait_count{}; // Total time spent in Env filesystem operations. These are only populated // when TimedEnv is used. 
- uint64_t env_new_sequential_file_nanos; - uint64_t env_new_random_access_file_nanos; - uint64_t env_new_writable_file_nanos; - uint64_t env_reuse_writable_file_nanos; - uint64_t env_new_random_rw_file_nanos; - uint64_t env_new_directory_nanos; - uint64_t env_file_exists_nanos; - uint64_t env_get_children_nanos; - uint64_t env_get_children_file_attributes_nanos; - uint64_t env_delete_file_nanos; - uint64_t env_create_dir_nanos; - uint64_t env_create_dir_if_missing_nanos; - uint64_t env_delete_dir_nanos; - uint64_t env_get_file_size_nanos; - uint64_t env_get_file_modification_time_nanos; - uint64_t env_rename_file_nanos; - uint64_t env_link_file_nanos; - uint64_t env_lock_file_nanos; - uint64_t env_unlock_file_nanos; - uint64_t env_new_logger_nanos; - - uint64_t get_cpu_nanos; - uint64_t iter_next_cpu_nanos; - uint64_t iter_prev_cpu_nanos; - uint64_t iter_seek_cpu_nanos; + uint64_t env_new_sequential_file_nanos{}; + uint64_t env_new_random_access_file_nanos{}; + uint64_t env_new_writable_file_nanos{}; + uint64_t env_reuse_writable_file_nanos{}; + uint64_t env_new_random_rw_file_nanos{}; + uint64_t env_new_directory_nanos{}; + uint64_t env_file_exists_nanos{}; + uint64_t env_get_children_nanos{}; + uint64_t env_get_children_file_attributes_nanos{}; + uint64_t env_delete_file_nanos{}; + uint64_t env_create_dir_nanos{}; + uint64_t env_create_dir_if_missing_nanos{}; + uint64_t env_delete_dir_nanos{}; + uint64_t env_get_file_size_nanos{}; + uint64_t env_get_file_modification_time_nanos{}; + uint64_t env_rename_file_nanos{}; + uint64_t env_link_file_nanos{}; + uint64_t env_lock_file_nanos{}; + uint64_t env_unlock_file_nanos{}; + uint64_t env_new_logger_nanos{}; + + uint64_t get_cpu_nanos{}; + uint64_t iter_next_cpu_nanos{}; + uint64_t iter_prev_cpu_nanos{}; + uint64_t iter_seek_cpu_nanos{}; // Time spent in encrypting data. Populated when EncryptedEnv is used. - uint64_t encrypt_data_nanos; + uint64_t encrypt_data_nanos{}; // Time spent in decrypting data. Populated when EncryptedEnv is used. 
-  uint64_t decrypt_data_nanos;
+  uint64_t decrypt_data_nanos{};
 
   std::map<int, PerfContextByLevel>* level_to_perf_context = nullptr;
   bool per_level_perf_context_enabled = false;
diff --git a/java/CMakeLists.txt b/java/CMakeLists.txt
index 9afd42927..c4c51a1db 100644
--- a/java/CMakeLists.txt
+++ b/java/CMakeLists.txt
@@ -33,6 +33,7 @@ set(JNI_NATIVE_SOURCES
         rocksjni/env_options.cc
         rocksjni/event_listener.cc
         rocksjni/event_listener_jnicallback.cc
+        rocksjni/flink_compactionfilterjni.cc
         rocksjni/filter.cc
         rocksjni/ingest_external_file_options.cc
         rocksjni/iterator.cc
@@ -152,6 +153,7 @@ set(JAVA_MAIN_CLASSES
   src/main/java/org/rocksdb/ExternalFileIngestionInfo.java
   src/main/java/org/rocksdb/Filter.java
   src/main/java/org/rocksdb/FileOperationInfo.java
+  src/main/java/org/rocksdb/FlinkCompactionFilter.java
   src/main/java/org/rocksdb/FlushJobInfo.java
   src/main/java/org/rocksdb/FlushReason.java
   src/main/java/org/rocksdb/FlushOptions.java
@@ -448,6 +450,7 @@ if(${CMAKE_VERSION} VERSION_LESS "3.11.4" OR (${Java_VERSION_MINOR} STREQUAL "7"
         org.rocksdb.Env
         org.rocksdb.EnvOptions
         org.rocksdb.Filter
+        org.rocksdb.FlinkCompactionFilter
         org.rocksdb.FlushOptions
         org.rocksdb.HashLinkedListMemTableConfig
         org.rocksdb.HashSkipListMemTableConfig
diff --git a/java/Makefile b/java/Makefile
index d5414c678..094a3c158 100644
--- a/java/Makefile
+++ b/java/Makefile
@@ -32,6 +32,7 @@ NATIVE_JAVA_CLASSES = \
 	org.rocksdb.DirectSlice\
 	org.rocksdb.Env\
 	org.rocksdb.EnvOptions\
+	org.rocksdb.FlinkCompactionFilter\
 	org.rocksdb.FlushOptions\
 	org.rocksdb.Filter\
 	org.rocksdb.IngestExternalFileOptions\
diff --git a/java/crossbuild/Vagrantfile b/java/crossbuild/Vagrantfile
index 0ee50de2c..a3035e683 100644
--- a/java/crossbuild/Vagrantfile
+++ b/java/crossbuild/Vagrantfile
@@ -33,7 +33,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   end
 
   config.vm.provider "virtualbox" do |v|
-    v.memory = 2048
+    v.memory = 6048
     v.cpus = 4
     v.customize ["modifyvm", :id, "--nictype1", "virtio" ]
   end
diff --git a/java/crossbuild/build-win.bat b/java/crossbuild/build-win.bat
new file mode 100644
index 000000000..2925ec19a
--- /dev/null
+++ b/java/crossbuild/build-win.bat
@@ -0,0 +1,16 @@
+:: install git, java 8, maven, visual studio community 15 (2017)
+
+set MSBUILD=C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\MSBuild\15.0\Bin\MSBuild.exe
+
+if exist build rd /s /q build
+if exist librocksdbjni-win64.dll del librocksdbjni-win64.dll
+mkdir build && cd build
+
+cmake -G "Visual Studio 15 Win64" -DWITH_JNI=1 ..
+
+"%MSBUILD%" rocksdb.sln /p:Configuration=Release /m
+
+cd ..
+
+copy build\java\Release\rocksdbjni-shared.dll librocksdbjni-win64.dll
+echo Result is in librocksdbjni-win64.dll
\ No newline at end of file
diff --git a/java/crossbuild/docker-build-linux-centos.sh b/java/crossbuild/docker-build-linux-centos.sh
index 78dbfb6da..16581dec7 100755
--- a/java/crossbuild/docker-build-linux-centos.sh
+++ b/java/crossbuild/docker-build-linux-centos.sh
@@ -13,21 +13,25 @@ cd /rocksdb-local-build
 
 # Use scl devtoolset if available
 if hash scl 2>/dev/null; then
-  if scl --list | grep -q 'devtoolset-7'; then
-    # CentOS 7+
-    scl enable devtoolset-7 'make clean-not-downloaded'
-    scl enable devtoolset-7 'PORTABLE=1 make -j2 rocksdbjavastatic'
-  elif scl --list | grep -q 'devtoolset-2'; then
-    # CentOS 5 or 6
-    scl enable devtoolset-2 'make clean-not-downloaded'
-    scl enable devtoolset-2 'PORTABLE=1 make -j2 rocksdbjavastatic'
-  else
-    echo "Could not find devtoolset"
-    exit 1;
-  fi
+  if scl --list | grep -q 'devtoolset-8'; then
+    # CentOS 6+
+    scl enable devtoolset-8 'make clean-not-downloaded'
+    scl enable devtoolset-8 'PORTABLE=1 make -j2 rocksdbjavastatic'
+  elif scl --list | grep -q 'devtoolset-7'; then
+    # CentOS 6+
+    scl enable devtoolset-7 'make clean-not-downloaded'
+    scl enable devtoolset-7 'PORTABLE=1 make -j2 rocksdbjavastatic'
+  elif scl --list | grep -q 'devtoolset-2'; then
+    # CentOS 5 or 6
+    scl enable devtoolset-2 'make clean-not-downloaded'
+    scl enable devtoolset-2 'PORTABLE=1 make -j2 rocksdbjavastatic'
+  else
+    echo "Could not find devtoolset"
+    exit 1;
+  fi
 else
-  make clean-not-downloaded
-  PORTABLE=1 make -j2 rocksdbjavastatic
+  make clean-not-downloaded
+  PORTABLE=1 make -j2 rocksdbjavastatic
 fi
 
 cp java/target/librocksdbjni-linux*.so java/target/rocksdbjni-*-linux*.jar java/target/rocksdbjni-*-linux*.jar.sha1 /rocksdb-java-target
diff --git a/java/deploysettings.xml b/java/deploysettings.xml
new file mode 100644
index 000000000..7b73248e0
--- /dev/null
+++ b/java/deploysettings.xml
@@ -0,0 +1,12 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
+          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+          xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 https://maven.apache.org/xsd/settings-1.0.0.xsd">
+  <servers>
+    <server>
+      <id>sonatype-nexus-staging</id>
+      <username>${sonatype_user}</username>
+      <password>${sonatype_pw}</password>
+    </server>
+  </servers>
+</settings>
\ No newline at end of file
diff --git a/java/pom.xml.template b/java/pom.xml.template
index 4abff4768..148f74963 100644
--- a/java/pom.xml.template
+++ b/java/pom.xml.template
@@ -2,12 +2,12 @@
   <modelVersion>4.0.0</modelVersion>
 
-  <groupId>org.rocksdb</groupId>
-  <artifactId>rocksdbjni</artifactId>
-  <version>${ROCKSDB_JAVA_VERSION}</version>
+  <groupId>com.ververica</groupId>
+  <artifactId>frocksdbjni</artifactId>
+  <version>${FROCKSDB_JAVA_VERSION}</version>
 
   <name>RocksDB JNI</name>
-  <description>RocksDB fat jar that contains .so files for linux32 and linux64 (glibc and musl-libc), jnilib files
+  <description>RocksDB fat jar with modifications specific for Apache Flink that contains .so files for linux32 and linux64 (glibc and musl-libc), jnilib files
     for Mac OSX, and a .dll for Windows x64.
   </description>
   <url>https://rocksdb.org</url>
@@ -19,17 +19,12 @@
       <url>http://www.apache.org/licenses/LICENSE-2.0.html</url>
       <distribution>repo</distribution>
     </license>
-    <license>
-      <name>GNU General Public License, version 2</name>
-      <url>http://www.gnu.org/licenses/gpl-2.0.html</url>
-      <distribution>repo</distribution>
-    </license>
   </licenses>
 
   <scm>
-    <connection>scm:git:https://github.com/facebook/rocksdb.git</connection>
-    <developerConnection>scm:git:https://github.com/facebook/rocksdb.git</developerConnection>
-    <url>scm:git:https://github.com/facebook/rocksdb.git</url>
+    <connection>scm:git:https://github.com/ververica/frocksdb.git</connection>
+    <developerConnection>scm:git:https://github.com/ververica/frocksdb.git</developerConnection>
+    <url>scm:git:https://github.com/ververica/frocksdb.git</url>
   </scm>
diff --git a/java/publish-frocksdbjni.sh b/java/publish-frocksdbjni.sh
new file mode 100644
index 000000000..2a6bd2865
--- /dev/null
+++ b/java/publish-frocksdbjni.sh
@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+# fail on errors
+set -e
+
+PREFIX=java/target/frocksdb-release/frocksdbjni-${VERSION}
+
+function deploy() {
+  FILE=$1
+  CLASSIFIER=$2
+  echo "Deploying file=${FILE} with classifier=${CLASSIFIER} to sonatype with prefix=${PREFIX}"
+  sonatype_user="${USER}" sonatype_pw="${PASSWORD}" mvn gpg:sign-and-deploy-file \
+    --settings java/deploysettings.xml \
+    -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ \
+    -DrepositoryId=sonatype-nexus-staging \
+    -DpomFile=${PREFIX}.pom \
+    -Dfile=$FILE \
+    -Dclassifier=$CLASSIFIER \
+    -Dgpg.keyname="${KEYNAME}" \
+    -Dgpg.passphrase="${PASSPHRASE}"
+}
+
+PREFIX=java/target/frocksdb-release/frocksdbjni-${VERSION}
+
+deploy ${PREFIX}-sources.jar sources
+deploy ${PREFIX}-javadoc.jar javadoc
+deploy ${PREFIX}.jar
diff --git a/java/rocksjni/flink_compactionfilterjni.cc b/java/rocksjni/flink_compactionfilterjni.cc
new file mode 100644
index 000000000..378854d06
--- /dev/null
+++ b/java/rocksjni/flink_compactionfilterjni.cc
@@ -0,0 +1,240 @@
+#include <jni.h>  // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+#include <cassert>
+#include <memory>
+
+#include "include/org_rocksdb_FlinkCompactionFilter.h"
+#include "loggerjnicallback.h"
+#include "portal.h"
+#include "rocksjni/jnicallback.h"
+#include "utilities/flink/flink_compaction_filter.h"
+
+namespace ROCKSDB_NAMESPACE {
+namespace flink {
+
+class JniCallbackBase : public ROCKSDB_NAMESPACE::JniCallback {
+ public:
+  JniCallbackBase(JNIEnv* env, jobject jcallback_obj)
+      : JniCallback(env, jcallback_obj) {}
+
+ protected:
+  inline void CheckAndRethrowException(JNIEnv* env) const {
+    if (env->ExceptionCheck()) {
+      env->ExceptionDescribe();
+      env->Throw(env->ExceptionOccurred());
+    }
+  }
+};
+
+// This list element filter operates on list state for which the byte length
+// of the elements is unknown (variable); the list element serializer has to
+// be used in this case to compute the offset of the next element. The filter
+// wraps a Java object implemented in Flink. The Java object holds the element
+// serializer and performs the filtering.
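+// A note on the callback pattern shared by the wrappers below: each call
+// attaches the current (compaction) thread to the JVM if necessary, copies
+// the RocksDB data into JNI objects, invokes the cached jmethodID on the
+// wrapped Java object, re-raises any pending Java exception via
+// CheckAndRethrowException, and releases the JNI env again afterwards.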
+class JavaListElementFilter
+    : public ROCKSDB_NAMESPACE::flink::FlinkCompactionFilter::ListElementFilter,
+      JniCallbackBase {
+ public:
+  JavaListElementFilter(JNIEnv* env, jobject jlist_filter)
+      : JniCallbackBase(env, jlist_filter) {
+    jclass jclazz = ROCKSDB_NAMESPACE::JavaClass::getJClass(
+        env, "org/rocksdb/FlinkCompactionFilter$ListElementFilter");
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return;
+    }
+    m_jnext_unexpired_offset_methodid =
+        env->GetMethodID(jclazz, "nextUnexpiredOffset", "([BJJ)I");
+    assert(m_jnext_unexpired_offset_methodid != nullptr);
+  }
+
+  std::size_t NextUnexpiredOffset(const ROCKSDB_NAMESPACE::Slice& list,
+                                  int64_t ttl,
+                                  int64_t current_timestamp) const override {
+    jboolean attached_thread = JNI_FALSE;
+    JNIEnv* env = getJniEnv(&attached_thread);
+    jbyteArray jlist = ROCKSDB_NAMESPACE::JniUtil::copyBytes(env, list);
+    CheckAndRethrowException(env);
+    if (jlist == nullptr) {
+      return static_cast<std::size_t>(-1);
+    }
+    auto jl_ttl = static_cast<jlong>(ttl);
+    auto jl_current_timestamp = static_cast<jlong>(current_timestamp);
+    jint next_offset =
+        env->CallIntMethod(m_jcallback_obj, m_jnext_unexpired_offset_methodid,
+                           jlist, jl_ttl, jl_current_timestamp);
+    CheckAndRethrowException(env);
+    env->DeleteLocalRef(jlist);
+    releaseJniEnv(attached_thread);
+    return static_cast<std::size_t>(next_offset);
+  };
+
+ private:
+  jmethodID m_jnext_unexpired_offset_methodid;
+};
+
+class JavaListElementFilterFactory
+    : public ROCKSDB_NAMESPACE::flink::FlinkCompactionFilter::
+          ListElementFilterFactory,
+      JniCallbackBase {
+ public:
+  JavaListElementFilterFactory(JNIEnv* env, jobject jlist_filter_factory)
+      : JniCallbackBase(env, jlist_filter_factory) {
+    jclass jclazz = ROCKSDB_NAMESPACE::JavaClass::getJClass(
+        env, "org/rocksdb/FlinkCompactionFilter$ListElementFilterFactory");
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return;
+    }
+    m_jcreate_filter_methodid = env->GetMethodID(
+        jclazz, "createListElementFilter",
+        "()Lorg/rocksdb/FlinkCompactionFilter$ListElementFilter;");
+    assert(m_jcreate_filter_methodid != nullptr);
+  }
+
+  FlinkCompactionFilter::ListElementFilter* CreateListElementFilter(
+      std::shared_ptr<Logger> /*logger*/) const override {
+    jboolean attached_thread = JNI_FALSE;
+    JNIEnv* env = getJniEnv(&attached_thread);
+    auto jlist_filter =
+        env->CallObjectMethod(m_jcallback_obj, m_jcreate_filter_methodid);
+    auto list_filter = new JavaListElementFilter(env, jlist_filter);
+    CheckAndRethrowException(env);
+    releaseJniEnv(attached_thread);
+    return list_filter;
+  };
+
+ private:
+  jmethodID m_jcreate_filter_methodid;
+};
+
+class JavaTimeProvider
+    : public ROCKSDB_NAMESPACE::flink::FlinkCompactionFilter::TimeProvider,
+      JniCallbackBase {
+ public:
+  JavaTimeProvider(JNIEnv* env, jobject jtime_provider)
+      : JniCallbackBase(env, jtime_provider) {
+    jclass jclazz = ROCKSDB_NAMESPACE::JavaClass::getJClass(
+        env, "org/rocksdb/FlinkCompactionFilter$TimeProvider");
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return;
+    }
+    m_jcurrent_timestamp_methodid =
+        env->GetMethodID(jclazz, "currentTimestamp", "()J");
+    assert(m_jcurrent_timestamp_methodid != nullptr);
+  }
+
+  int64_t CurrentTimestamp() const override {
+    jboolean attached_thread = JNI_FALSE;
+    JNIEnv* env = getJniEnv(&attached_thread);
+    auto jtimestamp =
+        env->CallLongMethod(m_jcallback_obj, m_jcurrent_timestamp_methodid);
+    CheckAndRethrowException(env);
+    releaseJniEnv(attached_thread);
+    return static_cast<int64_t>(jtimestamp);
+  };
+
+ private:
+  jmethodID m_jcurrent_timestamp_methodid;
+};
+
+static FlinkCompactionFilter::ListElementFilterFactory*
+createListElementFilterFactory(JNIEnv* env, jint ji_list_elem_len,
+                               jobject jlist_filter_factory) {
+  FlinkCompactionFilter::ListElementFilterFactory* list_filter_factory =
+      nullptr;
+  if (ji_list_elem_len > 0) {
+    auto fixed_size = static_cast<std::size_t>(ji_list_elem_len);
+    list_filter_factory =
+        new FlinkCompactionFilter::FixedListElementFilterFactory(
+            fixed_size, static_cast<std::size_t>(0));
+  } else if (jlist_filter_factory != nullptr) {
+    list_filter_factory =
+        new JavaListElementFilterFactory(env, jlist_filter_factory);
+  }
+  return list_filter_factory;
+}
+
+/*
+ * Class:     org_rocksdb_FlinkCompactionFilter
+ * Method:    createNewFlinkCompactionFilterConfigHolder
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_FlinkCompactionFilter_createNewFlinkCompactionFilterConfigHolder(
+    JNIEnv* /* env */, jclass /* jcls */) {
+  return reinterpret_cast<jlong>(
+      new std::shared_ptr<FlinkCompactionFilter::ConfigHolder>(
+          new FlinkCompactionFilter::ConfigHolder()));
+}
+
+/*
+ * Class:     org_rocksdb_FlinkCompactionFilter
+ * Method:    disposeFlinkCompactionFilterConfigHolder
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_FlinkCompactionFilter_disposeFlinkCompactionFilterConfigHolder(
+    JNIEnv* /* env */, jclass /* jcls */, jlong handle) {
+  auto* config_holder =
+      reinterpret_cast<std::shared_ptr<FlinkCompactionFilter::ConfigHolder>*>(
+          handle);
+  delete config_holder;
+}
+
+/*
+ * Class:     org_rocksdb_FlinkCompactionFilter
+ * Method:    createNewFlinkCompactionFilter0
+ * Signature: (JJJ)J
+ */
+jlong Java_org_rocksdb_FlinkCompactionFilter_createNewFlinkCompactionFilter0(
+    JNIEnv* env, jclass /* jcls */, jlong config_holder_handle,
+    jobject jtime_provider, jlong logger_handle) {
+  auto config_holder =
+      *(reinterpret_cast<std::shared_ptr<FlinkCompactionFilter::ConfigHolder>*>(
+          config_holder_handle));
+  auto time_provider = new JavaTimeProvider(env, jtime_provider);
+  auto logger =
+      logger_handle == 0
+          ? nullptr
+          : *(reinterpret_cast<
+                std::shared_ptr<ROCKSDB_NAMESPACE::LoggerJniCallback>*>(
+                logger_handle));
+  return reinterpret_cast<jlong>(new FlinkCompactionFilter(
+      config_holder,
+      std::unique_ptr<FlinkCompactionFilter::TimeProvider>(time_provider),
+      logger));
+}
+
+/*
+ * Class:     org_rocksdb_FlinkCompactionFilter
+ * Method:    configureFlinkCompactionFilter
+ * Signature: (JIIJJILorg/rocksdb/FlinkCompactionFilter$ListElementFilter;)Z
+ */
+jboolean Java_org_rocksdb_FlinkCompactionFilter_configureFlinkCompactionFilter(
+    JNIEnv* env, jclass /* jcls */, jlong handle, jint ji_state_type,
+    jint ji_timestamp_offset, jlong jl_ttl_milli,
+    jlong jquery_time_after_num_entries, jint ji_list_elem_len,
+    jobject jlist_filter_factory) {
+  auto state_type =
+      static_cast<FlinkCompactionFilter::StateType>(ji_state_type);
+  auto timestamp_offset = static_cast<std::size_t>(ji_timestamp_offset);
+  auto ttl = static_cast<int64_t>(jl_ttl_milli);
+  auto query_time_after_num_entries =
+      static_cast<int64_t>(jquery_time_after_num_entries);
+  auto config_holder =
+      *(reinterpret_cast<std::shared_ptr<FlinkCompactionFilter::ConfigHolder>*>(
+          handle));
+  auto list_filter_factory = createListElementFilterFactory(
+      env, ji_list_elem_len, jlist_filter_factory);
+  auto config = new FlinkCompactionFilter::Config{
+      state_type, timestamp_offset, ttl, query_time_after_num_entries,
+      std::unique_ptr<FlinkCompactionFilter::ListElementFilterFactory>(
+          list_filter_factory)};
+  return static_cast<jboolean>(config_holder->Configure(config));
+}
+
+}  // namespace flink
+}  // namespace ROCKSDB_NAMESPACE
diff --git a/java/src/main/java/org/rocksdb/FlinkCompactionFilter.java b/java/src/main/java/org/rocksdb/FlinkCompactionFilter.java
new file mode 100644
index 000000000..ee575d5ba
--- /dev/null
+++ b/java/src/main/java/org/rocksdb/FlinkCompactionFilter.java
@@ -0,0 +1,177 @@
+// Copyright (c) 2017-present, Facebook, Inc.  All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Just a Java wrapper around FlinkCompactionFilter implemented in C++.
+ *
+ * Note: this compaction filter is a special implementation, designed for usage only in the
+ * Apache Flink project.
+ */
+public class FlinkCompactionFilter extends AbstractCompactionFilter<Slice> {
+  public enum StateType {
+    // WARNING!!! Do not change the order of enum entries as it is important for jni translation
+    Disabled,
+    Value,
+    List
+  }
+
+  public FlinkCompactionFilter(ConfigHolder configHolder, TimeProvider timeProvider) {
+    this(configHolder, timeProvider, null);
+  }
+
+  public FlinkCompactionFilter(
+      ConfigHolder configHolder, TimeProvider timeProvider, Logger logger) {
+    super(createNewFlinkCompactionFilter0(
+        configHolder.nativeHandle_, timeProvider, logger == null ? 0 : logger.nativeHandle_));
+  }
+
+  private native static long createNewFlinkCompactionFilter0(
+      long configHolderHandle, TimeProvider timeProvider, long loggerHandle);
+  private native static long createNewFlinkCompactionFilterConfigHolder();
+  private native static void disposeFlinkCompactionFilterConfigHolder(long configHolderHandle);
+  private native static boolean configureFlinkCompactionFilter(long configHolderHandle,
+      int stateType, int timestampOffset, long ttl, long queryTimeAfterNumEntries,
+      int fixedElementLength, ListElementFilterFactory listElementFilterFactory);
+
+  public interface ListElementFilter {
+    /**
+     * Gets the offset of the first unexpired element in the list.
+     *
+     * <p>Native code wraps this Java object and calls it for list state
+     * for which the element byte length is unknown and a Flink custom type serializer has to be
+     * used to compute the offset of the next element in serialized form.
+     *
+     * @param list serialized list of elements with timestamps
+     * @param ttl time-to-live of the list elements
+     * @param currentTimestamp current timestamp to check expiration against
+     * @return offset of the first unexpired element in the list
+     */
+    @SuppressWarnings("unused")
+    int nextUnexpiredOffset(byte[] list, long ttl, long currentTimestamp);
+  }
+
+  public interface ListElementFilterFactory {
+    @SuppressWarnings("unused") ListElementFilter createListElementFilter();
+  }
+
+  public static class Config {
+    final StateType stateType;
+    final int timestampOffset;
+    final long ttl;
+    /**
+     * Number of state entries to process by the compaction filter before updating the current
+     * timestamp.
+     */
+    final long queryTimeAfterNumEntries;
+    final int fixedElementLength;
+    final ListElementFilterFactory listElementFilterFactory;
+
+    private Config(StateType stateType, int timestampOffset, long ttl,
+        long queryTimeAfterNumEntries, int fixedElementLength,
+        ListElementFilterFactory listElementFilterFactory) {
+      this.stateType = stateType;
+      this.timestampOffset = timestampOffset;
+      this.ttl = ttl;
+      this.queryTimeAfterNumEntries = queryTimeAfterNumEntries;
+      this.fixedElementLength = fixedElementLength;
+      this.listElementFilterFactory = listElementFilterFactory;
+    }
+
+    @SuppressWarnings("WeakerAccess")
+    public static Config createNotList(
+        StateType stateType, int timestampOffset, long ttl, long queryTimeAfterNumEntries) {
+      return new Config(stateType, timestampOffset, ttl, queryTimeAfterNumEntries, -1, null);
+    }
+
+    @SuppressWarnings("unused")
+    public static Config createForValue(long ttl, long queryTimeAfterNumEntries) {
+      return createNotList(StateType.Value, 0, ttl, queryTimeAfterNumEntries);
+    }
+
+    @SuppressWarnings("unused")
+    public static Config createForMap(long ttl, long queryTimeAfterNumEntries) {
+      return createNotList(StateType.Value, 1, ttl, queryTimeAfterNumEntries);
+    }
+
+    @SuppressWarnings("WeakerAccess")
+    public static Config createForFixedElementList(
+        long ttl, long queryTimeAfterNumEntries, int fixedElementLength) {
+      return new Config(StateType.List, 0, ttl, queryTimeAfterNumEntries, fixedElementLength, null);
+    }
+
+    @SuppressWarnings("WeakerAccess")
+    public static Config createForList(long ttl, long queryTimeAfterNumEntries,
+        ListElementFilterFactory listElementFilterFactory) {
+      return new Config(
+          StateType.List, 0, ttl, queryTimeAfterNumEntries, -1, listElementFilterFactory);
+    }
+  }
+
+  private static class ConfigHolder extends RocksObject {
+    ConfigHolder() {
+      super(createNewFlinkCompactionFilterConfigHolder());
+    }
+
+    @Override
+    protected void disposeInternal(long handle) {
+      disposeFlinkCompactionFilterConfigHolder(handle);
+    }
+  }
+
+  /** Provides the current timestamp to check expiration; it must be thread-safe. */
+  public interface TimeProvider {
+    long currentTimestamp();
+  }
+
+  public static class FlinkCompactionFilterFactory
+      extends AbstractCompactionFilterFactory<FlinkCompactionFilter> {
+    private final ConfigHolder configHolder;
+    private final TimeProvider timeProvider;
+    private final Logger logger;
+
+    @SuppressWarnings("unused")
+    public FlinkCompactionFilterFactory(TimeProvider timeProvider) {
+      this(timeProvider, null);
+    }
+
+    @SuppressWarnings("WeakerAccess")
+    public FlinkCompactionFilterFactory(TimeProvider timeProvider, Logger logger) {
+      this.configHolder = new ConfigHolder();
+      this.timeProvider = timeProvider;
+      this.logger = logger;
+    }
+
+    @Override
+    public void close() {
+      super.close();
+      configHolder.close();
+      if (logger != null) {
+        logger.close();
+      }
+    }
+
+    @Override
+    public FlinkCompactionFilter createCompactionFilter(Context context) {
+      return new FlinkCompactionFilter(configHolder, timeProvider, logger);
+    }
+
+    @Override
+    public String name() {
+      return "FlinkCompactionFilterFactory";
+    }
+
+    @SuppressWarnings("WeakerAccess")
+    public void configure(Config config) {
+      boolean alreadyConfigured =
+          !configureFlinkCompactionFilter(configHolder.nativeHandle_, config.stateType.ordinal(),
+              config.timestampOffset, config.ttl, config.queryTimeAfterNumEntries,
+              config.fixedElementLength, config.listElementFilterFactory);
+      if (alreadyConfigured) {
+        throw new IllegalStateException("Compaction filter is already configured");
+      }
+    }
+  }
+}
diff --git a/java/src/test/java/org/rocksdb/FilterTest.java b/java/src/test/java/org/rocksdb/FilterTest.java
index dc5c19fbc..e308ffefb 100644
--- a/java/src/test/java/org/rocksdb/FilterTest.java
+++ b/java/src/test/java/org/rocksdb/FilterTest.java
@@ -16,7 +16,7 @@ public class FilterTest {
 
   @Test
   public void filter() {
-    // new Bloom filter
+    // new Bloom filterFactory
     final BlockBasedTableConfig blockConfig = new BlockBasedTableConfig();
     try(final Options options = new Options()) {
diff --git a/java/src/test/java/org/rocksdb/FlinkCompactionFilterTest.java b/java/src/test/java/org/rocksdb/FlinkCompactionFilterTest.java
new file mode 100644
index 000000000..40320e9d5
--- /dev/null
+++ b/java/src/test/java/org/rocksdb/FlinkCompactionFilterTest.java
@@ -0,0 +1,356 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.rocksdb;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.rocksdb.FlinkCompactionFilter.StateType;
+import org.rocksdb.FlinkCompactionFilter.TimeProvider;
+
+public class FlinkCompactionFilterTest {
+  private static final int LONG_LENGTH = 8;
+  private static final int INT_LENGTH = 4;
+  private static final String MERGE_OPERATOR_NAME = "stringappendtest";
+  private static final byte DELIMITER = ',';
+  private static final long TTL = 100;
+  private static final long QUERY_TIME_AFTER_NUM_ENTRIES = 100;
+  private static final int TEST_TIMESTAMP_OFFSET = 2;
+  private static final Random rnd = new Random();
+
+  private TestTimeProvider timeProvider;
+  private List<StateContext> stateContexts;
+  private List<ColumnFamilyDescriptor> cfDescs;
+  private List<ColumnFamilyHandle> cfHandles;
+
+  @Rule public TemporaryFolder dbFolder = new TemporaryFolder();
+
+  @Before
+  public void init() {
+    timeProvider = new TestTimeProvider();
+    timeProvider.time = rnd.nextLong();
+    stateContexts =
+        Arrays.asList(new StateContext(StateType.Value, timeProvider, TEST_TIMESTAMP_OFFSET),
+            new FixedElementListStateContext(timeProvider),
+            new NonFixedElementListStateContext(timeProvider));
+    cfDescs = new ArrayList<>();
+    cfHandles = new ArrayList<>();
+    cfDescs.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
+    for (StateContext stateContext : stateContexts) {
+      cfDescs.add(stateContext.getCfDesc());
+    }
+  }
+
+  @After
+  public void cleanup() {
+    for (StateContext stateContext : stateContexts) {
+      stateContext.cfDesc.getOptions().close();
+      stateContext.filterFactory.close();
+    }
+  }
+
+  @Test
+  public void checkStateTypeEnumOrder() {
+    // if the order changes it also needs to be adjusted
+    // in utilities/flink/flink_compaction_filter.h
+    // and in utilities/flink/flink_compaction_filter_test.cc
+    assertThat(StateType.Disabled.ordinal()).isEqualTo(0);
+    assertThat(StateType.Value.ordinal()).isEqualTo(1);
+    assertThat(StateType.List.ordinal()).isEqualTo(2);
+  }
+
+  @Test
+  public void testCompactionFilter() throws RocksDBException {
+    try (DBOptions options = createDbOptions(); RocksDB rocksDb = setupDb(options)) {
+      try {
+        for (StateContext stateContext : stateContexts) {
+          stateContext.updateValueWithTimestamp(rocksDb);
+          stateContext.checkUnexpired(rocksDb);
+          rocksDb.compactRange(stateContext.columnFamilyHandle);
+          stateContext.checkUnexpired(rocksDb);
+        }
+
+        timeProvider.time += TTL + TTL / 2; // expire state
+
+        for (StateContext stateContext : stateContexts) {
+          stateContext.checkUnexpired(rocksDb);
+          rocksDb.compactRange(stateContext.columnFamilyHandle);
+          stateContext.checkExpired(rocksDb);
+          rocksDb.compactRange(stateContext.columnFamilyHandle);
+        }
+      } finally {
+        for (ColumnFamilyHandle cfHandle : cfHandles) {
+          cfHandle.close();
+        }
+      }
+    }
+  }
+
+  private static DBOptions createDbOptions() {
+    return new DBOptions().setCreateIfMissing(true).setCreateMissingColumnFamilies(true);
+  }
+
+  private RocksDB setupDb(DBOptions options) throws RocksDBException {
+    RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath(), cfDescs, cfHandles);
+    for (int i = 0; i < stateContexts.size(); i++) {
+      stateContexts.get(i).columnFamilyHandle = cfHandles.get(i + 1);
+    }
+    return db;
+  }
+
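+  // StateContext bundles everything needed to exercise one state flavour:
+  // a column family whose ColumnFamilyOptions carry a configured
+  // FlinkCompactionFilterFactory, helpers that write values prefixed with an
+  // 8-byte timestamp, and assertions on what survives compaction before and
+  // after the TestTimeProvider clock is advanced past the TTL.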
+ private static class StateContext { + private final String cf; + final String key; + final ColumnFamilyDescriptor cfDesc; + final String userValue; + final long currentTime; + final FlinkCompactionFilter.FlinkCompactionFilterFactory filterFactory; + + ColumnFamilyHandle columnFamilyHandle; + + private StateContext(StateType type, TimeProvider timeProvider, int timestampOffset) { + this.currentTime = timeProvider.currentTimestamp(); + userValue = type.name() + "StateValue"; + cf = getClass().getSimpleName() + "StateCf"; + key = type.name() + "StateKey"; + filterFactory = + new FlinkCompactionFilter.FlinkCompactionFilterFactory(timeProvider, createLogger()); + filterFactory.configure(createConfig(type, timestampOffset)); + cfDesc = new ColumnFamilyDescriptor(getASCII(cf), getOptionsWithFilter(filterFactory)); + } + + private Logger createLogger() { + try (DBOptions opts = new DBOptions().setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL)) { + return new Logger(opts) { + @Override + protected void log(InfoLogLevel infoLogLevel, String logMsg) { + System.out.println(infoLogLevel + ": " + logMsg); + } + }; + } + } + + FlinkCompactionFilter.Config createConfig(StateType type, int timestampOffset) { + return FlinkCompactionFilter.Config.createNotList( + type, timestampOffset, TTL, QUERY_TIME_AFTER_NUM_ENTRIES); + } + + private static ColumnFamilyOptions getOptionsWithFilter( + FlinkCompactionFilter.FlinkCompactionFilterFactory filterFactory) { + return new ColumnFamilyOptions() + .setCompactionFilterFactory(filterFactory) + .setMergeOperatorName(MERGE_OPERATOR_NAME); + } + + public String getKey() { + return key; + } + + ColumnFamilyDescriptor getCfDesc() { + return cfDesc; + } + + byte[] getValueWithTimestamp(RocksDB db) throws RocksDBException { + return db.get(columnFamilyHandle, getASCII(key)); + } + + void updateValueWithTimestamp(RocksDB db) throws RocksDBException { + db.put(columnFamilyHandle, getASCII(key), valueWithTimestamp()); + } + + byte[] valueWithTimestamp() { + return valueWithTimestamp(TEST_TIMESTAMP_OFFSET); + } + + byte[] valueWithTimestamp(@SuppressWarnings("SameParameterValue") int offset) { + return valueWithTimestamp(offset, currentTime); + } + + byte[] valueWithTimestamp(int offset, long timestamp) { + ByteBuffer buffer = getByteBuffer(offset); + buffer.put(new byte[offset]); + appendValueWithTimestamp(buffer, userValue, timestamp); + return buffer.array(); + } + + void appendValueWithTimestamp(ByteBuffer buffer, String value, long timestamp) { + buffer.putLong(timestamp); + buffer.putInt(value.length()); + buffer.put(getASCII(value)); + } + + ByteBuffer getByteBuffer(int offset) { + int length = offset + LONG_LENGTH + INT_LENGTH + userValue.length(); + return ByteBuffer.allocate(length); + } + + byte[] unexpiredValue() { + return valueWithTimestamp(); + } + + byte[] expiredValue() { + return null; + } + + void checkUnexpired(RocksDB db) throws RocksDBException { + assertThat(getValueWithTimestamp(db)).isEqualTo(unexpiredValue()); + } + + void checkExpired(RocksDB db) throws RocksDBException { + assertThat(getValueWithTimestamp(db)).isEqualTo(expiredValue()); + } + } + + private static class FixedElementListStateContext extends StateContext { + private FixedElementListStateContext(TimeProvider timeProvider) { + super(StateType.List, timeProvider, 0); + } + + @Override + FlinkCompactionFilter.Config createConfig(StateType type, int timestampOffset) { + // return FlinkCompactionFilter.Config.createForList(TTL, QUERY_TIME_AFTER_NUM_ENTRIES, + // ELEM_FILTER_FACTORY); + return 
FlinkCompactionFilter.Config.createForFixedElementList( + TTL, QUERY_TIME_AFTER_NUM_ENTRIES, 13 + userValue.getBytes().length); + } + + @Override + void updateValueWithTimestamp(RocksDB db) throws RocksDBException { + db.merge(columnFamilyHandle, getASCII(key), listExpired(3)); + db.merge(columnFamilyHandle, getASCII(key), mixedList(2, 3)); + db.merge(columnFamilyHandle, getASCII(key), listUnexpired(4)); + } + + @Override + byte[] unexpiredValue() { + return mixedList(5, 7); + } + + byte[] mergeBytes(byte[]... bytes) { + int length = 0; + for (byte[] a : bytes) { + length += a.length; + } + ByteBuffer buffer = ByteBuffer.allocate(length); + for (byte[] a : bytes) { + buffer.put(a); + } + return buffer.array(); + } + + @Override + byte[] expiredValue() { + return listUnexpired(7); + } + + private byte[] mixedList(int numberOfExpiredElements, int numberOfUnexpiredElements) { + assert numberOfExpiredElements > 0; + assert numberOfUnexpiredElements > 0; + return mergeBytes(listExpired(numberOfExpiredElements), new byte[] {DELIMITER}, + listUnexpired(numberOfUnexpiredElements)); + } + + private byte[] listExpired(int numberOfElements) { + return list(numberOfElements, currentTime); + } + + private byte[] listUnexpired(int numberOfElements) { + return list(numberOfElements, currentTime + TTL); + } + + private byte[] list(int numberOfElements, long timestamp) { + ByteBuffer buffer = getByteBufferForList(numberOfElements); + for (int i = 0; i < numberOfElements; i++) { + appendValueWithTimestamp(buffer, userValue, timestamp); + if (i < numberOfElements - 1) { + buffer.put(DELIMITER); + } + } + return buffer.array(); + } + + private ByteBuffer getByteBufferForList(int numberOfElements) { + int length = ((LONG_LENGTH + INT_LENGTH + userValue.length() + 1) * numberOfElements) - 1; + return ByteBuffer.allocate(length); + } + } + + private static class NonFixedElementListStateContext extends FixedElementListStateContext { + private static FlinkCompactionFilter.ListElementFilterFactory ELEM_FILTER_FACTORY = + new ListElementFilterFactory(); + + private NonFixedElementListStateContext(TimeProvider timeProvider) { + super(timeProvider); + } + + @Override + FlinkCompactionFilter.Config createConfig(StateType type, int timestampOffset) { + // return FlinkCompactionFilter.Config.createForList(TTL, QUERY_TIME_AFTER_NUM_ENTRIES, + // ELEM_FILTER_FACTORY); + return FlinkCompactionFilter.Config.createForList( + TTL, QUERY_TIME_AFTER_NUM_ENTRIES, ELEM_FILTER_FACTORY); + } + + private static class ListElementFilterFactory + implements FlinkCompactionFilter.ListElementFilterFactory { + @Override + public FlinkCompactionFilter.ListElementFilter createListElementFilter() { + return new FlinkCompactionFilter.ListElementFilter() { + @Override + public int nextUnexpiredOffset(byte[] list, long ttl, long currentTimestamp) { + int currentOffset = 0; + while (currentOffset < list.length) { + ByteBuffer bf = ByteBuffer.wrap(list, currentOffset, list.length - currentOffset); + long timestamp = bf.getLong(); + if (timestamp + ttl > currentTimestamp) { + break; + } + int elemLen = bf.getInt(8); + currentOffset += 13 + elemLen; + } + return currentOffset; + } + }; + } + } + } + + private static byte[] getASCII(String str) { + return str.getBytes(StandardCharsets.US_ASCII); + } + + private static class TestTimeProvider implements TimeProvider { + private long time; + + @Override + public long currentTimestamp() { + return time; + } + } +} \ No newline at end of file diff --git a/monitoring/perf_context.cc 
b/monitoring/perf_context.cc index 9e56f1018..ad5cab6b4 100644 --- a/monitoring/perf_context.cc +++ b/monitoring/perf_context.cc @@ -9,325 +9,26 @@ namespace ROCKSDB_NAMESPACE { -#if defined(NPERF_CONTEXT) -// Should not be used because the counters are not thread-safe. -// Put here just to make get_perf_context() simple without ifdef. +#if defined(NPERF_CONTEXT) || !defined(ROCKSDB_SUPPORT_THREAD_LOCAL) PerfContext perf_context; -#elif defined(ROCKSDB_SUPPORT_THREAD_LOCAL) +#else #if defined(OS_SOLARIS) -__thread PerfContext perf_context; -#else // OS_SOLARIS -thread_local PerfContext perf_context; -#endif // OS_SOLARIS +__thread PerfContext perf_context_; #else -#error "No thread-local support. Disable perf context with -DNPERF_CONTEXT." +__thread PerfContext perf_context; +#endif #endif PerfContext* get_perf_context() { +#if defined(NPERF_CONTEXT) || !defined(ROCKSDB_SUPPORT_THREAD_LOCAL) return &perf_context; -} - -PerfContext::~PerfContext() { -#if !defined(NPERF_CONTEXT) && defined(ROCKSDB_SUPPORT_THREAD_LOCAL) && !defined(OS_SOLARIS) - ClearPerLevelPerfContext(); -#endif -} - -PerfContext::PerfContext(const PerfContext& other) { -#ifdef NPERF_CONTEXT - (void)other; #else - user_key_comparison_count = other.user_key_comparison_count; - block_cache_hit_count = other.block_cache_hit_count; - block_read_count = other.block_read_count; - block_read_byte = other.block_read_byte; - block_read_time = other.block_read_time; - block_cache_index_hit_count = other.block_cache_index_hit_count; - index_block_read_count = other.index_block_read_count; - block_cache_filter_hit_count = other.block_cache_filter_hit_count; - filter_block_read_count = other.filter_block_read_count; - compression_dict_block_read_count = other.compression_dict_block_read_count; - secondary_cache_hit_count = other.secondary_cache_hit_count; - block_checksum_time = other.block_checksum_time; - block_decompress_time = other.block_decompress_time; - get_read_bytes = other.get_read_bytes; - multiget_read_bytes = other.multiget_read_bytes; - iter_read_bytes = other.iter_read_bytes; - internal_key_skipped_count = other.internal_key_skipped_count; - internal_delete_skipped_count = other.internal_delete_skipped_count; - internal_recent_skipped_count = other.internal_recent_skipped_count; - internal_merge_count = other.internal_merge_count; - write_wal_time = other.write_wal_time; - get_snapshot_time = other.get_snapshot_time; - get_from_memtable_time = other.get_from_memtable_time; - get_from_memtable_count = other.get_from_memtable_count; - get_post_process_time = other.get_post_process_time; - get_from_output_files_time = other.get_from_output_files_time; - seek_on_memtable_time = other.seek_on_memtable_time; - seek_on_memtable_count = other.seek_on_memtable_count; - next_on_memtable_count = other.next_on_memtable_count; - prev_on_memtable_count = other.prev_on_memtable_count; - seek_child_seek_time = other.seek_child_seek_time; - seek_child_seek_count = other.seek_child_seek_count; - seek_min_heap_time = other.seek_min_heap_time; - seek_internal_seek_time = other.seek_internal_seek_time; - find_next_user_entry_time = other.find_next_user_entry_time; - write_pre_and_post_process_time = other.write_pre_and_post_process_time; - write_memtable_time = other.write_memtable_time; - write_delay_time = other.write_delay_time; - write_thread_wait_nanos = other.write_thread_wait_nanos; - write_scheduling_flushes_compactions_time = - other.write_scheduling_flushes_compactions_time; - db_mutex_lock_nanos = other.db_mutex_lock_nanos; - 
db_condition_wait_nanos = other.db_condition_wait_nanos; - merge_operator_time_nanos = other.merge_operator_time_nanos; - read_index_block_nanos = other.read_index_block_nanos; - read_filter_block_nanos = other.read_filter_block_nanos; - new_table_block_iter_nanos = other.new_table_block_iter_nanos; - new_table_iterator_nanos = other.new_table_iterator_nanos; - block_seek_nanos = other.block_seek_nanos; - find_table_nanos = other.find_table_nanos; - bloom_memtable_hit_count = other.bloom_memtable_hit_count; - bloom_memtable_miss_count = other.bloom_memtable_miss_count; - bloom_sst_hit_count = other.bloom_sst_hit_count; - bloom_sst_miss_count = other.bloom_sst_miss_count; - key_lock_wait_time = other.key_lock_wait_time; - key_lock_wait_count = other.key_lock_wait_count; - - env_new_sequential_file_nanos = other.env_new_sequential_file_nanos; - env_new_random_access_file_nanos = other.env_new_random_access_file_nanos; - env_new_writable_file_nanos = other.env_new_writable_file_nanos; - env_reuse_writable_file_nanos = other.env_reuse_writable_file_nanos; - env_new_random_rw_file_nanos = other.env_new_random_rw_file_nanos; - env_new_directory_nanos = other.env_new_directory_nanos; - env_file_exists_nanos = other.env_file_exists_nanos; - env_get_children_nanos = other.env_get_children_nanos; - env_get_children_file_attributes_nanos = - other.env_get_children_file_attributes_nanos; - env_delete_file_nanos = other.env_delete_file_nanos; - env_create_dir_nanos = other.env_create_dir_nanos; - env_create_dir_if_missing_nanos = other.env_create_dir_if_missing_nanos; - env_delete_dir_nanos = other.env_delete_dir_nanos; - env_get_file_size_nanos = other.env_get_file_size_nanos; - env_get_file_modification_time_nanos = - other.env_get_file_modification_time_nanos; - env_rename_file_nanos = other.env_rename_file_nanos; - env_link_file_nanos = other.env_link_file_nanos; - env_lock_file_nanos = other.env_lock_file_nanos; - env_unlock_file_nanos = other.env_unlock_file_nanos; - env_new_logger_nanos = other.env_new_logger_nanos; - get_cpu_nanos = other.get_cpu_nanos; - iter_next_cpu_nanos = other.iter_next_cpu_nanos; - iter_prev_cpu_nanos = other.iter_prev_cpu_nanos; - iter_seek_cpu_nanos = other.iter_seek_cpu_nanos; - if (per_level_perf_context_enabled && level_to_perf_context != nullptr) { - ClearPerLevelPerfContext(); - } - if (other.level_to_perf_context != nullptr) { - level_to_perf_context = new std::map(); - *level_to_perf_context = *other.level_to_perf_context; - } - per_level_perf_context_enabled = other.per_level_perf_context_enabled; -#endif -} - -PerfContext::PerfContext(PerfContext&& other) noexcept { -#ifdef NPERF_CONTEXT - (void)other; +#if defined(OS_SOLARIS) + return &perf_context_; #else - user_key_comparison_count = other.user_key_comparison_count; - block_cache_hit_count = other.block_cache_hit_count; - block_read_count = other.block_read_count; - block_read_byte = other.block_read_byte; - block_read_time = other.block_read_time; - block_cache_index_hit_count = other.block_cache_index_hit_count; - index_block_read_count = other.index_block_read_count; - block_cache_filter_hit_count = other.block_cache_filter_hit_count; - filter_block_read_count = other.filter_block_read_count; - compression_dict_block_read_count = other.compression_dict_block_read_count; - secondary_cache_hit_count = other.secondary_cache_hit_count; - block_checksum_time = other.block_checksum_time; - block_decompress_time = other.block_decompress_time; - get_read_bytes = other.get_read_bytes; - multiget_read_bytes = 
other.multiget_read_bytes; - iter_read_bytes = other.iter_read_bytes; - internal_key_skipped_count = other.internal_key_skipped_count; - internal_delete_skipped_count = other.internal_delete_skipped_count; - internal_recent_skipped_count = other.internal_recent_skipped_count; - internal_merge_count = other.internal_merge_count; - write_wal_time = other.write_wal_time; - get_snapshot_time = other.get_snapshot_time; - get_from_memtable_time = other.get_from_memtable_time; - get_from_memtable_count = other.get_from_memtable_count; - get_post_process_time = other.get_post_process_time; - get_from_output_files_time = other.get_from_output_files_time; - seek_on_memtable_time = other.seek_on_memtable_time; - seek_on_memtable_count = other.seek_on_memtable_count; - next_on_memtable_count = other.next_on_memtable_count; - prev_on_memtable_count = other.prev_on_memtable_count; - seek_child_seek_time = other.seek_child_seek_time; - seek_child_seek_count = other.seek_child_seek_count; - seek_min_heap_time = other.seek_min_heap_time; - seek_internal_seek_time = other.seek_internal_seek_time; - find_next_user_entry_time = other.find_next_user_entry_time; - write_pre_and_post_process_time = other.write_pre_and_post_process_time; - write_memtable_time = other.write_memtable_time; - write_delay_time = other.write_delay_time; - write_thread_wait_nanos = other.write_thread_wait_nanos; - write_scheduling_flushes_compactions_time = - other.write_scheduling_flushes_compactions_time; - db_mutex_lock_nanos = other.db_mutex_lock_nanos; - db_condition_wait_nanos = other.db_condition_wait_nanos; - merge_operator_time_nanos = other.merge_operator_time_nanos; - read_index_block_nanos = other.read_index_block_nanos; - read_filter_block_nanos = other.read_filter_block_nanos; - new_table_block_iter_nanos = other.new_table_block_iter_nanos; - new_table_iterator_nanos = other.new_table_iterator_nanos; - block_seek_nanos = other.block_seek_nanos; - find_table_nanos = other.find_table_nanos; - bloom_memtable_hit_count = other.bloom_memtable_hit_count; - bloom_memtable_miss_count = other.bloom_memtable_miss_count; - bloom_sst_hit_count = other.bloom_sst_hit_count; - bloom_sst_miss_count = other.bloom_sst_miss_count; - key_lock_wait_time = other.key_lock_wait_time; - key_lock_wait_count = other.key_lock_wait_count; - - env_new_sequential_file_nanos = other.env_new_sequential_file_nanos; - env_new_random_access_file_nanos = other.env_new_random_access_file_nanos; - env_new_writable_file_nanos = other.env_new_writable_file_nanos; - env_reuse_writable_file_nanos = other.env_reuse_writable_file_nanos; - env_new_random_rw_file_nanos = other.env_new_random_rw_file_nanos; - env_new_directory_nanos = other.env_new_directory_nanos; - env_file_exists_nanos = other.env_file_exists_nanos; - env_get_children_nanos = other.env_get_children_nanos; - env_get_children_file_attributes_nanos = - other.env_get_children_file_attributes_nanos; - env_delete_file_nanos = other.env_delete_file_nanos; - env_create_dir_nanos = other.env_create_dir_nanos; - env_create_dir_if_missing_nanos = other.env_create_dir_if_missing_nanos; - env_delete_dir_nanos = other.env_delete_dir_nanos; - env_get_file_size_nanos = other.env_get_file_size_nanos; - env_get_file_modification_time_nanos = - other.env_get_file_modification_time_nanos; - env_rename_file_nanos = other.env_rename_file_nanos; - env_link_file_nanos = other.env_link_file_nanos; - env_lock_file_nanos = other.env_lock_file_nanos; - env_unlock_file_nanos = other.env_unlock_file_nanos; - env_new_logger_nanos 
= other.env_new_logger_nanos; - get_cpu_nanos = other.get_cpu_nanos; - iter_next_cpu_nanos = other.iter_next_cpu_nanos; - iter_prev_cpu_nanos = other.iter_prev_cpu_nanos; - iter_seek_cpu_nanos = other.iter_seek_cpu_nanos; - if (per_level_perf_context_enabled && level_to_perf_context != nullptr) { - ClearPerLevelPerfContext(); - } - if (other.level_to_perf_context != nullptr) { - level_to_perf_context = other.level_to_perf_context; - other.level_to_perf_context = nullptr; - } - per_level_perf_context_enabled = other.per_level_perf_context_enabled; + return &perf_context; #endif -} - -// TODO(Zhongyi): reduce code duplication between copy constructor and -// assignment operator -PerfContext& PerfContext::operator=(const PerfContext& other) { -#ifdef NPERF_CONTEXT - (void)other; -#else - user_key_comparison_count = other.user_key_comparison_count; - block_cache_hit_count = other.block_cache_hit_count; - block_read_count = other.block_read_count; - block_read_byte = other.block_read_byte; - block_read_time = other.block_read_time; - block_cache_index_hit_count = other.block_cache_index_hit_count; - index_block_read_count = other.index_block_read_count; - block_cache_filter_hit_count = other.block_cache_filter_hit_count; - filter_block_read_count = other.filter_block_read_count; - compression_dict_block_read_count = other.compression_dict_block_read_count; - secondary_cache_hit_count = other.secondary_cache_hit_count; - block_checksum_time = other.block_checksum_time; - block_decompress_time = other.block_decompress_time; - get_read_bytes = other.get_read_bytes; - multiget_read_bytes = other.multiget_read_bytes; - iter_read_bytes = other.iter_read_bytes; - internal_key_skipped_count = other.internal_key_skipped_count; - internal_delete_skipped_count = other.internal_delete_skipped_count; - internal_recent_skipped_count = other.internal_recent_skipped_count; - internal_merge_count = other.internal_merge_count; - write_wal_time = other.write_wal_time; - get_snapshot_time = other.get_snapshot_time; - get_from_memtable_time = other.get_from_memtable_time; - get_from_memtable_count = other.get_from_memtable_count; - get_post_process_time = other.get_post_process_time; - get_from_output_files_time = other.get_from_output_files_time; - seek_on_memtable_time = other.seek_on_memtable_time; - seek_on_memtable_count = other.seek_on_memtable_count; - next_on_memtable_count = other.next_on_memtable_count; - prev_on_memtable_count = other.prev_on_memtable_count; - seek_child_seek_time = other.seek_child_seek_time; - seek_child_seek_count = other.seek_child_seek_count; - seek_min_heap_time = other.seek_min_heap_time; - seek_internal_seek_time = other.seek_internal_seek_time; - find_next_user_entry_time = other.find_next_user_entry_time; - write_pre_and_post_process_time = other.write_pre_and_post_process_time; - write_memtable_time = other.write_memtable_time; - write_delay_time = other.write_delay_time; - write_thread_wait_nanos = other.write_thread_wait_nanos; - write_scheduling_flushes_compactions_time = - other.write_scheduling_flushes_compactions_time; - db_mutex_lock_nanos = other.db_mutex_lock_nanos; - db_condition_wait_nanos = other.db_condition_wait_nanos; - merge_operator_time_nanos = other.merge_operator_time_nanos; - read_index_block_nanos = other.read_index_block_nanos; - read_filter_block_nanos = other.read_filter_block_nanos; - new_table_block_iter_nanos = other.new_table_block_iter_nanos; - new_table_iterator_nanos = other.new_table_iterator_nanos; - block_seek_nanos = other.block_seek_nanos; 
- find_table_nanos = other.find_table_nanos; - bloom_memtable_hit_count = other.bloom_memtable_hit_count; - bloom_memtable_miss_count = other.bloom_memtable_miss_count; - bloom_sst_hit_count = other.bloom_sst_hit_count; - bloom_sst_miss_count = other.bloom_sst_miss_count; - key_lock_wait_time = other.key_lock_wait_time; - key_lock_wait_count = other.key_lock_wait_count; - - env_new_sequential_file_nanos = other.env_new_sequential_file_nanos; - env_new_random_access_file_nanos = other.env_new_random_access_file_nanos; - env_new_writable_file_nanos = other.env_new_writable_file_nanos; - env_reuse_writable_file_nanos = other.env_reuse_writable_file_nanos; - env_new_random_rw_file_nanos = other.env_new_random_rw_file_nanos; - env_new_directory_nanos = other.env_new_directory_nanos; - env_file_exists_nanos = other.env_file_exists_nanos; - env_get_children_nanos = other.env_get_children_nanos; - env_get_children_file_attributes_nanos = - other.env_get_children_file_attributes_nanos; - env_delete_file_nanos = other.env_delete_file_nanos; - env_create_dir_nanos = other.env_create_dir_nanos; - env_create_dir_if_missing_nanos = other.env_create_dir_if_missing_nanos; - env_delete_dir_nanos = other.env_delete_dir_nanos; - env_get_file_size_nanos = other.env_get_file_size_nanos; - env_get_file_modification_time_nanos = - other.env_get_file_modification_time_nanos; - env_rename_file_nanos = other.env_rename_file_nanos; - env_link_file_nanos = other.env_link_file_nanos; - env_lock_file_nanos = other.env_lock_file_nanos; - env_unlock_file_nanos = other.env_unlock_file_nanos; - env_new_logger_nanos = other.env_new_logger_nanos; - get_cpu_nanos = other.get_cpu_nanos; - iter_next_cpu_nanos = other.iter_next_cpu_nanos; - iter_prev_cpu_nanos = other.iter_prev_cpu_nanos; - iter_seek_cpu_nanos = other.iter_seek_cpu_nanos; - if (per_level_perf_context_enabled && level_to_perf_context != nullptr) { - ClearPerLevelPerfContext(); - } - if (other.level_to_perf_context != nullptr) { - level_to_perf_context = new std::map<int, PerfContextByLevel>(); - *level_to_perf_context = *other.level_to_perf_context; - } - per_level_perf_context_enabled = other.per_level_perf_context_enabled; #endif - return *this; } void PerfContext::Reset() { diff --git a/monitoring/perf_context_imp.h b/monitoring/perf_context_imp.h index d1804067c..072cf2ef1 100644 --- a/monitoring/perf_context_imp.h +++ b/monitoring/perf_context_imp.h @@ -16,7 +16,7 @@ extern PerfContext perf_context; extern __thread PerfContext perf_context_; #define perf_context (*get_perf_context()) #else -extern thread_local PerfContext perf_context; +extern __thread PerfContext perf_context; #endif #endif diff --git a/src.mk b/src.mk index 0b33b02e2..721e9d302 100644 --- a/src.mk +++ b/src.mk @@ -253,6 +253,7 @@ LIB_SOURCES = \ utilities/fault_injection_env.cc \ utilities/fault_injection_fs.cc \ utilities/fault_injection_secondary_cache.cc \ + utilities/flink/flink_compaction_filter.cc \ utilities/leveldb_options/leveldb_options.cc \ utilities/memory/memory_util.cc \ utilities/merge_operators.cc \ @@ -565,6 +566,7 @@ TEST_MAIN_SOURCES = \ utilities/cassandra/cassandra_serialize_test.cc \ utilities/checkpoint/checkpoint_test.cc \ utilities/env_timed_test.cc \ + utilities/flink/flink_compaction_filter_test.cc \ utilities/memory/memory_test.cc \ utilities/merge_operators/string_append/stringappend_test.cc \ utilities/object_registry_test.cc \ @@ -616,6 +618,7 @@ JNI_NATIVE_SOURCES = \ java/rocksjni/env_options.cc \ java/rocksjni/event_listener.cc \ java/rocksjni/event_listener_jnicallback.cc \ +
java/rocksjni/flink_compactionfilterjni.cc \ java/rocksjni/ingest_external_file_options.cc \ java/rocksjni/filter.cc \ java/rocksjni/iterator.cc \ diff --git a/utilities/flink/flink_compaction_filter.cc b/utilities/flink/flink_compaction_filter.cc new file mode 100644 index 000000000..4cbdd7e7d --- /dev/null +++ b/utilities/flink/flink_compaction_filter.cc @@ -0,0 +1,206 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +#include "utilities/flink/flink_compaction_filter.h" + +#include <algorithm> +#include <cinttypes> + +namespace ROCKSDB_NAMESPACE { +namespace flink { + +int64_t DeserializeTimestamp(const char* src, std::size_t offset) { + uint64_t result = 0; + for (unsigned long i = 0; i < sizeof(uint64_t); i++) { + result |= static_cast<uint64_t>(static_cast<unsigned char>(src[offset + i])) + << ((sizeof(int64_t) - 1 - i) * BITS_PER_BYTE); + } + return static_cast<int64_t>(result); +} + +CompactionFilter::Decision Decide(const char* ts_bytes, const int64_t ttl, + const std::size_t timestamp_offset, + const int64_t current_timestamp, + const std::shared_ptr<Logger>& logger) { + int64_t timestamp = DeserializeTimestamp(ts_bytes, timestamp_offset); + const int64_t ttlWithoutOverflow = + timestamp > 0 ? std::min(JAVA_MAX_LONG - timestamp, ttl) : ttl; + Debug(logger.get(), + "Last access timestamp: %" PRId64 " ms, ttlWithoutOverflow: %" PRId64 + " ms, Current timestamp: %" PRId64 " ms", + timestamp, ttlWithoutOverflow, current_timestamp); + return timestamp + ttlWithoutOverflow <= current_timestamp + ? CompactionFilter::Decision::kRemove + : CompactionFilter::Decision::kKeep; +} + +FlinkCompactionFilter::ConfigHolder::ConfigHolder() + : config_(const_cast<Config*>(&DISABLED_CONFIG)){}; + +FlinkCompactionFilter::ConfigHolder::~ConfigHolder() { + Config* config = config_.load(); + if (config != &DISABLED_CONFIG) { + delete config; + } +} + +// At the moment Flink configures filters (which can already be created) only +// once, when the user creates state. Configuring again could otherwise leak a +// ListElementFilter in Config, or race its deletion in Configure() against +// its usage in FilterV2(). The method returns true if the holder was not yet +// configured, i.e. if this call applied the given config. +bool FlinkCompactionFilter::ConfigHolder::Configure(Config* config) { + bool not_configured = GetConfig() == &DISABLED_CONFIG; + if (not_configured) { + assert(config->query_time_after_num_entries_ >= 0); + config_ = config; + } + return not_configured; +} + +FlinkCompactionFilter::Config* +FlinkCompactionFilter::ConfigHolder::GetConfig() { + return config_.load(); +} + +std::size_t FlinkCompactionFilter::FixedListElementFilter::NextUnexpiredOffset( + const Slice& list, int64_t ttl, int64_t current_timestamp) const { + std::size_t offset = 0; + while (offset < list.size()) { + Decision decision = Decide(list.data(), ttl, offset + timestamp_offset_, + current_timestamp, logger_); + if (decision != Decision::kKeep) { + std::size_t new_offset = offset + fixed_size_; + if (new_offset >= JAVA_MAX_SIZE || new_offset < offset) { + return JAVA_MAX_SIZE; + } + offset = new_offset; + } else { + break; + } + } + return offset; +} + +const char* FlinkCompactionFilter::Name() const { + return "FlinkCompactionFilter"; +} + +FlinkCompactionFilter::FlinkCompactionFilter( + std::shared_ptr<ConfigHolder> config_holder, + std::unique_ptr<TimeProvider> time_provider) + : FlinkCompactionFilter(std::move(config_holder), std::move(time_provider), + nullptr){};
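+// Note: the two-argument constructor above delegates to the three-argument +// one below with a null logger; RocksDB's Debug()/Error() helpers tolerate a +// null Logger, so logging then simply becomes a no-op.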
+FlinkCompactionFilter::FlinkCompactionFilter( + std::shared_ptr<ConfigHolder> config_holder, + std::unique_ptr<TimeProvider> time_provider, + std::shared_ptr<Logger> logger) + : config_holder_(std::move(config_holder)), + time_provider_(std::move(time_provider)), + logger_(std::move(logger)), + config_cached_(const_cast<Config*>(&DISABLED_CONFIG)){}; + +inline void FlinkCompactionFilter::InitConfigIfNotYet() const { + const_cast<FlinkCompactionFilter*>(this)->config_cached_ = + config_cached_ == &DISABLED_CONFIG ? config_holder_->GetConfig() + : config_cached_; +} + +CompactionFilter::Decision FlinkCompactionFilter::FilterV2( + int /*level*/, const Slice& key, ValueType value_type, + const Slice& existing_value, std::string* new_value, + std::string* /*skip_until*/) const { + InitConfigIfNotYet(); + CreateListElementFilterIfNull(); + UpdateCurrentTimestampIfStale(); + + const char* data = existing_value.data(); + + Debug(logger_.get(), + "Call FlinkCompactionFilter::FilterV2 - Key: %s, Data: %s, Value type: " + "%d, " + "State type: %d, TTL: %" PRId64 " ms, timestamp_offset: %zu", + key.ToString().c_str(), existing_value.ToString(true).c_str(), + value_type, config_cached_->state_type_, config_cached_->ttl_, + config_cached_->timestamp_offset_); + + // the value is too short to contain a timestamp at all + const bool tooShortValue = + existing_value.size() < + config_cached_->timestamp_offset_ + TIMESTAMP_BYTE_SIZE; + + const StateType state_type = config_cached_->state_type_; + const bool value_or_merge = + value_type == ValueType::kValue || value_type == ValueType::kMergeOperand; + const bool value_state = + state_type == StateType::Value && value_type == ValueType::kValue; + const bool list_entry = state_type == StateType::List && value_or_merge; + const bool toDecide = value_state || list_entry; + const bool list_filter = list_entry && list_element_filter_; + + Decision decision = Decision::kKeep; + if (!tooShortValue && toDecide) { + decision = list_filter ?
ListDecide(existing_value, new_value) + : Decide(data, config_cached_->ttl_, + config_cached_->timestamp_offset_, + current_timestamp_, logger_); + } + Debug(logger_.get(), "Decision: %d", static_cast<int>(decision)); + return decision; +} + +CompactionFilter::Decision FlinkCompactionFilter::ListDecide( + const Slice& existing_value, std::string* new_value) const { + std::size_t offset = 0; + if (offset < existing_value.size()) { + Decision decision = Decide(existing_value.data(), config_cached_->ttl_, + offset + config_cached_->timestamp_offset_, + current_timestamp_, logger_); + if (decision != Decision::kKeep) { + offset = + ListNextUnexpiredOffset(existing_value, offset, config_cached_->ttl_); + if (offset >= JAVA_MAX_SIZE) { + return Decision::kKeep; + } + } + } + if (offset >= existing_value.size()) { + return Decision::kRemove; + } else if (offset > 0) { + SetUnexpiredListValue(existing_value, offset, new_value); + return Decision::kChangeValue; + } + return Decision::kKeep; +} + +std::size_t FlinkCompactionFilter::ListNextUnexpiredOffset( + const Slice& existing_value, size_t offset, int64_t ttl) const { + std::size_t new_offset = list_element_filter_->NextUnexpiredOffset( + existing_value, ttl, current_timestamp_); + if (new_offset >= JAVA_MAX_SIZE || new_offset < offset) { + Error(logger_.get(), "Wrong next offset in list filter: %zu -> %zu", offset, + new_offset); + new_offset = JAVA_MAX_SIZE; + } else { + Debug(logger_.get(), "Next unexpired offset: %zu -> %zu", offset, + new_offset); + } + return new_offset; +} + +void FlinkCompactionFilter::SetUnexpiredListValue( + const Slice& existing_value, std::size_t offset, + std::string* new_value) const { + new_value->clear(); + auto new_value_char = existing_value.data() + offset; + auto new_value_size = existing_value.size() - offset; + new_value->assign(new_value_char, new_value_size); + Logger* logger = logger_.get(); + if (logger && logger->GetInfoLogLevel() <= InfoLogLevel::DEBUG_LEVEL) { + Slice new_value_slice = Slice(new_value_char, new_value_size); + Debug(logger, "New list value: %s", new_value_slice.ToString(true).c_str()); + } +} +} // namespace flink +} // namespace ROCKSDB_NAMESPACE diff --git a/utilities/flink/flink_compaction_filter.h b/utilities/flink/flink_compaction_filter.h new file mode 100644 index 000000000..3b3b651ea --- /dev/null +++ b/utilities/flink/flink_compaction_filter.h @@ -0,0 +1,191 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +#pragma once +#include <cassert> + +#include <atomic> +#include <cstdint> +#include <limits> +#include <memory> +#include <string> + +#include "rocksdb/compaction_filter.h" +#include "rocksdb/slice.h" + +namespace ROCKSDB_NAMESPACE { +namespace flink { + +static const std::size_t BITS_PER_BYTE = static_cast<std::size_t>(8); +static const std::size_t TIMESTAMP_BYTE_SIZE = static_cast<std::size_t>(8); +static const int64_t JAVA_MIN_LONG = static_cast<int64_t>(0x8000000000000000); +static const int64_t JAVA_MAX_LONG = static_cast<int64_t>(0x7fffffffffffffff); +static const std::size_t JAVA_MAX_SIZE = static_cast<std::size_t>(0x7fffffff); + +/** + * Compaction filter for removing expired Flink state entries with ttl. + * + * Note: this compaction filter is a special implementation, designed for usage + * only in the Apache Flink project. + */ +class FlinkCompactionFilter : public CompactionFilter { + public: + enum StateType { + // WARNING!!!
Do not change the order of enum entries as it is important for + // jni translation + Disabled, + Value, + List + }; + + // Provides the current timestamp to check expiration; it must be thread + // safe. + class TimeProvider { + public: + virtual ~TimeProvider() = default; + virtual int64_t CurrentTimestamp() const = 0; + }; + + // Accepts serialized list state and checks its elements for expiration, + // starting from the head. It stops upon discovery of the first unexpired + // element and returns its offset, or an offset greater than or equal to the + // list byte length if all elements are expired. + class ListElementFilter { + public: + virtual ~ListElementFilter() = default; + virtual std::size_t NextUnexpiredOffset( + const Slice& list, int64_t ttl, int64_t current_timestamp) const = 0; + }; + + // This filter can operate directly on the list state bytes because the byte + // length of a list element and the last access timestamp position are + // known. + class FixedListElementFilter : public ListElementFilter { + public: + explicit FixedListElementFilter(std::size_t fixed_size, + std::size_t timestamp_offset, + std::shared_ptr<Logger> logger) + : fixed_size_(fixed_size), + timestamp_offset_(timestamp_offset), + logger_(std::move(logger)) {} + std::size_t NextUnexpiredOffset(const Slice& list, int64_t ttl, + int64_t current_timestamp) const override; + + private: + std::size_t fixed_size_; + std::size_t timestamp_offset_; + std::shared_ptr<Logger> logger_; + }; + + // A factory is needed to create one ListElementFilter per compaction + // filter/thread and avoid concurrent access to the filter state. + class ListElementFilterFactory { + public: + virtual ~ListElementFilterFactory() = default; + virtual ListElementFilter* CreateListElementFilter( + std::shared_ptr<Logger> logger) const = 0; + }; + + class FixedListElementFilterFactory : public ListElementFilterFactory { + public: + explicit FixedListElementFilterFactory(std::size_t fixed_size, + std::size_t timestamp_offset) + : fixed_size_(fixed_size), timestamp_offset_(timestamp_offset) {} + FixedListElementFilter* CreateListElementFilter( + std::shared_ptr<Logger> logger) const override { + return new FixedListElementFilter(fixed_size_, timestamp_offset_, logger); + }; + + private: + std::size_t fixed_size_; + std::size_t timestamp_offset_; + }; + + struct Config { + StateType state_type_; + std::size_t timestamp_offset_; + int64_t ttl_; + // Number of state entries the compaction filter processes before updating + // the current timestamp. + int64_t query_time_after_num_entries_; + std::unique_ptr<ListElementFilterFactory> list_element_filter_factory_; + }; + + // Allows to configure at once all FlinkCompactionFilters created by the + // factory. The ConfigHolder holds the shared Config.
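+ // Illustrative example (not part of this patch): fixed-length list state + // whose elements are an 8-byte timestamp followed by a 4-byte payload could + // be configured as + // Config{StateType::List, /*timestamp_offset=*/0, /*ttl=*/3600000, + // /*query_time_after_num_entries=*/1000, + // std::unique_ptr<ListElementFilterFactory>( + // new FixedListElementFilterFactory(12, 0))}.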
+ class ConfigHolder { + public: + explicit ConfigHolder(); + ~ConfigHolder(); + bool Configure(Config* config); + Config* GetConfig(); + + private: + std::atomic<Config*> config_; + }; + + explicit FlinkCompactionFilter(std::shared_ptr<ConfigHolder> config_holder, + std::unique_ptr<TimeProvider> time_provider); + + explicit FlinkCompactionFilter(std::shared_ptr<ConfigHolder> config_holder, + std::unique_ptr<TimeProvider> time_provider, + std::shared_ptr<Logger> logger); + + const char* Name() const override; + Decision FilterV2(int level, const Slice& key, ValueType value_type, + const Slice& existing_value, std::string* new_value, + std::string* skip_until) const override; + + bool IgnoreSnapshots() const override { return true; } + + private: + inline void InitConfigIfNotYet() const; + + Decision ListDecide(const Slice& existing_value, + std::string* new_value) const; + + inline std::size_t ListNextUnexpiredOffset(const Slice& existing_value, + std::size_t offset, + int64_t ttl) const; + + inline void SetUnexpiredListValue(const Slice& existing_value, + std::size_t offset, + std::string* new_value) const; + + inline void CreateListElementFilterIfNull() const { + if (!list_element_filter_ && config_cached_->list_element_filter_factory_) { + const_cast<FlinkCompactionFilter*>(this)->list_element_filter_ = + std::unique_ptr<ListElementFilter>( + config_cached_->list_element_filter_factory_ + ->CreateListElementFilter(logger_)); + } + } + + inline void UpdateCurrentTimestampIfStale() const { + bool is_stale = + record_counter_ >= config_cached_->query_time_after_num_entries_; + if (is_stale) { + const_cast<FlinkCompactionFilter*>(this)->record_counter_ = 0; + const_cast<FlinkCompactionFilter*>(this)->current_timestamp_ = + time_provider_->CurrentTimestamp(); + } + const_cast<FlinkCompactionFilter*>(this)->record_counter_ = + record_counter_ + 1; + } + + std::shared_ptr<ConfigHolder> config_holder_; + std::unique_ptr<TimeProvider> time_provider_; + std::shared_ptr<Logger> logger_; + Config* config_cached_; + std::unique_ptr<ListElementFilter> list_element_filter_; + int64_t current_timestamp_ = std::numeric_limits<int64_t>::max(); + int64_t record_counter_ = std::numeric_limits<int64_t>::max(); +}; + +static const FlinkCompactionFilter::Config DISABLED_CONFIG = + FlinkCompactionFilter::Config{FlinkCompactionFilter::StateType::Disabled, 0, + std::numeric_limits<int64_t>::max(), + std::numeric_limits<int64_t>::max(), nullptr}; + +} // namespace flink +} // namespace ROCKSDB_NAMESPACE diff --git a/utilities/flink/flink_compaction_filter_test.cc b/utilities/flink/flink_compaction_filter_test.cc new file mode 100644 index 000000000..26613ae68 --- /dev/null +++ b/utilities/flink/flink_compaction_filter_test.cc @@ -0,0 +1,226 @@ +// Copyright (c) 2017-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory).
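+ +// The tests below cover value state and fixed-length list state: unexpired, +// expired and half-expired entries, the cached-timestamp refresh counter +// (QUERY_TIME_AFTER_NUM_ENTRIES) and value types the filter must ignore.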
+ +#include "utilities/flink/flink_compaction_filter.h" + +#include <random> + +#include "test_util/testharness.h" + +namespace ROCKSDB_NAMESPACE { +namespace flink { + +#define DISABLED FlinkCompactionFilter::StateType::Disabled +#define VALUE FlinkCompactionFilter::StateType::Value +#define LIST FlinkCompactionFilter::StateType::List + +#define KVALUE CompactionFilter::ValueType::kValue +#define KMERGE CompactionFilter::ValueType::kMergeOperand +#define KBLOB CompactionFilter::ValueType::kBlobIndex + +#define KKEEP CompactionFilter::Decision::kKeep +#define KREMOVE CompactionFilter::Decision::kRemove +#define KCHANGE CompactionFilter::Decision::kChangeValue + +#define EXPIRE (time += ttl + 20) + +#define EXPECT_ARR_EQ(arr1, arr2, num) \ + EXPECT_TRUE(0 == memcmp(arr1, arr2, num)); + +static const std::size_t TEST_TIMESTAMP_OFFSET = static_cast<std::size_t>(2); + +static const std::size_t LIST_ELEM_FIXED_LEN = static_cast<std::size_t>(8 + 4); + +static const int64_t QUERY_TIME_AFTER_NUM_ENTRIES = static_cast<int64_t>(10); + +class ConsoleLogger : public Logger { + public: + using Logger::Logv; + ConsoleLogger() : Logger(InfoLogLevel::DEBUG_LEVEL) {} + + void Logv(const char* format, va_list ap) override { + vprintf(format, ap); + printf("\n"); + } +}; + +int64_t time = 0; + +class TestTimeProvider : public FlinkCompactionFilter::TimeProvider { + public: + int64_t CurrentTimestamp() const override { return time; } +}; + +std::random_device rd; // NOLINT +std::mt19937 mt(rd()); // NOLINT +std::uniform_int_distribution<int64_t> rnd(JAVA_MIN_LONG, + JAVA_MAX_LONG); // NOLINT + +int64_t ttl = 100; + +Slice key = Slice("key"); // NOLINT +char data[24]; +std::string new_list = ""; // NOLINT +std::string stub = ""; // NOLINT + +FlinkCompactionFilter::StateType state_type; +CompactionFilter::ValueType value_type; +FlinkCompactionFilter* filter; // NOLINT + +void SetTimestamp(int64_t timestamp, size_t offset = 0, char* value = data) { + for (unsigned long i = 0; i < sizeof(uint64_t); i++) { + value[offset + i] = + static_cast<char>(static_cast<uint64_t>(timestamp) >> + ((sizeof(int64_t) - 1 - i) * BITS_PER_BYTE)); + } +} + +CompactionFilter::Decision decide(size_t data_size = sizeof(data)) { + return filter->FilterV2(0, key, value_type, Slice(data, data_size), &new_list, + &stub); +} + +void Init( + FlinkCompactionFilter::StateType stype, CompactionFilter::ValueType vtype, + FlinkCompactionFilter::ListElementFilterFactory* fixed_len_filter_factory, + size_t timestamp_offset, bool expired = false) { + time = expired ?
time + ttl + 20 : time; + state_type = stype; + value_type = vtype; + + auto config_holder = std::make_shared<FlinkCompactionFilter::ConfigHolder>(); + auto time_provider = new TestTimeProvider(); + auto logger = std::make_shared<ConsoleLogger>(); + + filter = new FlinkCompactionFilter( + config_holder, + std::unique_ptr<FlinkCompactionFilter::TimeProvider>(time_provider), + logger); + auto config = new FlinkCompactionFilter::Config{ + state_type, timestamp_offset, ttl, QUERY_TIME_AFTER_NUM_ENTRIES, + std::unique_ptr<FlinkCompactionFilter::ListElementFilterFactory>( + fixed_len_filter_factory)}; + EXPECT_EQ(decide(), KKEEP); // test the disabled config + EXPECT_TRUE(config_holder->Configure(config)); + EXPECT_FALSE(config_holder->Configure(config)); +} + +void InitValue(FlinkCompactionFilter::StateType stype, + CompactionFilter::ValueType vtype, bool expired = false, + size_t timestamp_offset = TEST_TIMESTAMP_OFFSET) { + time = rnd(mt); + SetTimestamp(time, timestamp_offset); + Init(stype, vtype, nullptr, timestamp_offset, expired); +} + +void InitList(CompactionFilter::ValueType vtype, bool all_expired = false, + bool first_elem_expired = false, size_t timestamp_offset = 0) { + time = rnd(mt); + SetTimestamp(first_elem_expired ? time - ttl - 20 : time, + timestamp_offset); // elem 1 ts + SetTimestamp(time, LIST_ELEM_FIXED_LEN + timestamp_offset); // elem 2 ts + auto fixed_len_filter_factory = + new FlinkCompactionFilter::FixedListElementFilterFactory( + LIST_ELEM_FIXED_LEN, static_cast<std::size_t>(0)); + Init(LIST, vtype, fixed_len_filter_factory, timestamp_offset, all_expired); +} + +void Deinit() { delete filter; } + +TEST(FlinkStateTtlTest, CheckStateTypeEnumOrder) { // NOLINT + // if the order changes it also needs to be adjusted in the Java client: + // in org.rocksdb.FlinkCompactionFilter + // and in org.rocksdb.FlinkCompactionFilterTest + EXPECT_EQ(DISABLED, 0); + EXPECT_EQ(VALUE, 1); + EXPECT_EQ(LIST, 2); +} + +TEST(FlinkStateTtlTest, SkipShortDataWithoutTimestamp) { // NOLINT + InitValue(VALUE, KVALUE, true); + EXPECT_EQ(decide(TIMESTAMP_BYTE_SIZE - 1), KKEEP); + Deinit(); +} + +TEST(FlinkValueStateTtlTest, Unexpired) { // NOLINT + InitValue(VALUE, KVALUE); + EXPECT_EQ(decide(), KKEEP); + Deinit(); +} + +TEST(FlinkValueStateTtlTest, Expired) { // NOLINT + InitValue(VALUE, KVALUE, true); + EXPECT_EQ(decide(), KREMOVE); + Deinit(); +} + +TEST(FlinkValueStateTtlTest, CachedTimeUpdate) { // NOLINT + InitValue(VALUE, KVALUE); + EXPECT_EQ(decide(), KKEEP); // also implicitly caches the current timestamp + EXPIRE; // advance the current time to expire, but the cached one is used + // QUERY_TIME_AFTER_NUM_ENTRIES - 2: + // -1 -> for the decide() on the disabled config in Init() + // and -1 -> for the decide() right after InitValue() + for (int64_t i = 0; i < QUERY_TIME_AFTER_NUM_ENTRIES - 2; i++) { + EXPECT_EQ(decide(), KKEEP); + } + EXPECT_EQ(decide(), KREMOVE); // the advanced timestamp should now refresh + // the cache and expire the state + Deinit(); +} + +TEST(FlinkValueStateTtlTest, WrongFilterValueType) { // NOLINT + InitValue(VALUE, KMERGE, true); + EXPECT_EQ(decide(), KKEEP); + Deinit(); +} + +TEST(FlinkListStateTtlTest, Unexpired) { // NOLINT + InitList(KMERGE); + EXPECT_EQ(decide(), KKEEP); + Deinit(); + + InitList(KVALUE); + EXPECT_EQ(decide(), KKEEP); + Deinit(); +} + +TEST(FlinkListStateTtlTest, Expired) { // NOLINT + InitList(KMERGE, true); + EXPECT_EQ(decide(), KREMOVE); + Deinit(); + + InitList(KVALUE, true); + EXPECT_EQ(decide(), KREMOVE); + Deinit(); +} + +TEST(FlinkListStateTtlTest, HalfExpired) { // NOLINT + InitList(KMERGE, false, true); + EXPECT_EQ(decide(), KCHANGE); + EXPECT_ARR_EQ(new_list.data(), data + LIST_ELEM_FIXED_LEN, + LIST_ELEM_FIXED_LEN);
+ Deinit(); + + InitList(KVALUE, false, true); + EXPECT_EQ(decide(), KCHANGE); + EXPECT_ARR_EQ(new_list.data(), data + LIST_ELEM_FIXED_LEN, + LIST_ELEM_FIXED_LEN); + Deinit(); +} + +TEST(FlinkListStateTtlTest, WrongFilterValueType) { // NOLINT + InitList(KBLOB, true); + EXPECT_EQ(decide(), KKEEP); + Deinit(); +} + +} // namespace flink +} // namespace ROCKSDB_NAMESPACE + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +}
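For context, a minimal usage sketch of the new filter, assuming a wall-clock time provider. This is illustrative only and not part of the patch; SystemClockTimeProvider and InstallFlinkTtlFilter are hypothetical names, and in practice Flink wires in its own JNI-backed provider and configures the holder from the Java side.

#include <chrono>
#include <memory>

#include "rocksdb/options.h"
#include "utilities/flink/flink_compaction_filter.h"

using ROCKSDB_NAMESPACE::flink::FlinkCompactionFilter;

// Hypothetical stand-in returning wall-clock milliseconds, matching the
// TimeProvider::CurrentTimestamp() contract exercised by the tests above.
class SystemClockTimeProvider : public FlinkCompactionFilter::TimeProvider {
 public:
  int64_t CurrentTimestamp() const override {
    using namespace std::chrono;
    return duration_cast<milliseconds>(system_clock::now().time_since_epoch())
        .count();
  }
};

void InstallFlinkTtlFilter(ROCKSDB_NAMESPACE::ColumnFamilyOptions* cf_opts) {
  auto holder = std::make_shared<FlinkCompactionFilter::ConfigHolder>();
  // The filter must outlive the column family; a static suffices for a sketch.
  static FlinkCompactionFilter filter(
      holder, std::unique_ptr<FlinkCompactionFilter::TimeProvider>(
                  new SystemClockTimeProvider()));
  cf_opts->compaction_filter = &filter;
  // Configured once, when the user creates TTL state: value state with the
  // 8-byte timestamp at offset 0, a 1 hour TTL, and the time re-queried
  // every 1000 processed entries.
  holder->Configure(new FlinkCompactionFilter::Config{
      FlinkCompactionFilter::StateType::Value, /*timestamp_offset=*/0,
      /*ttl=*/3600 * 1000, /*query_time_after_num_entries=*/1000, nullptr});
}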