diff --git a/.asf.yaml b/.asf.yaml index 9edce0b5eb2..94c18f0c581 100644 --- a/.asf.yaml +++ b/.asf.yaml @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. github: - description: "Scalable, redundant, and distributed object store for Apache Hadoop" + description: "Scalable, reliable, distributed storage system optimized for data analytics and object store workloads." homepage: https://ozone.apache.org labels: - hadoop diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 42ef94a0b36..4c6723daff8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -28,8 +28,14 @@ on: required: false env: FAIL_FAST: ${{ github.event_name == 'pull_request' }} + # Minimum required Java version for running Ozone is defined in pom.xml (javac.version). + TEST_JAVA_VERSION: 21 # JDK version used by CI build and tests; should match the JDK version in apache/ozone-runner image + MAVEN_ARGS: --batch-mode --settings ${{ github.workspace }}/dev-support/ci/maven-settings.xml --show-version MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 - OZONE_WITH_COVERAGE: ${{ github.repository == 'apache/ozone' && github.event_name == 'push' }} + HADOOP_IMAGE: ghcr.io/apache/hadoop + OZONE_IMAGE: ghcr.io/apache/ozone + OZONE_RUNNER_IMAGE: ghcr.io/apache/ozone-runner + OZONE_WITH_COVERAGE: ${{ github.event_name == 'push' }} jobs: build-info: runs-on: ubuntu-20.04 @@ -102,10 +108,6 @@ jobs: runs-on: ubuntu-20.04 timeout-minutes: 60 if: needs.build-info.outputs.needs-build == 'true' - strategy: - matrix: - java: [ 8 ] - fail-fast: false steps: - name: Checkout project uses: actions/checkout@v4 @@ -136,11 +138,11 @@ jobs: name: ratis-jars path: | ~/.m2/repository/org/apache/ratis - - name: Setup java + - name: Setup java ${{ env.TEST_JAVA_VERSION }} uses: actions/setup-java@v4 with: distribution: 'temurin' - java-version: ${{ matrix.java }} + java-version: ${{ env.TEST_JAVA_VERSION }} - name: Run a full build run: hadoop-ozone/dev-support/checks/build.sh -Pdist -Psrc -Dmaven.javadoc.skip=true ${{ inputs.ratis_args }} env: @@ -155,6 +157,7 @@ jobs: retention-days: 1 - name: Store source tarball for compilation uses: actions/upload-artifact@v4 + if: needs.build-info.outputs.needs-compile == 'true' with: name: ozone-src path: hadoop-ozone/dist/target/ozone-*-src.tar.gz @@ -171,6 +174,8 @@ jobs: - build-info - build - basic + - dependency + - license timeout-minutes: 45 if: needs.build-info.outputs.needs-compile == 'true' strategy: @@ -179,7 +184,7 @@ jobs: include: - os: ubuntu-20.04 - java: 8 - os: macos-12 + os: macos-13 fail-fast: false runs-on: ${{ matrix.os }} steps: @@ -212,13 +217,13 @@ jobs: name: ratis-jars path: | ~/.m2/repository/org/apache/ratis - - name: Setup java + - name: Setup java ${{ matrix.java }} uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: ${{ matrix.java }} - name: Compile Ozone using Java ${{ matrix.java }} - run: hadoop-ozone/dev-support/checks/build.sh -Pdist -Dskip.npx -Dskip.installnpx -Dmaven.javadoc.failOnWarnings=${{ matrix.java != 8 }} -Djavac.version=${{ matrix.java }} ${{ inputs.ratis_args }} + run: hadoop-ozone/dev-support/checks/build.sh -Pdist -DskipRecon -Dmaven.javadoc.failOnWarnings=${{ matrix.java != 8 }} -Djavac.version=${{ matrix.java }} ${{ inputs.ratis_args }} env: OZONE_WITH_COVERAGE: false DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} @@ -253,7 +258,7 
@@ jobs: key: maven-repo-${{ hashFiles('**/pom.xml') }} restore-keys: | maven-repo- - if: ${{ !contains('author,bats,docs', matrix.check) }} + if: ${{ !contains('author,bats', matrix.check) }} - name: Download Ratis repo if: ${{ inputs.ratis_args != '' }} uses: actions/download-artifact@v4 @@ -261,19 +266,18 @@ jobs: name: ratis-jars path: | ~/.m2/repository/org/apache/ratis - - name: Setup java + - name: Setup java 8 uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: 8 - name: Execute tests run: hadoop-ozone/dev-support/checks/${{ matrix.check }}.sh ${{ inputs.ratis_args }} - continue-on-error: true env: DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} - name: Summary of failures run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ matrix.check }}/summary.txt - if: ${{ !cancelled() }} + if: ${{ failure() }} - name: Archive build results uses: actions/upload-artifact@v4 if: ${{ !cancelled() }} @@ -309,19 +313,18 @@ jobs: name: ratis-jars path: | ~/.m2/repository/org/apache/ratis - - name: Setup java + - name: Setup java ${{ env.TEST_JAVA_VERSION }} uses: actions/setup-java@v4 with: distribution: 'temurin' - java-version: 8 + java-version: ${{ env.TEST_JAVA_VERSION }} - name: Execute tests run: hadoop-ozone/dev-support/checks/${{ github.job }}.sh ${{ inputs.ratis_args }} - continue-on-error: true env: DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} - name: Summary of failures run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt - if: ${{ !cancelled() }} + if: ${{ failure() }} - name: Archive build results uses: actions/upload-artifact@v4 if: ${{ !cancelled() }} @@ -341,6 +344,15 @@ jobs: uses: actions/checkout@v4 with: ref: ${{ needs.build-info.outputs.sha }} + - name: Cache for maven dependencies + uses: actions/cache/restore@v4 + with: + path: | + ~/.m2/repository/*/*/* + !~/.m2/repository/org/apache/ozone + key: maven-repo-${{ hashFiles('**/pom.xml') }} + restore-keys: | + maven-repo- - name: Download compiled Ozone binaries uses: actions/download-artifact@v4 with: @@ -388,13 +400,76 @@ jobs: name: ozone-repo path: | ~/.m2/repository/org/apache/ozone + - name: Setup java ${{ env.TEST_JAVA_VERSION }} + uses: actions/setup-java@v4 + with: + distribution: 'temurin' + java-version: ${{ env.TEST_JAVA_VERSION }} - name: Execute tests run: | hadoop-ozone/dev-support/checks/${{ github.job }}.sh + - name: Summary of failures + run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt + if: ${{ failure() }} + - name: Archive build results + uses: actions/upload-artifact@v4 + if: always() + with: + name: ${{ github.job }} + path: target/${{ github.job }} continue-on-error: true + repro: + needs: + - build-info + - build + runs-on: ubuntu-20.04 + timeout-minutes: 30 + steps: + - name: Checkout project + uses: actions/checkout@v4 + - name: Cache for maven dependencies + uses: actions/cache/restore@v4 + with: + path: | + ~/.m2/repository/*/*/* + !~/.m2/repository/org/apache/ozone + key: maven-repo-${{ hashFiles('**/pom.xml') }} + restore-keys: | + maven-repo- + - name: Download Ozone repo + id: download-ozone-repo + uses: actions/download-artifact@v4 + with: + name: ozone-repo + path: | + ~/.m2/repository/org/apache/ozone + - name: Download Ratis repo + if: ${{ inputs.ratis_args != '' }} + uses: actions/download-artifact@v4 + with: + name: ratis-jars + path: | + ~/.m2/repository/org/apache/ratis + - name: Setup java ${{ env.TEST_JAVA_VERSION }} + uses: actions/setup-java@v4 + with: + distribution: 
'temurin' + java-version: ${{ env.TEST_JAVA_VERSION }} + - name: Execute tests + run: | + hadoop-ozone/dev-support/checks/${{ github.job }}.sh -Pdist -Psrc -Dmaven.javadoc.skip=true ${{ inputs.ratis_args }} - name: Summary of failures run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt - if: ${{ !cancelled() }} + if: ${{ failure() }} + - name: Install diffoscope + run: | + sudo apt update -q + sudo apt install -y diffoscope + if: ${{ failure() }} + - name: Check artifact differences + run: | + hadoop-ozone/dev-support/checks/_diffoscope.sh + if: ${{ failure() }} - name: Archive build results uses: actions/upload-artifact@v4 if: always() @@ -407,6 +482,8 @@ jobs: - build-info - build - basic + - dependency + - license runs-on: ubuntu-20.04 timeout-minutes: 150 if: needs.build-info.outputs.needs-compose-tests == 'true' @@ -419,6 +496,15 @@ jobs: uses: actions/checkout@v4 with: ref: ${{ needs.build-info.outputs.sha }} + - name: Cache for maven dependencies + uses: actions/cache/restore@v4 + with: + path: | + ~/.m2/repository/*/*/* + !~/.m2/repository/org/apache/ozone + key: maven-repo-${{ hashFiles('**/pom.xml') }} + restore-keys: | + maven-repo- - name: Download compiled Ozone binaries uses: actions/download-artifact@v4 with: @@ -438,10 +524,9 @@ jobs: KEEP_IMAGE: false OZONE_ACCEPTANCE_SUITE: ${{ matrix.suite }} OZONE_VOLUME_OWNER: 1000 - continue-on-error: true - name: Summary of failures run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt - if: ${{ !cancelled() }} + if: ${{ failure() }} - name: Archive build results uses: actions/upload-artifact@v4 if: always() @@ -454,6 +539,8 @@ jobs: - build-info - build - basic + - dependency + - license runs-on: ubuntu-20.04 timeout-minutes: 60 if: needs.build-info.outputs.needs-kubernetes-tests == 'true' @@ -462,6 +549,15 @@ jobs: uses: actions/checkout@v4 with: ref: ${{ needs.build-info.outputs.sha }} + - name: Cache for maven dependencies + uses: actions/cache/restore@v4 + with: + path: | + ~/.m2/repository/*/*/* + !~/.m2/repository/org/apache/ozone + key: maven-repo-${{ hashFiles('**/pom.xml') }} + restore-keys: | + maven-repo- - name: Download compiled Ozone binaries uses: actions/download-artifact@v4 with: @@ -476,10 +572,9 @@ jobs: sudo mkdir .aws && sudo chmod 777 .aws && sudo chown 1000 .aws popd ./hadoop-ozone/dev-support/checks/kubernetes.sh - continue-on-error: true - name: Summary of failures run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt - if: ${{ !cancelled() }} + if: ${{ failure() }} - name: Archive build results uses: actions/upload-artifact@v4 if: always() @@ -529,25 +624,31 @@ jobs: name: ratis-jars path: | ~/.m2/repository/org/apache/ratis - - name: Setup java + - name: Setup java ${{ env.TEST_JAVA_VERSION }} uses: actions/setup-java@v4 with: distribution: 'temurin' - java-version: 17 + java-version: ${{ env.TEST_JAVA_VERSION }} - name: Execute tests - continue-on-error: true run: | args="${{ inputs.ratis_args }}" if [[ "${{ matrix.profile }}" == "flaky" ]]; then args="$args -Dsurefire.rerunFailingTestsCount=5 -Dsurefire.fork.timeout=3600" fi + if [[ "${{ matrix.profile }}" != "filesystem" ]]; then + args="$args -DskipShade" + fi hadoop-ozone/dev-support/checks/integration.sh -P${{ matrix.profile }} ${args} env: DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} - name: Summary of failures - run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt - if: ${{ !cancelled() }} + run: | + if [[ -s 
"target/${{ github.job }}/summary.md" ]]; then + cat target/${{ github.job }}/summary.md >> $GITHUB_STEP_SUMMARY + fi + hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt + if: ${{ failure() }} - name: Archive build results uses: actions/upload-artifact@v4 if: always() @@ -558,7 +659,7 @@ jobs: coverage: runs-on: ubuntu-20.04 timeout-minutes: 30 - if: github.repository == 'apache/ozone' && github.event_name == 'push' + if: github.event_name == 'push' needs: - build-info - acceptance @@ -587,15 +688,16 @@ jobs: run: | mkdir -p hadoop-ozone/dist/target tar xzvf target/artifacts/ozone-bin/ozone*.tar.gz -C hadoop-ozone/dist/target - - name: Calculate combined coverage - run: ./hadoop-ozone/dev-support/checks/coverage.sh - - name: Setup java 17 + - name: Setup java ${{ env.TEST_JAVA_VERSION }} uses: actions/setup-java@v4 with: distribution: 'temurin' - java-version: 17 + java-version: ${{ env.TEST_JAVA_VERSION }} + - name: Calculate combined coverage + run: ./hadoop-ozone/dev-support/checks/coverage.sh - name: Upload coverage to Sonar run: ./hadoop-ozone/dev-support/checks/sonar.sh + if: github.repository == 'apache/ozone' env: SONAR_TOKEN: ${{ secrets.SONARCLOUD_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/intermittent-test-check.yml b/.github/workflows/intermittent-test-check.yml index 5de5654aced..cb765f36217 100644 --- a/.github/workflows/intermittent-test-check.yml +++ b/.github/workflows/intermittent-test-check.yml @@ -115,7 +115,7 @@ jobs: java-version: 8 - name: Build (most) of Ozone run: | - args="-Dskip.npx -Dskip.installnpx -DskipShade -Dmaven.javadoc.skip=true" + args="-DskipRecon -DskipShade -Dmaven.javadoc.skip=true" if [[ "${{ github.event.inputs.ratis-ref }}" != "" ]]; then args="$args -Dratis.version=${{ needs.ratis.outputs.ratis-version }}" args="$args -Dratis.thirdparty.version=${{ needs.ratis.outputs.thirdparty-version }}" @@ -183,7 +183,7 @@ jobs: export OZONE_REPO_CACHED=true fi - args="-DexcludedGroups=native|slow|unhealthy" + args="-DexcludedGroups=native|slow|unhealthy -DskipShade" if [[ "${{ github.event.inputs.ratis-ref }}" != "" ]]; then args="$args -Dratis.version=${{ needs.ratis.outputs.ratis-version }}" args="$args -Dratis.thirdparty.version=${{ needs.ratis.outputs.thirdparty-version }}" diff --git a/.github/workflows/populate-cache.yml b/.github/workflows/populate-cache.yml index cc93390a5bc..94f2ccfe52d 100644 --- a/.github/workflows/populate-cache.yml +++ b/.github/workflows/populate-cache.yml @@ -26,8 +26,7 @@ on: - 'pom.xml' - '**/pom.xml' - '.github/workflows/populate-cache.yml' - schedule: - - cron: '20 3 * * *' + workflow_call: workflow_dispatch: jobs: @@ -74,7 +73,7 @@ jobs: - name: Fetch dependencies if: steps.restore-cache.outputs.cache-hit != 'true' - run: mvn --batch-mode --no-transfer-progress --show-version -Pgo-offline -Pdist clean verify + run: mvn --batch-mode --no-transfer-progress --show-version -Pgo-offline -Pdist -Drocks_tools_native clean verify - name: Delete Ozone jars from repo if: steps.restore-cache.outputs.cache-hit != 'true' diff --git a/.github/workflows/scheduled-cache-update.yml b/.github/workflows/scheduled-cache-update.yml new file mode 100644 index 00000000000..94ac45e785e --- /dev/null +++ b/.github/workflows/scheduled-cache-update.yml @@ -0,0 +1,27 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This workflow periodically updates dependency cache. + +name: scheduled-cache-update + +on: + schedule: + - cron: '20 3 * * *' + +jobs: + update: + uses: ./.github/workflows/populate-cache.yml + secrets: inherit diff --git a/.mvn/extensions.xml b/.mvn/extensions.xml index 20f1c034c58..549a1cddcd3 100644 --- a/.mvn/extensions.xml +++ b/.mvn/extensions.xml @@ -24,11 +24,11 @@ com.gradle develocity-maven-extension - 1.22.1 + 1.23 com.gradle common-custom-user-data-maven-extension - 2.0 + 2.0.1 diff --git a/SECURITY.md b/SECURITY.md index 3a89968026a..580f1862c7f 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,19 +2,7 @@ ## Supported Versions -The first stable release of Apache Ozone is 1.0, the previous alpha and beta releases are not supported by the community. - -| Version | Supported | -|---------------| ------------------ | -| 0.3.0 (alpha) | :x: | -| 0.4.0 (alpha) | :x: | -| 0.4.1 (alpha) | :x: | -| 0.5.0 (beta) | :x: | -| 1.0.0 | :x: | -| 1.1.0 | :x: | -| 1.2.1 | :x: | -| 1.3.0 | :x: | -| 1.4.0 | :white_check_mark: | +Please check the Apache Ozone [website](https://ozone.apache.org/downloads/) for the list of versions currently supported. ## Reporting a Vulnerability diff --git a/dev-support/ci/maven-settings.xml b/dev-support/ci/maven-settings.xml new file mode 100644 index 00000000000..43fa07bb52b --- /dev/null +++ b/dev-support/ci/maven-settings.xml @@ -0,0 +1,35 @@ + + + + + + block-snapshots1 + apache.snapshots + Block access to Apache Snapshots + https://repository.apache.org/snapshots + true + + + block-snapshots2 + apache.snapshots.https + Block access to Apache Snapshots + https://repository.apache.org/content/repositories/snapshots + true + + + diff --git a/dev-support/ci/selective_ci_checks.bats b/dev-support/ci/selective_ci_checks.bats index 12a7987ffb4..6edd38d68fe 100644 --- a/dev-support/ci/selective_ci_checks.bats +++ b/dev-support/ci/selective_ci_checks.bats @@ -52,7 +52,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=false assert_output -p needs-compose-tests=true - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=false assert_output -p needs-kubernetes-tests=false } @@ -76,7 +76,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=false assert_output -p needs-compose-tests=true - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=false assert_output -p needs-kubernetes-tests=true } @@ -100,7 +100,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=false assert_output -p needs-compose-tests=true - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=false assert_output -p needs-kubernetes-tests=true } @@ -112,7 +112,7 
@@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=false } @@ -136,7 +136,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=false } @@ -148,7 +148,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=false } @@ -160,7 +160,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=false } @@ -172,7 +172,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=false } @@ -184,7 +184,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=false assert_output -p needs-kubernetes-tests=false } @@ -196,7 +196,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=false assert_output -p needs-kubernetes-tests=false } @@ -208,7 +208,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=false assert_output -p needs-compose-tests=false - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=false assert_output -p needs-kubernetes-tests=true } @@ -232,7 +232,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=true } @@ -244,7 +244,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=true } @@ -256,7 +256,7 @@ load bats-assert/load.bash assert_output -p needs-build=true 
assert_output -p needs-compile=true assert_output -p needs-compose-tests=true - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=true } @@ -268,7 +268,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=true } diff --git a/dev-support/ci/selective_ci_checks.sh b/dev-support/ci/selective_ci_checks.sh index f6b06326a32..869d36fc6cc 100755 --- a/dev-support/ci/selective_ci_checks.sh +++ b/dev-support/ci/selective_ci_checks.sh @@ -330,6 +330,7 @@ function check_needs_compile() { if [[ ${match_count} != "0" ]]; then compile_needed=true + dependency_check_needed=true fi start_end::group_end @@ -520,6 +521,7 @@ function calculate_test_types_to_run() { echo "Looks like ${COUNT_CORE_OTHER_CHANGED_FILES} core files changed, running all tests." echo compose_tests_needed=true + dependency_check_needed=true integration_tests_needed=true kubernetes_tests_needed=true else @@ -527,12 +529,14 @@ function calculate_test_types_to_run() { echo if [[ ${COUNT_COMPOSE_CHANGED_FILES} != "0" ]] || [[ ${COUNT_ROBOT_CHANGED_FILES} != "0" ]]; then compose_tests_needed="true" + dependency_check_needed=true fi if [[ ${COUNT_INTEGRATION_CHANGED_FILES} != "0" ]]; then integration_tests_needed="true" fi if [[ ${COUNT_KUBERNETES_CHANGED_FILES} != "0" ]] || [[ ${COUNT_ROBOT_CHANGED_FILES} != "0" ]]; then kubernetes_tests_needed="true" + dependency_check_needed=true fi fi start_end::group_end @@ -590,6 +594,7 @@ get_count_robot_files get_count_misc_files check_needs_build +check_needs_dependency check_needs_compile # calculate basic checks to run @@ -597,7 +602,6 @@ BASIC_CHECKS="rat" check_needs_author check_needs_bats check_needs_checkstyle -check_needs_dependency check_needs_docs check_needs_findbugs check_needs_native diff --git a/dev-support/pom.xml b/dev-support/pom.xml index e11e3b32ee4..bc39cd3437e 100644 --- a/dev-support/pom.xml +++ b/dev-support/pom.xml @@ -18,7 +18,7 @@ ozone-main org.apache.ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT 4.0.0 ozone-dev-support diff --git a/hadoop-hdds/annotations/pom.xml b/hadoop-hdds/annotations/pom.xml index 3bb148d5c25..0a961087040 100644 --- a/hadoop-hdds/annotations/pom.xml +++ b/hadoop-hdds/annotations/pom.xml @@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-annotation-processing - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone annotation processing tools for validating custom annotations at compile time. 
diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml index bf728403cb4..333b960fc24 100644 --- a/hadoop-hdds/client/pom.xml +++ b/hadoop-hdds/client/pom.xml @@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-client - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Client Library Apache Ozone HDDS Client jar diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java index 19a5a9cad5d..f6367b5a53a 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java @@ -28,7 +28,9 @@ import org.apache.hadoop.metrics2.lib.MetricsRegistry; import org.apache.hadoop.metrics2.lib.MutableCounterLong; import org.apache.hadoop.metrics2.lib.MutableQuantiles; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.util.MetricUtil; import java.util.Map; import java.util.UUID; @@ -52,6 +54,21 @@ public final class ContainerClientMetrics { private MutableCounterLong totalWriteChunkCalls; @Metric private MutableCounterLong totalWriteChunkBytes; + + @Metric + private MutableRate hsyncSynchronizedWorkNs; + @Metric + private MutableRate hsyncSendWriteChunkNs; + @Metric + private MutableRate hsyncWaitForFlushNs; + @Metric + private MutableRate hsyncWatchForCommitNs; + @Metric + private MutableCounterLong writeChunksDuringWrite; + @Metric + private MutableCounterLong flushesDuringWrite; + + private MutableQuantiles[] listBlockLatency; private MutableQuantiles[] getBlockLatency; private MutableQuantiles[] getCommittedBlockLengthLatency; @@ -82,6 +99,7 @@ public static synchronized void release() { } referenceCount--; if (referenceCount == 0) { + instance.stop(); DefaultMetricsSystem.instance().unregisterSource( SOURCE_NAME + instanceCount); instance = null; @@ -140,6 +158,17 @@ private ContainerClientMetrics() { } } + public void stop() { + MetricUtil.stop(listBlockLatency); + MetricUtil.stop(getBlockLatency); + MetricUtil.stop(getCommittedBlockLengthLatency); + MetricUtil.stop(readChunkLatency); + MetricUtil.stop(getSmallFileLatency); + MetricUtil.stop(hsyncLatencyNs); + MetricUtil.stop(omHsyncLatencyNs); + MetricUtil.stop(datanodeHsyncLatencyNs); + } + public void recordWriteChunk(Pipeline pipeline, long chunkSizeBytes) { writeChunkCallsByPipeline.computeIfAbsent(pipeline.getId(), pipelineID -> registry.newCounter( @@ -249,4 +278,28 @@ Map getWriteChunkCallsByPipeline() { Map getWriteChunksCallsByLeaders() { return writeChunksCallsByLeaders; } + + public MutableRate getHsyncSynchronizedWorkNs() { + return hsyncSynchronizedWorkNs; + } + + public MutableRate getHsyncSendWriteChunkNs() { + return hsyncSendWriteChunkNs; + } + + public MutableRate getHsyncWaitForFlushNs() { + return hsyncWaitForFlushNs; + } + + public MutableRate getHsyncWatchForCommitNs() { + return hsyncWatchForCommitNs; + } + + public MutableCounterLong getWriteChunksDuringWrite() { + return writeChunksDuringWrite; + } + + public MutableCounterLong getFlushesDuringWrite() { + return flushesDuringWrite; + } } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java index 
5426bbc4981..a4b53a80a1e 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java @@ -156,7 +156,7 @@ public enum ChecksumCombineMode { description = "Indicates the time duration in seconds a client will wait " + "before retrying a read key request on encountering " - + "a connectivity excepetion from Datanodes . " + + "a connectivity exception from Datanodes. " + "By default the interval is 1 second", tags = ConfigTag.CLIENT) private int readRetryInterval = 1; diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java index c02306f8af8..bf49d408f7f 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java @@ -167,7 +167,7 @@ private synchronized void connectToDatanode(DatanodeDetails dn) } // read port from the data node, on failure use default configured // port. - int port = dn.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue(); + int port = dn.getStandalonePort().getValue(); if (port == 0) { port = config.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java index 07b70441721..52f31c9d129 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java @@ -185,8 +185,7 @@ private String getPipelineCacheKey(Pipeline pipeline, // Standalone port is chosen since all datanodes should have a // standalone port regardless of version and this port should not // have any collisions. 
- key += closestNode.getHostName() + closestNode.getPort( - DatanodeDetails.Port.Name.STANDALONE); + key += closestNode.getHostName() + closestNode.getStandalonePort(); } catch (IOException e) { LOG.error("Failed to get closest node to create pipeline cache key:" + e.getMessage()); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java index 704e886659a..50e010e85a2 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.MetricsSource; @@ -129,6 +130,7 @@ public void reset() { } public void unRegister() { + IOUtils.closeQuietly(containerOpsLatency.values()); MetricsSystem ms = DefaultMetricsSystem.instance(); ms.unregisterSource(SOURCE_NAME); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java index b0ef85cfbf7..979b1b99208 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java @@ -282,7 +282,7 @@ private CompletableFuture sendRequestAsync( // gets the minimum log index replicated to all servers @Override public long getReplicatedMinCommitIndex() { - return commitInfoMap.values().parallelStream() + return commitInfoMap.values().stream() .mapToLong(Long::longValue).min().orElse(0); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java index f792a678dad..d6353be9d22 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java @@ -166,6 +166,7 @@ public synchronized void initialize() throws IOException { if (blockInfo != null && blockInfo.isUnderConstruction()) { // use the block length from DN if block is under construction. 
length = blockData.getSize(); + LOG.debug("Updated block length to {} for block {}", length, blockID); } break; // If we get a StorageContainerException or an IOException due to @@ -274,16 +275,6 @@ protected BlockData getBlockDataUsingClient() throws IOException { blockID); } - DatanodeBlockID.Builder blkIDBuilder = - DatanodeBlockID.newBuilder().setContainerID(blockID.getContainerID()) - .setLocalID(blockID.getLocalID()) - .setBlockCommitSequenceId(blockID.getBlockCommitSequenceId()); - - int replicaIndex = pipeline.getReplicaIndex(pipeline.getClosestNode()); - if (replicaIndex > 0) { - blkIDBuilder.setReplicaIndex(replicaIndex); - } - GetBlockResponseProto response = ContainerProtocolCalls.getBlock( xceiverClient, VALIDATORS, blockID, tokenRef.get(), pipeline.getReplicaIndexes()); return response.getBlockData(); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java index 59795dd0f05..86bcfb3990e 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java @@ -62,7 +62,9 @@ import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.putBlockAsync; import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.writeChunkAsync; import static org.apache.hadoop.ozone.OzoneConsts.INCREMENTAL_CHUNK_LIST; +import static org.apache.hadoop.ozone.util.MetricUtil.captureLatencyNs; +import org.apache.hadoop.util.Time; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -231,8 +233,7 @@ public BlockOutputStream( writtenDataLength = 0; failedServers = new ArrayList<>(0); ioException = new AtomicReference<>(null); - checksum = new Checksum(config.getChecksumType(), - config.getBytesPerChecksum()); + this.checksum = new Checksum(config.getChecksumType(), config.getBytesPerChecksum(), true); this.clientMetrics = clientMetrics; this.streamBufferArgs = streamBufferArgs; this.allowPutBlockPiggybacking = canEnablePutblockPiggybacking(); @@ -360,6 +361,7 @@ public void write(int b) throws IOException { private void writeChunkIfNeeded() throws IOException { if (currentBufferRemaining == 0) { LOG.debug("WriteChunk from write(), buffer = {}", currentBuffer); + clientMetrics.getWriteChunksDuringWrite().incr(); writeChunk(currentBuffer); updateWriteChunkLength(); } @@ -404,6 +406,7 @@ private void doFlushOrWatchIfNeeded() throws IOException { updatePutBlockLength(); CompletableFuture putBlockFuture = executePutBlock(false, false); recordWatchForCommitAsync(putBlockFuture); + clientMetrics.getFlushesDuringWrite().incr(); } if (bufferPool.isAtCapacity()) { @@ -532,12 +535,16 @@ private CompletableFuture watchForCommit(long commitIndex) { } LOG.debug("Entering watchForCommit commitIndex = {}", commitIndex); + final long start = Time.monotonicNowNanos(); return sendWatchForCommit(commitIndex) .thenAccept(this::checkReply) .exceptionally(e -> { throw new FlushRuntimeException(setIoException(e)); }) - .whenComplete((r, e) -> LOG.debug("Leaving watchForCommit commitIndex = {}", commitIndex)); + .whenComplete((r, e) -> { + LOG.debug("Leaving watchForCommit commitIndex = {}", commitIndex); + clientMetrics.getHsyncWatchForCommitNs().add(Time.monotonicNowNanos() - start); + }); } private void checkReply(XceiverClientReply reply) { @@ -579,6 +586,7 @@ 
CompletableFuture executePutBlock(boolean close, final CompletableFuture flushFuture; final XceiverClientReply asyncReply; try { + // Note: checksum was previously appended to containerBlockData by WriteChunk BlockData blockData = containerBlockData.build(); LOG.debug("sending PutBlock {} flushPos {}", blockData, flushPos); @@ -693,12 +701,15 @@ private void handleFlushInternal(boolean close) throws IOException, InterruptedException, ExecutionException { checkOpen(); LOG.debug("Start handleFlushInternal close={}", close); - CompletableFuture toWaitFor = handleFlushInternalSynchronized(close); + CompletableFuture toWaitFor = captureLatencyNs(clientMetrics.getHsyncSynchronizedWorkNs(), + () -> handleFlushInternalSynchronized(close)); if (toWaitFor != null) { LOG.debug("Waiting for flush"); try { + long startWaiting = Time.monotonicNowNanos(); toWaitFor.get(); + clientMetrics.getHsyncWaitForFlushNs().add(Time.monotonicNowNanos() - startWaiting); } catch (ExecutionException ex) { if (ex.getCause() instanceof FlushRuntimeException) { throw ((FlushRuntimeException) ex.getCause()).cause; @@ -727,6 +738,7 @@ public void waitForAllPendingFlushes() throws IOException { } private synchronized CompletableFuture handleFlushInternalSynchronized(boolean close) throws IOException { + long start = Time.monotonicNowNanos(); CompletableFuture putBlockResultFuture = null; // flush the last chunk data residing on the currentBuffer if (totalWriteChunkLength < writtenDataLength) { @@ -768,6 +780,7 @@ private synchronized CompletableFuture handleFlushInternalSynchronized(boo if (putBlockResultFuture != null) { recordWatchForCommitAsync(putBlockResultFuture); } + clientMetrics.getHsyncSendWriteChunkNs().add(Time.monotonicNowNanos() - start); return lastFlushFuture; } @@ -841,6 +854,8 @@ public synchronized void cleanup(boolean invalidateClient) { if (lastChunkBuffer != null) { DIRECT_BUFFER_POOL.returnBuffer(lastChunkBuffer); lastChunkBuffer = null; + // Clear checksum cache + checksum.clearChecksumCache(); } } @@ -890,7 +905,10 @@ private CompletableFuture writeChunkToContainer( final long offset = chunkOffset.getAndAdd(effectiveChunkSize); final ByteString data = chunk.toByteString( bufferPool.byteStringConversion()); - ChecksumData checksumData = checksum.computeChecksum(chunk); + // chunk is incremental, don't cache its checksum + ChecksumData checksumData = checksum.computeChecksum(chunk, false); + // side note: checksum object is shared with PutBlock's (blockData) checksum calc, + // current impl does not support caching both ChunkInfo chunkInfo = ChunkInfo.newBuilder() .setChunkName(blockID.get().getLocalID() + "_chunk_" + ++chunkIndex) .setOffset(offset) @@ -1040,6 +1058,7 @@ private void updateBlockDataForWriteChunk(ChunkBuffer chunk) lastChunkBuffer.capacity() - lastChunkBuffer.position(); appendLastChunkBuffer(chunk, 0, remainingBufferSize); updateBlockDataWithLastChunkBuffer(); + // TODO: Optional refactoring: Can attach ChecksumCache to lastChunkBuffer rather than Checksum appendLastChunkBuffer(chunk, remainingBufferSize, chunk.remaining() - remainingBufferSize); } @@ -1056,10 +1075,13 @@ private void updateBlockDataWithLastChunkBuffer() LOG.debug("lastChunkInfo = {}", lastChunkInfo); long lastChunkSize = lastChunkInfo.getLen(); addToBlockData(lastChunkInfo); - + // Set ByteBuffer limit to capacity, pos to 0. 
Does not erase data lastChunkBuffer.clear(); + if (lastChunkSize == config.getStreamBufferSize()) { lastChunkOffset += config.getStreamBufferSize(); + // Reached stream buffer size (chunk size), starting new chunk, need to clear checksum cache + checksum.clearChecksumCache(); } else { lastChunkBuffer.position((int) lastChunkSize); } @@ -1123,8 +1145,9 @@ private ChunkInfo createChunkInfo(long lastPartialChunkOffset) lastChunkBuffer.flip(); int revisedChunkSize = lastChunkBuffer.remaining(); // create the chunk info to be sent in PutBlock. - ChecksumData revisedChecksumData = - checksum.computeChecksum(lastChunkBuffer); + // checksum cache is utilized for this computation + // this checksum is stored in blockData and later transferred in PutBlock + ChecksumData revisedChecksumData = checksum.computeChecksum(lastChunkBuffer, true); long chunkID = lastPartialChunkOffset / config.getStreamBufferSize(); ChunkInfo.Builder revisedChunkInfo = ChunkInfo.newBuilder() diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/MultipartInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/MultipartInputStream.java index 4bc144f3bd7..5f00e83e81b 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/MultipartInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/MultipartInputStream.java @@ -34,7 +34,7 @@ public class MultipartInputStream extends ExtendedInputStream { private final String key; - private final long length; + private long length; // List of PartInputStream, one for each part of the key private final List partStreams; @@ -56,6 +56,8 @@ public class MultipartInputStream extends ExtendedInputStream { // can be reset if a new position is seeked. private int prevPartIndex; + private boolean initialized = false; + public MultipartInputStream(String keyName, List inputStreams) { @@ -130,6 +132,9 @@ protected void checkPartBytesRead(int numBytesToRead, int numBytesRead, @Override public synchronized void seek(long pos) throws IOException { checkOpen(); + if (!initialized) { + initialize(); + } if (pos == 0 && length == 0) { // It is possible for length and pos to be zero in which case // seek should return instead of throwing exception @@ -173,6 +178,26 @@ public synchronized void seek(long pos) throws IOException { prevPartIndex = partIndex; } + public synchronized void initialize() throws IOException { + // Pre-check that the stream has not been intialized already + if (initialized) { + return; + } + + for (PartInputStream partInputStream : partStreams) { + if (partInputStream instanceof BlockInputStream) { + ((BlockInputStream) partInputStream).initialize(); + } + } + + long streamLength = 0L; + for (PartInputStream partInputStream : partStreams) { + streamLength += partInputStream.getLength(); + } + this.length = streamLength; + initialized = true; + } + @Override public synchronized long getPos() throws IOException { return length == 0 ? 
0 : diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index 29cb513bb6f..f2576f7cf08 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-common - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Common Apache Ozone HDDS Common jar diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java index 87707f75dc4..4d630243e51 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java @@ -401,4 +401,7 @@ private HddsConfigKeys() { "hdds.datanode.slow.op.warning.threshold"; public static final String HDDS_DATANODE_SLOW_OP_WARNING_THRESHOLD_DEFAULT = "500ms"; + + public static final String OZONE_DATANODE_IO_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY = + "ozone.volume.io.percentiles.intervals.seconds"; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java index f8c4a5e6fab..42aaa18a317 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds; +import com.google.protobuf.InvalidProtocolBufferException; import com.google.protobuf.ServiceException; import jakarta.annotation.Nonnull; @@ -686,21 +687,26 @@ public static int roundupMb(long bytes) { * or a RpcException. */ public static Throwable getUnwrappedException(Exception ex) { + Throwable t = ex; if (ex instanceof ServiceException) { - Throwable t = ex.getCause(); - if (t instanceof RemoteException) { - t = ((RemoteException) t).unwrapRemoteException(); - } - while (t != null) { - if (t instanceof RpcException || - t instanceof AccessControlException || - t instanceof SecretManager.InvalidToken) { - return t; - } - t = t.getCause(); + t = ex.getCause(); + } + if (t instanceof RemoteException) { + t = ((RemoteException) t).unwrapRemoteException(); + } + while (t != null) { + if (t instanceof RpcException || + t instanceof AccessControlException || + t instanceof SecretManager.InvalidToken) { + break; } + Throwable cause = t.getCause(); + if (cause == null || cause instanceof RemoteException) { + break; + } + t = cause; } - return null; + return t; } /** @@ -720,7 +726,7 @@ public static boolean shouldNotFailoverOnRpcException(Throwable exception) { return true; } } - return false; + return exception instanceof InvalidProtocolBufferException; } /** @@ -879,4 +885,17 @@ public static HddsProtos.UUID toProtobuf(UUID uuid) { ? Thread.currentThread().getStackTrace() : null; } + + /** + * Logs a warning to report that the class is not closed properly. 
+ */ + public static void reportLeak(Class clazz, String stackTrace, Logger log) { + String warning = String.format("%s is not closed properly", clazz.getSimpleName()); + if (stackTrace != null && log.isDebugEnabled()) { + String debugMessage = String.format("%nStackTrace for unclosed instance: %s", + stackTrace); + warning = warning.concat(debugMessage); + } + log.warn(warning); + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/ExtensibleParentCommand.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/ExtensibleParentCommand.java new file mode 100644 index 00000000000..d4fde1b75cb --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/ExtensibleParentCommand.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.cli; + +import picocli.CommandLine; + +import java.util.ServiceLoader; + +/** + * Interface for parent commands that accept subcommands to be dynamically registered. + * Subcommands should: + *
+ * <li>implement the interface returned by {@link #subcommandType()}</li>
+ * <li>be annotated with {@code MetaInfServices} parameterized with the same type</li>
  • + */ +public interface ExtensibleParentCommand { + + /** @return The class of the marker interface for subcommands. */ + Class subcommandType(); + + /** Recursively find and add subcommands to {@code cli}. */ + static void addSubcommands(CommandLine cli) { + Object command = cli.getCommand(); + + // find and add subcommands + if (command instanceof ExtensibleParentCommand) { + ExtensibleParentCommand parentCommand = (ExtensibleParentCommand) command; + ServiceLoader subcommands = ServiceLoader.load(parentCommand.subcommandType()); + for (Object subcommand : subcommands) { + final CommandLine.Command commandAnnotation = subcommand.getClass().getAnnotation(CommandLine.Command.class); + CommandLine subcommandCommandLine = new CommandLine(subcommand, cli.getFactory()); + cli.addSubcommand(commandAnnotation.name(), subcommandCommandLine); + } + } + + // process subcommands recursively + for (CommandLine subcommand : cli.getSubcommands().values()) { + addSubcommands(subcommand); + } + } + +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java index 4c5f3fdc872..14d454431f9 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java @@ -16,18 +16,18 @@ */ package org.apache.hadoop.hdds.cli; +import java.io.IOException; import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; -import java.util.ServiceLoader; import java.util.concurrent.Callable; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.security.UserGroupInformation; import picocli.CommandLine; -import picocli.CommandLine.Command; import picocli.CommandLine.ExitCode; import picocli.CommandLine.Model.CommandSpec; import picocli.CommandLine.Option; @@ -50,32 +50,21 @@ public class GenericCli implements Callable, GenericParentCommand { private String configurationPath; private final CommandLine cmd; + private OzoneConfiguration conf; + private UserGroupInformation user; public GenericCli() { - cmd = new CommandLine(this); + this(CommandLine.defaultFactory()); + } + + public GenericCli(CommandLine.IFactory factory) { + cmd = new CommandLine(this, factory); cmd.setExecutionExceptionHandler((ex, commandLine, parseResult) -> { printError(ex); return EXECUTION_ERROR_EXIT_CODE; }); - } - public GenericCli(Class type) { - this(); - addSubcommands(getCmd(), type); - } - - private void addSubcommands(CommandLine cli, Class type) { - ServiceLoader registeredSubcommands = - ServiceLoader.load(SubcommandWithParent.class); - for (SubcommandWithParent subcommand : registeredSubcommands) { - if (subcommand.getParentType().equals(type)) { - final Command commandAnnotation = - subcommand.getClass().getAnnotation(Command.class); - CommandLine subcommandCommandLine = new CommandLine(subcommand); - addSubcommands(subcommandCommandLine, subcommand.getClass()); - cli.addSubcommand(commandAnnotation.name(), subcommandCommandLine); - } - } + ExtensibleParentCommand.addSubcommands(cmd); } /** @@ -130,6 +119,20 @@ public OzoneConfiguration createOzoneConfiguration() { return ozoneConf; } + public OzoneConfiguration getOzoneConf() { + if (conf == null) { + conf = createOzoneConfiguration(); + } + return conf; + } + + public UserGroupInformation getUser() throws IOException { + if (user == null) { + user = 
UserGroupInformation.getCurrentUser(); + } + return user; + } + @VisibleForTesting public picocli.CommandLine getCmd() { return cmd; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java index a6dbd933ff1..9709029634c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java @@ -218,6 +218,11 @@ public String configFormat() { + "/" + data + "-" + parity + "-" + chunkKB(); } + @Override + public int getMinimumNodes() { + return data; + } + private String chunkKB() { return ecChunkSize / 1024 + "k"; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/RatisReplicationConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/RatisReplicationConfig.java index 36d4d90e1af..9c42e3d59b1 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/RatisReplicationConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/RatisReplicationConfig.java @@ -126,4 +126,9 @@ public String toString() { public String configFormat() { return toString(); } + + @Override + public int getMinimumNodes() { + return 1; + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfig.java index 7542409679b..d82cd08c08e 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfig.java @@ -234,4 +234,6 @@ static ReplicationConfig parseWithoutFallback(ReplicationType type, String configFormat(); + /** Minimum number of nodes, below this data loss happens. 
*/ + int getMinimumNodes(); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/StandaloneReplicationConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/StandaloneReplicationConfig.java index 9ca2dfb538a..0b82ab8c872 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/StandaloneReplicationConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/StandaloneReplicationConfig.java @@ -128,4 +128,9 @@ public String toString() { public String configFormat() { return toString(); } + + @Override + public int getMinimumNodes() { + return replicationFactor.getNumber(); + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java index 01f508d257c..1c324ac8ff5 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java @@ -19,8 +19,10 @@ package org.apache.hadoop.hdds.protocol; import java.util.ArrayList; +import java.util.Collections; import java.util.EnumSet; import java.util.List; +import java.util.Objects; import java.util.Set; import java.util.UUID; @@ -74,7 +76,8 @@ public class DatanodeDetails extends NodeImpl implements private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(ExtendedDatanodeDetailsProto.getDefaultInstance()), DatanodeDetails::getFromProtoBuf, - DatanodeDetails::getExtendedProtoBufMessage); + DatanodeDetails::getExtendedProtoBufMessage, + DatanodeDetails.class); public static Codec getCodec() { return CODEC; @@ -93,7 +96,6 @@ public static Codec getCodec() { private String version; private long setupTime; private String revision; - private String buildDate; private volatile HddsProtos.NodeOperationalState persistedOpState; private volatile long persistedOpStateExpiryEpochSec; private int initialVersion; @@ -111,7 +113,6 @@ private DatanodeDetails(Builder b) { version = b.version; setupTime = b.setupTime; revision = b.revision; - buildDate = b.buildDate; persistedOpState = b.persistedOpState; persistedOpStateExpiryEpochSec = b.persistedOpStateExpiryEpochSec; initialVersion = b.initialVersion; @@ -140,7 +141,6 @@ public DatanodeDetails(DatanodeDetails datanodeDetails) { this.version = datanodeDetails.version; this.setupTime = datanodeDetails.setupTime; this.revision = datanodeDetails.revision; - this.buildDate = datanodeDetails.buildDate; this.persistedOpState = datanodeDetails.getPersistedOpState(); this.persistedOpStateExpiryEpochSec = datanodeDetails.getPersistedOpStateExpiryEpochSec(); @@ -236,6 +236,18 @@ public synchronized void setPort(Name name, int port) { setPort(new Port(name, port)); } + public void setRatisPort(int port) { + setPort(Name.RATIS, port); + } + + public void setRestPort(int port) { + setPort(Name.REST, port); + } + + public void setStandalonePort(int port) { + setPort(Name.STANDALONE, port); + } + /** * Returns all the Ports used by DataNode. 
* @@ -326,20 +338,52 @@ public void setPersistedOpStateExpiryEpochSec(long expiry) { * @return Port */ public synchronized Port getPort(Port.Name name) { + Port ratisPort = null; for (Port port : ports) { if (port.getName().equals(name)) { return port; } + if (port.getName().equals(Name.RATIS)) { + ratisPort = port; + } } - // if no separate admin/server/datastream port, return single Ratis one for - // compat + // if no separate admin/server/datastream port, + // return single Ratis one for compatibility if (name == Name.RATIS_ADMIN || name == Name.RATIS_SERVER || name == Name.RATIS_DATASTREAM) { - return getPort(Name.RATIS); + return ratisPort; } return null; } + /** + * Helper method to get the Ratis port. + * + * @return Port + */ + public Port getRatisPort() { + return getPort(Name.RATIS); + } + + /** + * Helper method to get the REST port. + * + * @return Port + */ + public Port getRestPort() { + return getPort(Name.REST); + } + + /** + * Helper method to get the Standalone port. + * + * @return Port + */ + public Port getStandalonePort() { + return getPort(Name.STANDALONE); + } + + /** * Starts building a new DatanodeDetails from the protobuf input. * @@ -432,9 +476,6 @@ public static DatanodeDetails getFromProtoBuf( if (extendedDetailsProto.hasRevision()) { builder.setRevision(extendedDetailsProto.getRevision()); } - if (extendedDetailsProto.hasBuildDate()) { - builder.setBuildDate(extendedDetailsProto.getBuildDate()); - } return builder.build(); } @@ -448,11 +489,24 @@ public HddsProtos.DatanodeDetailsProto getProtoBufMessage() { } public HddsProtos.DatanodeDetailsProto toProto(int clientVersion) { - return toProtoBuilder(clientVersion).build(); + return toProtoBuilder(clientVersion, Collections.emptySet()).build(); + } + + public HddsProtos.DatanodeDetailsProto toProto(int clientVersion, Set filterPorts) { + return toProtoBuilder(clientVersion, filterPorts).build(); } + /** + * Converts the current DatanodeDetails instance into a proto {@link HddsProtos.DatanodeDetailsProto.Builder} object. + * + * @param clientVersion - The client version. + * @param filterPorts - A set of {@link Port.Name} specifying ports to include. + * If empty, all available ports will be included. + * @return A {@link HddsProtos.DatanodeDetailsProto.Builder} Object. 
+ */ + public HddsProtos.DatanodeDetailsProto.Builder toProtoBuilder( - int clientVersion) { + int clientVersion, Set filterPorts) { HddsProtos.UUID uuid128 = HddsProtos.UUID.newBuilder() .setMostSigBits(uuid.getMostSignificantBits()) @@ -491,15 +545,25 @@ public HddsProtos.DatanodeDetailsProto.Builder toProtoBuilder( final boolean handlesUnknownPorts = ClientVersion.fromProtoValue(clientVersion) .compareTo(VERSION_HANDLES_UNKNOWN_DN_PORTS) >= 0; + final int requestedPortCount = filterPorts.size(); + final boolean maySkip = requestedPortCount > 0; for (Port port : ports) { - if (handlesUnknownPorts || Name.V0_PORTS.contains(port.getName())) { + if (maySkip && !filterPorts.contains(port.getName())) { + if (LOG.isDebugEnabled()) { + LOG.debug("Skip adding {} port {} to proto message", + port.getName(), port.getValue()); + } + } else if (handlesUnknownPorts || Name.V0_PORTS.contains(port.getName())) { builder.addPorts(port.toProto()); } else { if (LOG.isDebugEnabled()) { LOG.debug("Skip adding {} port {} to proto message for client v{}", - port.getName(), port.getValue(), clientVersion); + port.getName(), port.getValue(), clientVersion); } } + if (maySkip && builder.getPortsCount() == requestedPortCount) { + break; + } } builder.setCurrentVersion(currentVersion); @@ -526,9 +590,6 @@ public ExtendedDatanodeDetailsProto getExtendedProtoBufMessage() { if (!Strings.isNullOrEmpty(getRevision())) { extendedBuilder.setRevision(getRevision()); } - if (!Strings.isNullOrEmpty(getBuildDate())) { - extendedBuilder.setBuildDate(getBuildDate()); - } return extendedBuilder.build(); } @@ -587,6 +648,20 @@ public boolean equals(Object obj) { uuid.equals(((DatanodeDetails) obj).uuid); } + + /** + * Checks hostname, ipAddress and port of the 2 nodes are the same. + * @param datanodeDetails dnDetails object to compare with. + * @return true if the values match otherwise false. + */ + public boolean compareNodeValues(DatanodeDetails datanodeDetails) { + if (this == datanodeDetails || super.equals(datanodeDetails)) { + return true; + } + return Objects.equals(ipAddress, datanodeDetails.ipAddress) + && Objects.equals(hostName, datanodeDetails.hostName) && Objects.equals(ports, datanodeDetails.ports); + } + @Override public int hashCode() { return uuid.hashCode(); @@ -621,7 +696,6 @@ public static final class Builder { private String version; private long setupTime; private String revision; - private String buildDate; private HddsProtos.NodeOperationalState persistedOpState; private long persistedOpStateExpiryEpochSec = 0; private int initialVersion; @@ -653,7 +727,6 @@ public Builder setDatanodeDetails(DatanodeDetails details) { this.version = details.getVersion(); this.setupTime = details.getSetupTime(); this.revision = details.getRevision(); - this.buildDate = details.getBuildDate(); this.persistedOpState = details.getPersistedOpState(); this.persistedOpStateExpiryEpochSec = details.getPersistedOpStateExpiryEpochSec(); @@ -800,18 +873,6 @@ public Builder setRevision(String rev) { return this; } - /** - * Sets the DataNode build date. - * - * @param date the build date of DataNode. - * - * @return DatanodeDetails.Builder - */ - public Builder setBuildDate(String date) { - this.buildDate = date; - return this; - } - /** * Sets the DataNode setup time. * @@ -885,6 +946,36 @@ public static Port newPort(Port.Name name, Integer value) { return new Port(name, value); } + /** + * Constructs a new Ratis Port with the given port number. 
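A brief sketch of the port factory helpers being introduced here; the port numbers are illustrative.

// Sketch only: equivalent to DatanodeDetails.newPort(Port.Name.RATIS, 9858), etc.
DatanodeDetails.Port ratis = DatanodeDetails.newRatisPort(9858);
DatanodeDetails.Port rest = DatanodeDetails.newRestPort(9880);
DatanodeDetails.Port standalone = DatanodeDetails.newStandalonePort(9859);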
+ * + * @param portNumber Port number + * @return the {@link Port} instance + */ + public static Port newRatisPort(Integer portNumber) { + return newPort(Name.RATIS, portNumber); + } + + /** + * Constructs a new REST Port with the given port number. + * + * @param portNumber Port number + * @return the {@link Port} instance + */ + public static Port newRestPort(Integer portNumber) { + return newPort(Name.REST, portNumber); + } + + /** + * Constructs a new Standalone Port with the given port number. + * + * @param portNumber Port number + * @return the {@link Port} instance + */ + public static Port newStandalonePort(Integer portNumber) { + return newPort(Name.STANDALONE, portNumber); + } + /** * Container to hold DataNode Port details. */ @@ -908,6 +999,9 @@ public enum Name { Name.values()); public static final Set V0_PORTS = ImmutableSet.copyOf( EnumSet.of(STANDALONE, RATIS, REST)); + + public static final Set IO_PORTS = ImmutableSet.copyOf( + EnumSet.of(STANDALONE, RATIS, RATIS_DATASTREAM)); } private final Name name; @@ -1053,29 +1147,11 @@ public void setRevision(String rev) { this.revision = rev; } - /** - * Returns the DataNode build date. - * - * @return DataNode build date - */ - public String getBuildDate() { - return buildDate; - } - - /** - * Set DataNode build date. - * - * @param date DataNode build date - */ - public void setBuildDate(String date) { - this.buildDate = date; - } - @Override public HddsProtos.NetworkNode toProtobuf( int clientVersion) { return HddsProtos.NetworkNode.newBuilder() - .setDatanodeDetails(toProtoBuilder(clientVersion).build()) + .setDatanodeDetails(toProtoBuilder(clientVersion, Collections.emptySet()).build()) .build(); } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index db789783c7c..c4b42acec43 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -131,6 +131,11 @@ public final class ScmConfigKeys { "hdds.ratis.snapshot.threshold"; public static final long HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000; + public static final String OZONE_SCM_CONTAINER_LIST_MAX_COUNT = + "ozone.scm.container.list.max.count"; + + public static final int OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT = 4096; + // TODO : this is copied from OzoneConsts, may need to move to a better place public static final String OZONE_SCM_CHUNK_SIZE_KEY = "ozone.scm.chunk.size"; // 4 MB by default diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java index 19c39698dec..aeb894564b5 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java @@ -30,6 +30,7 @@ public final class ScmInfo { private final String clusterId; private final String scmId; private final List peerRoles; + private final boolean scmRatisEnabled; /** * Builder for ScmInfo. @@ -38,6 +39,7 @@ public static class Builder { private String clusterId; private String scmId; private final List peerRoles; + private boolean scmRatisEnabled; public Builder() { peerRoles = new ArrayList<>(); @@ -73,15 +75,28 @@ public Builder setRatisPeerRoles(List roles) { return this; } + /** + * Set whether SCM enables Ratis. 
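A hedged sketch of how the extended ScmInfo builder might be used once this setter lands; setClusterId and setScmId are assumed to be the existing builder setters.

// Sketch only: SCM can now report whether it is running in Ratis (HA) mode.
ScmInfo scmInfo = new ScmInfo.Builder()
    .setClusterId(clusterId)
    .setScmId(scmId)
    .setScmRatisEnabled(true)
    .build();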
+ * + * @param ratisEnabled If it is true, it means that the Ratis mode is turned on. + * If it is false, it means that the Ratis mode is not turned on. + * @return Builder for scmInfo + */ + public Builder setScmRatisEnabled(boolean ratisEnabled) { + scmRatisEnabled = ratisEnabled; + return this; + } + public ScmInfo build() { - return new ScmInfo(clusterId, scmId, peerRoles); + return new ScmInfo(clusterId, scmId, peerRoles, scmRatisEnabled); } } - private ScmInfo(String clusterId, String scmId, List peerRoles) { + private ScmInfo(String clusterId, String scmId, List peerRoles, boolean ratisEnabled) { this.clusterId = clusterId; this.scmId = scmId; this.peerRoles = Collections.unmodifiableList(peerRoles); + this.scmRatisEnabled = ratisEnabled; } /** @@ -107,4 +122,8 @@ public String getScmId() { public List getRatisPeerRoles() { return peerRoles; } + + public boolean getScmRatisEnabled() { + return scmRatisEnabled; + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java index 77079a7bddc..91c0cbd50b4 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfoResponseProto; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.scm.container.ContainerReplicaInfo; import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; @@ -122,10 +123,11 @@ void deleteContainer(long containerId, Pipeline pipeline, boolean force) * @param startContainerID start containerID. * @param count count must be {@literal >} 0. * - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ - List listContainer(long startContainerID, + ContainerListResult listContainer(long startContainerID, int count) throws IOException; /** @@ -135,10 +137,11 @@ List listContainer(long startContainerID, * @param count count must be {@literal >} 0. * @param state Container of this state will be returned. * @param replicationConfig container replication Config. - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ - List listContainer(long startContainerID, int count, + ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig replicationConfig) @@ -392,6 +395,15 @@ StartContainerBalancerResponseProto startContainerBalancer( */ List getScmRatisRoles() throws IOException; + /** + * Get the current SCM mode. + * + * @return `true` indicates that it is in RATIS mode, + * while `false` indicates that it is in STANDALONE mode. + * @throws IOException an I/O exception of some sort has occurred. + */ + boolean isScmRatisEnable() throws IOException; + /** * Force generates new secret keys (rotate). 
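A hedged client-side sketch of the updated ScmClient surface above; scmClient is an assumed ScmClient instance and IOException handling is omitted.

// Sketch only: listContainer now returns the capped page plus the total container count.
ContainerListResult result = scmClient.listContainer(0L, 100);
List<ContainerInfo> page = result.getContainerInfoList();
long totalCount = result.getTotalCount();
boolean ratisEnabled = scmClient.isScmRatisEnable();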
* diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java index 88522f2f9f4..90f690da5a1 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java @@ -37,7 +37,7 @@ public final class ContainerID implements Comparable { private static final Codec CODEC = new DelegatedCodec<>( LongCodec.get(), ContainerID::valueOf, c -> c.id, - DelegatedCodec.CopyType.SHALLOW); + ContainerID.class, DelegatedCodec.CopyType.SHALLOW); public static final ContainerID MIN = ContainerID.valueOf(0); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java index 6bf2d5500c8..90eb8b47de1 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java @@ -46,7 +46,8 @@ public final class ContainerInfo implements Comparable { private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(HddsProtos.ContainerInfoProto.getDefaultInstance()), ContainerInfo::fromProtobuf, - ContainerInfo::getProtobuf); + ContainerInfo::getProtobuf, + ContainerInfo.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerListResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerListResult.java new file mode 100644 index 00000000000..9e8d5738db8 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerListResult.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.container; + +import java.util.List; + +/** + * Wrapper class for the result of listing containers with their total count. + */ +public class ContainerListResult { + private final List containerInfoList; + private final long totalCount; + + /** + * Constructs a new ContainerListResult. + * + * @param containerInfoList the list of containers + * @param totalCount the total number of containers + */ + public ContainerListResult(List containerInfoList, long totalCount) { + this.containerInfoList = containerInfoList; + this.totalCount = totalCount; + } + + /** + * Gets the list of containers. + * + * @return the list of containers + */ + public List getContainerInfoList() { + return containerInfoList; + } + + /** + * Gets the total count of containers. 
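An illustrative sketch, not taken from this patch, of how the new wrapper and the ozone.scm.container.list.max.count cap could fit together on the server side; conf and allMatching are assumptions.

// Sketch only: cap the returned page while still reporting the full match count.
int maxCount = conf.getInt(ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT,
    ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT);
List<ContainerInfo> page = allMatching.subList(0, Math.min(allMatching.size(), maxCount));
ContainerListResult result = new ContainerListResult(page, allMatching.size());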
+ * + * @return the total count of containers + */ + public long getTotalCount() { + return totalCount; + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java index ac72dc94224..f4c9a5dbda9 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java @@ -21,6 +21,7 @@ import java.util.Comparator; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; +import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -61,7 +62,7 @@ public HddsProtos.ContainerWithPipeline getProtobuf(int clientVersion) HddsProtos.ContainerWithPipeline.Builder builder = HddsProtos.ContainerWithPipeline.newBuilder(); builder.setContainerInfo(getContainerInfo().getProtobuf()) - .setPipeline(getPipeline().getProtobufMessage(clientVersion)); + .setPipeline(getPipeline().getProtobufMessage(clientVersion, Name.IO_PORTS)); return builder.build(); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java index af4e7299383..b71adb7099a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java @@ -54,7 +54,7 @@ import java.util.List; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import static org.apache.hadoop.ozone.OzoneConsts.SCM_RATIS_SNAPSHOT_DIR; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RATIS_SNAPSHOT_DIR; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEFAULT_SERVICE_ID; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY; @@ -159,7 +159,7 @@ public static String getSCMRatisSnapshotDirectory(ConfigurationSource conf) { OZONE_METADATA_DIRS); File metaDirPath = ServerUtils.getOzoneMetaDirPath(conf); snapshotDir = - Paths.get(metaDirPath.getPath(), SCM_RATIS_SNAPSHOT_DIR).toString(); + Paths.get(metaDirPath.getPath(), OZONE_RATIS_SNAPSHOT_DIR).toString(); } return snapshotDir; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java index 54a32e9c340..7390de95fe9 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java @@ -68,6 +68,7 @@ public final class Pipeline { Proto2Codec.get(HddsProtos.Pipeline.getDefaultInstance()), Pipeline::getFromProtobufSetCreationTimestamp, p -> p.getProtobufMessage(ClientVersion.CURRENT_VERSION), + Pipeline.class, DelegatedCodec.CopyType.UNSUPPORTED); public static Codec getCodec() { @@ -329,7 +330,11 @@ public List getNodesInOrder() { } void reportDatanode(DatanodeDetails dn) throws IOException { - if (nodeStatus.get(dn) == null) { + //This is a workaround for the case a datanode restarted with reinitializing it's dnId but 
it still reports the + // same set of pipelines it was part of. The pipeline report should be accepted for this anomalous condition. + // We rely on StaleNodeHandler in closing this pipeline eventually. + if (dn == null || (nodeStatus.get(dn) == null + && nodeStatus.keySet().stream().noneMatch(node -> node.compareNodeValues(dn)))) { throw new IOException( String.format("Datanode=%s not part of pipeline=%s", dn, id)); } @@ -361,12 +366,17 @@ public ReplicationConfig getReplicationConfig() { public HddsProtos.Pipeline getProtobufMessage(int clientVersion) throws UnknownPipelineStateException { + return getProtobufMessage(clientVersion, Collections.emptySet()); + } + + public HddsProtos.Pipeline getProtobufMessage(int clientVersion, Set filterPorts) + throws UnknownPipelineStateException { List members = new ArrayList<>(); List memberReplicaIndexes = new ArrayList<>(); for (DatanodeDetails dn : nodeStatus.keySet()) { - members.add(dn.toProto(clientVersion)); + members.add(dn.toProto(clientVersion, filterPorts)); memberReplicaIndexes.add(replicaIndexes.getOrDefault(dn, 0)); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java index 5ca35456261..92e01735d53 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java @@ -34,7 +34,7 @@ public final class PipelineID { private static final Codec CODEC = new DelegatedCodec<>( UuidCodec.get(), PipelineID::valueOf, c -> c.id, - DelegatedCodec.CopyType.SHALLOW); + PipelineID.class, DelegatedCodec.CopyType.SHALLOW); public static Codec getCodec() { return CODEC; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java index 7cdf8b8eed9..419623f3c06 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; @@ -146,10 +147,11 @@ List getExistContainerWithPipelinesInBatch( * Usually the count will be replace with a very big * value instead of being unlimited in case the db is very big) * - * @return a list of container. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ - List listContainer(long startContainerID, + ContainerListResult listContainer(long startContainerID, int count) throws IOException; /** @@ -165,10 +167,11 @@ List listContainer(long startContainerID, * value instead of being unlimited in case the db is very big) * @param state Container with this state will be returned. * - * @return a list of container. 
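A hedged sketch of the pipeline-level counterpart added above, mirroring what ContainerWithPipeline.getProtobuf now does; pipeline is an assumed Pipeline instance and the checked UnknownPipelineStateException is left to the caller.

// Sketch only: serialize the pipeline while advertising only the I/O ports.
HddsProtos.Pipeline proto = pipeline.getProtobufMessage(
    ClientVersion.CURRENT_VERSION, DatanodeDetails.Port.Name.IO_PORTS);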
+ * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ - List listContainer(long startContainerID, + ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException; /** @@ -184,14 +187,14 @@ List listContainer(long startContainerID, * value instead of being unlimited in case the db is very big) * @param state Container with this state will be returned. * @param factor Container factor - * @return a list of container. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ - List listContainer(long startContainerID, + ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) throws IOException; - /** * Ask SCM for a list of containers with a range of container ID, state * and replication config, and the limit of count. @@ -205,10 +208,11 @@ List listContainer(long startContainerID, * value instead of being unlimited in case the db is very big) * @param state Container with this state will be returned. * @param replicationConfig Replication config for the containers - * @return a list of container. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ - List listContainer(long startContainerID, + ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig replicationConfig) throws IOException; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateSignRequest.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateSignRequest.java index 1f04e868a85..553b1dc812e 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateSignRequest.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateSignRequest.java @@ -27,13 +27,12 @@ import java.util.List; import java.util.Optional; +import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.commons.validator.routines.DomainValidator; import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; import org.apache.hadoop.hdds.security.x509.exception.CertificateException; - -import com.google.common.base.Preconditions; import org.apache.hadoop.ozone.OzoneSecurityUtil; import org.bouncycastle.asn1.ASN1EncodableVector; import org.bouncycastle.asn1.ASN1Object; @@ -390,7 +389,7 @@ private Optional getSubjectAltNameExtension() throws if (altNames != null) { return Optional.of(new Extension(Extension.subjectAlternativeName, false, new DEROctetString(new GeneralNames( - altNames.toArray(new GeneralName[altNames.size()]))))); + altNames.toArray(new GeneralName[0]))))); } return Optional.empty(); } @@ -414,12 +413,10 @@ private Extensions createExtensions() throws IOException { // Add subject alternate name extension Optional san = getSubjectAltNameExtension(); - if (san.isPresent()) { - extensions.add(san.get()); - } + san.ifPresent(extensions::add); return new Extensions( - 
extensions.toArray(new Extension[extensions.size()])); + extensions.toArray(new Extension[0])); } public CertificateSignRequest build() throws SCMSecurityException { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java index b76a316c90b..386b1358b97 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java @@ -44,8 +44,6 @@ public static void main(String[] args) { System.out.println( "Source code repository " + HDDS_VERSION_INFO.getUrl() + " -r " + HDDS_VERSION_INFO.getRevision()); - System.out.println("Compiled by " + HDDS_VERSION_INFO.getUser() + " on " - + HDDS_VERSION_INFO.getDate()); System.out.println( "Compiled with protoc " + HDDS_VERSION_INFO.getHadoopProtoc2Version() + ", " + HDDS_VERSION_INFO.getGrpcProtocVersion() + diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/VersionInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/VersionInfo.java index 349c0a86206..d3de20cd476 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/VersionInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/VersionInfo.java @@ -65,18 +65,6 @@ public String getRevision() { return info.getProperty("revision", "Unknown"); } - public String getBranch() { - return info.getProperty("branch", "Unknown"); - } - - public String getDate() { - return info.getProperty("date", "Unknown"); - } - - public String getUser() { - return info.getProperty("user", "Unknown"); - } - public String getUrl() { return info.getProperty("url", "Unknown"); } @@ -108,7 +96,6 @@ public String getCompilePlatform() { public String getBuildVersion() { return getVersion() + " from " + getRevision() + - " by " + getUser() + " source checksum " + getSrcChecksum(); } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BooleanCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BooleanCodec.java index 2ec396c0ffa..6d416ea2ef3 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BooleanCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BooleanCodec.java @@ -36,6 +36,11 @@ private BooleanCodec() { // singleton } + @Override + public Class getTypeClass() { + return Boolean.class; + } + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java index 46779648e67..54bbf42c468 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java @@ -30,6 +30,9 @@ public interface Codec { byte[] EMPTY_BYTE_ARRAY = {}; + /** @return the class of the {@link T}. */ + Class getTypeClass(); + /** * Does this {@link Codec} support the {@link CodecBuffer} methods? 
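A small sketch of the new accessor in use; it mainly helps code that registers or validates codecs by the Java type they serialize.

// Sketch only: every Codec now reports the concrete type it handles.
Codec<ContainerID> codec = ContainerID.getCodec();
Class<ContainerID> type = codec.getTypeClass();   // ContainerID.class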
* If this method returns true, this class must implement both diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java index 1ac293b301b..87be912bb53 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java @@ -58,9 +58,9 @@ public class CodecBuffer implements UncheckedAutoCloseable { private static class Factory { private static volatile BiFunction constructor = CodecBuffer::new; - static void set(BiFunction f) { + static void set(BiFunction f, String name) { constructor = f; - LOG.info("Successfully set constructor to " + f); + LOG.info("Successfully set constructor to {}: {}", name, f); } static CodecBuffer newCodecBuffer(ByteBuf buf) { @@ -89,7 +89,7 @@ protected void finalize() { * Note that there is a severe performance penalty for leak detection. */ public static void enableLeakDetection() { - Factory.set(LeakDetector::newCodecBuffer); + Factory.set(LeakDetector::newCodecBuffer, "LeakDetector::newCodecBuffer"); } /** The size of a buffer. */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java index 8d6f3c32e53..2ed92e66d2e 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java @@ -47,6 +47,7 @@ public enum CopyType { private final Codec delegate; private final CheckedFunction forward; private final CheckedFunction backward; + private final Class clazz; private final CopyType copyType; /** @@ -60,18 +61,25 @@ public enum CopyType { public DelegatedCodec(Codec delegate, CheckedFunction forward, CheckedFunction backward, - CopyType copyType) { + Class clazz, CopyType copyType) { this.delegate = delegate; this.forward = forward; this.backward = backward; + this.clazz = clazz; this.copyType = copyType; } /** The same as new DelegatedCodec(delegate, forward, backward, DEEP). 
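For reference, a hedged sketch of the wiring pattern the call sites in this patch now follow; Widget and its proto conversions are hypothetical stand-ins.

// Sketch only: DelegatedCodec now also carries the concrete object class.
private static final Codec<Widget> CODEC = new DelegatedCodec<>(
    Proto2Codec.get(WidgetProto.getDefaultInstance()),
    Widget::fromProtobuf,   // proto -> object
    Widget::getProtobuf,    // object -> proto
    Widget.class,
    DelegatedCodec.CopyType.SHALLOW);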
*/ public DelegatedCodec(Codec delegate, CheckedFunction forward, - CheckedFunction backward) { - this(delegate, forward, backward, CopyType.DEEP); + CheckedFunction backward, + Class clazz) { + this(delegate, forward, backward, clazz, CopyType.DEEP); + } + + @Override + public Class getTypeClass() { + return clazz; } @Override diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java index 50488053159..d31be6fe976 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java @@ -36,6 +36,11 @@ private IntegerCodec() { // singleton } + @Override + public Class getTypeClass() { + return Integer.class; + } + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java index 9e776cc18f7..cf481980008 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java @@ -33,6 +33,11 @@ public static LongCodec get() { private LongCodec() { } + @Override + public Class getTypeClass() { + return Long.class; + } + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2Codec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2Codec.java index 96d12d1ebe5..8eb4a307215 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2Codec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2Codec.java @@ -47,12 +47,19 @@ public static Codec get(T t) { return (Codec) codec; } + private final Class clazz; private final Parser parser; private Proto2Codec(M m) { + this.clazz = (Class) m.getClass(); this.parser = (Parser) m.getParserForType(); } + @Override + public Class getTypeClass() { + return clazz; + } + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto3Codec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto3Codec.java index 30245e033e0..c1eb693a007 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto3Codec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto3Codec.java @@ -47,12 +47,19 @@ public static Codec get(T t) { return (Codec) codec; } + private final Class clazz; private final Parser parser; private Proto3Codec(M m) { + this.clazz = (Class) m.getClass(); this.parser = (Parser) m.getParserForType(); } + @Override + public Class getTypeClass() { + return clazz; + } + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ShortCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ShortCodec.java index f6482e5712c..beb296a29d1 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ShortCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ShortCodec.java @@ -37,6 +37,11 @@ private ShortCodec() { // singleton } + @Override + public Class getTypeClass() { + return Short.class; + 
} + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodecBase.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodecBase.java index 1df55237937..e35be632dc4 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodecBase.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodecBase.java @@ -59,6 +59,11 @@ abstract class StringCodecBase implements Codec { this.fixedLength = max == encoder.averageBytesPerChar(); } + @Override + public final Class getTypeClass() { + return String.class; + } + CharsetEncoder newEncoder() { return charset.newEncoder() .onMalformedInput(CodingErrorAction.REPORT) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/UuidCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/UuidCodec.java index dfccaa0ab75..d05b748b52a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/UuidCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/UuidCodec.java @@ -40,6 +40,11 @@ public static int getSerializedSize() { private UuidCodec() { } + @Override + public Class getTypeClass() { + return UUID.class; + } + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ClientVersion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ClientVersion.java index f3bd1a96b66..cc6695dc7d6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ClientVersion.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ClientVersion.java @@ -42,10 +42,6 @@ public enum ClientVersion implements ComponentVersion { "This client version has support for Object Store and File " + "System Optimized Bucket Layouts."), - EC_REPLICA_INDEX_REQUIRED_IN_BLOCK_REQUEST(4, - "This client version enforces replica index is set for fixing read corruption that could occur when " + - "replicaIndex parameter is not validated before EC block reads."), - FUTURE_VERSION(-1, "Used internally when the server side is older and an" + " unknown client version has arrived from the client."); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index df0fdc59a4a..4c0df91e1a6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -201,9 +201,6 @@ public final class OzoneConfigKeys { "ozone.client.ec.grpc.write.timeout"; public static final String OZONE_CLIENT_EC_GRPC_WRITE_TIMEOUT_DEFAULT = "30s"; - public static final String OZONE_EC_GRPC_ZERO_COPY_ENABLED = - "ozone.ec.grpc.zerocopy.enabled"; - public static final boolean OZONE_EC_GRPC_ZERO_COPY_ENABLED_DEFAULT = true; /** * Ozone administrator users delimited by comma. 
@@ -571,11 +568,6 @@ public final class OzoneConfigKeys { "ozone.https.client.need-auth"; public static final boolean OZONE_CLIENT_HTTPS_NEED_AUTH_DEFAULT = false; - public static final String OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY = - "ozone.om.keyname.character.check.enabled"; - public static final boolean OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT = - false; - public static final int OZONE_INIT_DEFAULT_LAYOUT_VERSION_DEFAULT = -1; public static final String OZONE_CLIENT_KEY_PROVIDER_CACHE_EXPIRY = "ozone.client.key.provider.cache.expiry"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index 101507b502e..49bfa1eae21 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -122,6 +122,7 @@ public final class OzoneConsts { public static final String OM_DB_BACKUP_PREFIX = "om.db.backup."; public static final String SCM_DB_BACKUP_PREFIX = "scm.db.backup."; public static final String CONTAINER_DB_NAME = "container.db"; + public static final String WITNESSED_CONTAINER_DB_NAME = "witnessed_container.db"; public static final String STORAGE_DIR_CHUNKS = "chunks"; public static final String OZONE_DB_CHECKPOINT_REQUEST_FLUSH = @@ -387,10 +388,7 @@ private OzoneConsts() { // should remain prepared even after a restart. public static final String PREPARE_MARKER = "prepareMarker"; - // TODO : rename this to OZONE_RATIS_SNAPSHOT_DIR and use it in both - // SCM and OM - public static final String OM_RATIS_SNAPSHOT_DIR = "snapshot"; - public static final String SCM_RATIS_SNAPSHOT_DIR = "snapshot"; + public static final String OZONE_RATIS_SNAPSHOT_DIR = "snapshot"; public static final long DEFAULT_OM_UPDATE_ID = -1L; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java index 982b559c7a5..2d0b2bb56fd 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java @@ -47,6 +47,9 @@ public enum OzoneManagerVersion implements ComponentVersion { LIGHTWEIGHT_LIST_STATUS(8, "OzoneManager version that supports lightweight" + " listStatus API."), + S3_OBJECT_TAGGING_API(9, "OzoneManager version that supports S3 object tagging APIs, such as " + + "PutObjectTagging, GetObjectTagging, and DeleteObjectTagging"), + FUTURE_VERSION(-1, "Used internally in the client when the server side is " + " newer and an unknown server version has arrived to the client."); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java index f8b3febfeca..03771915be4 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java @@ -33,6 +33,8 @@ import org.apache.hadoop.ozone.common.utils.BufferUtils; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Class to compute and verify checksums for chunks. @@ -40,6 +42,8 @@ * This class is not thread safe. 
*/ public class Checksum { + public static final Logger LOG = LoggerFactory.getLogger(Checksum.class); + private static Function newMessageDigestFunction( String algorithm) { final MessageDigest md; @@ -63,7 +67,7 @@ public static ByteString int2ByteString(int n) { private static Function newChecksumByteBufferFunction( Supplier constructor) { final ChecksumByteBuffer algorithm = constructor.get(); - return data -> { + return data -> { algorithm.reset(); algorithm.update(data); return int2ByteString((int)algorithm.getValue()); @@ -97,6 +101,23 @@ Function newChecksumFunction() { private final ChecksumType checksumType; private final int bytesPerChecksum; + /** + * Caches computeChecksum() result when requested. + * This must be manually cleared when a new block chunk has been started. + */ + private final ChecksumCache checksumCache; + + /** + * BlockOutputStream needs to call this method to clear the checksum cache + * whenever a block chunk has been established. + */ + public boolean clearChecksumCache() { + if (checksumCache != null) { + checksumCache.clear(); + return true; + } + return false; + } /** * Constructs a Checksum object. @@ -106,6 +127,24 @@ Function newChecksumFunction() { public Checksum(ChecksumType type, int bytesPerChecksum) { this.checksumType = type; this.bytesPerChecksum = bytesPerChecksum; + this.checksumCache = null; + } + + /** + * Constructs a Checksum object. + * @param type type of Checksum + * @param bytesPerChecksum number of bytes of data per checksum + * @param allowChecksumCache true to enable checksum cache + */ + public Checksum(ChecksumType type, int bytesPerChecksum, boolean allowChecksumCache) { + this.checksumType = type; + this.bytesPerChecksum = bytesPerChecksum; + LOG.debug("allowChecksumCache = {}", allowChecksumCache); + if (allowChecksumCache) { + this.checksumCache = new ChecksumCache(bytesPerChecksum); + } else { + this.checksumCache = null; + } } /** @@ -128,13 +167,25 @@ public ChecksumData computeChecksum(byte[] data) return computeChecksum(ByteBuffer.wrap(data)); } + /** + * The default implementation of computeChecksum(ByteBuffer) that does not use cache, even if cache is initialized. + * This is a stop-gap solution before the protocol change. + * @param data ByteBuffer + * @return ChecksumData + * @throws OzoneChecksumException + */ + public ChecksumData computeChecksum(ByteBuffer data) + throws OzoneChecksumException { + return computeChecksum(data, false); + } + /** * Computes checksum for give data. * @param data input data. * @return ChecksumData computed for input data. * @throws OzoneChecksumException thrown when ChecksumType is not recognized */ - public ChecksumData computeChecksum(ByteBuffer data) + public ChecksumData computeChecksum(ByteBuffer data, boolean useChecksumCache) throws OzoneChecksumException { // If type is set to NONE, we do not need to compute the checksums. We also // need to avoid unnecessary conversions. @@ -144,7 +195,7 @@ public ChecksumData computeChecksum(ByteBuffer data) if (!data.isReadOnly()) { data = data.asReadOnlyBuffer(); } - return computeChecksum(ChunkBuffer.wrap(data)); + return computeChecksum(ChunkBuffer.wrap(data), useChecksumCache); } public ChecksumData computeChecksum(List byteStrings) @@ -154,8 +205,20 @@ public ChecksumData computeChecksum(List byteStrings) return computeChecksum(ChunkBuffer.wrap(buffers)); } + /** + * The default implementation of computeChecksum(ChunkBuffer) that does not use cache, even if cache is initialized. 
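A hedged usage sketch of the cache-aware mode introduced here; the ChecksumType, the 16 KB bytesPerChecksum, and chunkBuffer are example values, and OzoneChecksumException handling is omitted.

// Sketch only: reuse previously computed checksums while the same chunk keeps growing.
Checksum checksum = new Checksum(ChecksumType.CRC32, 16 * 1024, true);
ChecksumData partial = checksum.computeChecksum(chunkBuffer, true);   // cache-aware path
// ...more data is appended to the same chunk; only the trailing checksum is recomputed...
checksum.clearChecksumCache();   // must be cleared when a new chunk starts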
+ * This is a stop-gap solution before the protocol change. + * @param data ChunkBuffer + * @return ChecksumData + * @throws OzoneChecksumException + */ public ChecksumData computeChecksum(ChunkBuffer data) throws OzoneChecksumException { + return computeChecksum(data, false); + } + + public ChecksumData computeChecksum(ChunkBuffer data, boolean useCache) + throws OzoneChecksumException { if (checksumType == ChecksumType.NONE) { // Since type is set to NONE, we do not need to compute the checksums return new ChecksumData(checksumType, bytesPerChecksum); @@ -168,12 +231,20 @@ public ChecksumData computeChecksum(ChunkBuffer data) throw new OzoneChecksumException(checksumType); } - // Checksum is computed for each bytesPerChecksum number of bytes of data - // starting at offset 0. The last checksum might be computed for the - // remaining data with length less than bytesPerChecksum. - final List checksumList = new ArrayList<>(); - for (ByteBuffer b : data.iterate(bytesPerChecksum)) { - checksumList.add(computeChecksum(b, function, bytesPerChecksum)); + final List checksumList; + if (checksumCache == null || !useCache) { + // When checksumCache is not enabled: + // Checksum is computed for each bytesPerChecksum number of bytes of data + // starting at offset 0. The last checksum might be computed for the + // remaining data with length less than bytesPerChecksum. + checksumList = new ArrayList<>(); + for (ByteBuffer b : data.iterate(bytesPerChecksum)) { + checksumList.add(computeChecksum(b, function, bytesPerChecksum)); // merge this? + } + } else { + // When checksumCache is enabled: + // We only need to update the last checksum in the cache, then pass it along. + checksumList = checksumCache.computeChecksum(data, function); } return new ChecksumData(checksumType, bytesPerChecksum, checksumList); } @@ -185,7 +256,7 @@ public ChecksumData computeChecksum(ChunkBuffer data) * @param maxLength the max length of data * @return computed checksum ByteString */ - private static ByteString computeChecksum(ByteBuffer data, + protected static ByteString computeChecksum(ByteBuffer data, Function function, int maxLength) { final int limit = data.limit(); try { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumCache.java new file mode 100644 index 00000000000..0f6482919a3 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumCache.java @@ -0,0 +1,122 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.common; + +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.function.Function; + +/** + * Cache previous checksums to avoid recomputing them. + * This is a stop-gap solution to reduce checksum calc overhead inside critical section + * without having to do a major refactoring/overhaul over protobuf and interfaces. + * This is only supposed to be used by BlockOutputStream, for now. + *
    + * Each BlockOutputStream has its own Checksum instance. + * Each block chunk (4 MB default) is divided into 16 KB (default) each for checksum calculation. + * For CRC32/CRC32C, each checksum takes 4 bytes. Thus each block chunk has 4 MB / 16 KB * 4 B = 1 KB of checksum data. + */ +public class ChecksumCache { + public static final Logger LOG = LoggerFactory.getLogger(ChecksumCache.class); + + private final int bytesPerChecksum; + private final List checksums; + // Chunk length last time the checksum is computed + private int prevChunkLength; + // This only serves as a hint for array list initial allocation. The array list will still grow as needed. + private static final int BLOCK_CHUNK_SIZE = 4 * 1024 * 1024; // 4 MB + + public ChecksumCache(int bytesPerChecksum) { + LOG.info("Initializing ChecksumCache with bytesPerChecksum = {}", bytesPerChecksum); + this.prevChunkLength = 0; + this.bytesPerChecksum = bytesPerChecksum; + // Set initialCapacity to avoid costly resizes + this.checksums = new ArrayList<>(BLOCK_CHUNK_SIZE / bytesPerChecksum); + } + + /** + * Clear cached checksums. And reset the written index. + */ + public void clear() { + prevChunkLength = 0; + checksums.clear(); + } + + public List getChecksums() { + return checksums; + } + + public List computeChecksum(ChunkBuffer data, Function function) { + // Indicates how much data the current chunk buffer holds + final int currChunkLength = data.limit(); + + if (currChunkLength == prevChunkLength) { + LOG.debug("ChunkBuffer data limit same as last time ({}). No new checksums need to be computed", prevChunkLength); + return checksums; + } + + // Sanity check + if (currChunkLength < prevChunkLength) { + // If currChunkLength <= lastChunkLength, it indicates a bug that needs to be addressed. + // It means BOS has not properly clear()ed the cache when a new chunk is started in that code path. + throw new IllegalArgumentException("ChunkBuffer data limit (" + currChunkLength + ")" + + " must not be smaller than last time (" + prevChunkLength + ")"); + } + + // One or more checksums need to be computed + + // Start of the checksum index that need to be (re)computed + final int ciStart = prevChunkLength / bytesPerChecksum; + final int ciEnd = currChunkLength / bytesPerChecksum + (currChunkLength % bytesPerChecksum == 0 ? 0 : 1); + int i = 0; + for (ByteBuffer b : data.iterate(bytesPerChecksum)) { + if (i < ciStart) { + i++; + continue; + } + + // variable i can either point to: + // 1. the last element in the list -- in which case the checksum needs to be updated + // 2. one after the last element -- in which case a new checksum needs to be added + assert i == checksums.size() - 1 || i == checksums.size(); + + // TODO: Furthermore for CRC32/CRC32C, it can be even more efficient by updating the last checksum byte-by-byte. 
+ final ByteString checksum = Checksum.computeChecksum(b, function, bytesPerChecksum); + if (i == checksums.size()) { + checksums.add(checksum); + } else { + checksums.set(i, checksum); + } + + i++; + } + + // Sanity check + if (i != ciEnd) { + throw new IllegalStateException("ChecksumCache: Checksum index end does not match expectation"); + } + + // Update last written index + prevChunkLength = currChunkLength; + return checksums; + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java index 36c16e92bf0..254be93dc4a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java @@ -25,9 +25,9 @@ import java.util.List; import java.util.NoSuchElementException; import java.util.Objects; -import java.util.UUID; import java.util.function.Function; +import org.apache.hadoop.ozone.common.utils.BufferUtils; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.util.UncheckedAutoCloseable; @@ -35,7 +35,6 @@ final class ChunkBufferImplWithByteBuffer implements ChunkBuffer { private final ByteBuffer buffer; private final UncheckedAutoCloseable underlying; - private final UUID identity = UUID.randomUUID(); ChunkBufferImplWithByteBuffer(ByteBuffer buffer) { this(buffer, null); @@ -104,7 +103,7 @@ public List asByteBufferList() { @Override public long writeTo(GatheringByteChannel channel) throws IOException { - return channel.write(buffer); + return BufferUtils.writeFully(channel, buffer); } @Override @@ -163,6 +162,6 @@ public int hashCode() { @Override public String toString() { return getClass().getSimpleName() + ":limit=" + buffer.limit() - + "@" + identity; + + "@" + Integer.toHexString(super.hashCode()); } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBufferList.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBufferList.java index a3b5f9d2eef..e1f169662f8 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBufferList.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBufferList.java @@ -23,6 +23,8 @@ import java.util.Collections; import java.util.Iterator; import java.util.NoSuchElementException; + +import org.apache.hadoop.ozone.common.utils.BufferUtils; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import java.io.IOException; @@ -246,9 +248,9 @@ public List asByteBufferList() { @Override public long writeTo(GatheringByteChannel channel) throws IOException { - long bytes = channel.write(buffers.toArray(new ByteBuffer[0])); + final long written = BufferUtils.writeFully(channel, buffers); findCurrent(); - return bytes; + return written; } @Override diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java index dda4fae0d2b..732af4b6850 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java @@ -19,6 +19,7 @@ import com.google.common.base.Preconditions; 
import org.apache.hadoop.hdds.utils.db.CodecBuffer; +import org.apache.hadoop.ozone.common.utils.BufferUtils; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import java.io.IOException; @@ -279,7 +280,7 @@ public List asByteBufferList() { @Override public long writeTo(GatheringByteChannel channel) throws IOException { - return channel.write(buffers.toArray(new ByteBuffer[0])); + return BufferUtils.writeFully(channel, buffers); } @Override diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/utils/BufferUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/utils/BufferUtils.java index c6ad754f19b..a266c3615b0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/utils/BufferUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/utils/BufferUtils.java @@ -19,15 +19,23 @@ package org.apache.hadoop.ozone.common.utils; import com.google.common.base.Preconditions; + +import java.io.IOException; import java.nio.ByteBuffer; +import java.nio.channels.GatheringByteChannel; import java.util.ArrayList; import java.util.List; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Utilities for buffers. */ public final class BufferUtils { + public static final Logger LOG = LoggerFactory.getLogger(BufferUtils.class); + + private static final ByteBuffer[] EMPTY_BYTE_BUFFER_ARRAY = {}; /** Utility classes should not be constructed. **/ private BufferUtils() { @@ -136,4 +144,46 @@ public static int getNumberOfBins(long numElements, int maxElementsPerBin) { } return Math.toIntExact(n); } + + /** + * Write all remaining bytes in buffer to the given channel. + */ + public static long writeFully(GatheringByteChannel ch, ByteBuffer bb) throws IOException { + long written = 0; + while (bb.remaining() > 0) { + int n = ch.write(bb); + if (n < 0) { + throw new IllegalStateException("GatheringByteChannel.write returns " + n + " < 0 for " + ch); + } + written += n; + } + return written; + } + + public static long writeFully(GatheringByteChannel ch, List buffers) throws IOException { + return BufferUtils.writeFully(ch, buffers.toArray(EMPTY_BYTE_BUFFER_ARRAY)); + } + + public static long writeFully(GatheringByteChannel ch, ByteBuffer[] buffers) throws IOException { + if (LOG.isDebugEnabled()) { + for (int i = 0; i < buffers.length; i++) { + LOG.debug("buffer[{}]: remaining={}", i, buffers[i].remaining()); + } + } + + long written = 0; + for (int i = 0; i < buffers.length; i++) { + while (buffers[i].remaining() > 0) { + final long n = ch.write(buffers, i, buffers.length - i); + if (LOG.isDebugEnabled()) { + LOG.debug("buffer[{}]: remaining={}, written={}", i, buffers[i].remaining(), n); + } + if (n < 0) { + throw new IllegalStateException("GatheringByteChannel.write returns " + n + " < 0 for " + ch); + } + written += n; + } + } + return written; + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java index ea5c5453f3f..4fee39921b6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java @@ -39,7 +39,8 @@ public class BlockData { private static final Codec CODEC = new DelegatedCodec<>( 
Proto3Codec.get(ContainerProtos.BlockData.getDefaultInstance()), BlockData::getFromProtoBuf, - BlockData::getProtoBufMessage); + BlockData::getProtoBufMessage, + BlockData.class); public static Codec getCodec() { return CODEC; @@ -253,7 +254,7 @@ public void setChunks(List chunks) { size = singleChunk.getLen(); } else { chunkList = chunks; - size = chunks.parallelStream() + size = chunks.stream() .mapToLong(ContainerProtos.ChunkInfo::getLen) .sum(); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java index 832ab40d30f..ab5d39e9c3d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java @@ -36,6 +36,7 @@ public class ChunkInfoList { Proto3Codec.get(ContainerProtos.ChunkInfoList.getDefaultInstance()), ChunkInfoList::getFromProtoBuf, ChunkInfoList::getProtoBufMessage, + ChunkInfoList.class, DelegatedCodec.CopyType.SHALLOW); public static Codec getCodec() { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java index 6bd83b44a93..6e0dde66986 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java @@ -33,6 +33,7 @@ import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.util.MetricUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -100,6 +101,8 @@ public static synchronized GrpcMetrics create(Configuration conf) { */ public void unRegister() { DefaultMetricsSystem.instance().unregisterSource(SOURCE_NAME); + MetricUtil.stop(grpcProcessingTimeMillisQuantiles); + MetricUtil.stop(grpcQueueTimeMillisQuantiles); } @Override diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/MetricUtil.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/MetricUtil.java index 23ff3c0f29e..9d903b900ac 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/MetricUtil.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/MetricUtil.java @@ -110,4 +110,20 @@ public static List createQuantiles(MetricsRegistry registry, sampleName, valueName, interval); }).collect(Collectors.toList()); } + + public static void stop(MutableQuantiles... 
quantiles) { + if (quantiles != null) { + stop(Arrays.asList(quantiles)); + } + } + + public static void stop(Iterable quantiles) { + if (quantiles != null) { + for (MutableQuantiles q : quantiles) { + if (q != null) { + q.stop(); + } + } + } + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/PerformanceMetrics.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/PerformanceMetrics.java index 3f5150bd62c..39e887eaa49 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/PerformanceMetrics.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/PerformanceMetrics.java @@ -22,7 +22,9 @@ import org.apache.hadoop.metrics2.lib.MutableQuantiles; import org.apache.hadoop.metrics2.lib.MutableStat; +import java.io.Closeable; import java.util.List; +import java.util.Map; /** * The {@code PerformanceMetrics} class encapsulates a collection of related @@ -30,7 +32,7 @@ * This class provides methods to update these metrics and to * snapshot their values for reporting. */ -public class PerformanceMetrics { +public class PerformanceMetrics implements Closeable { private final MutableStat stat; private final List quantiles; private final MutableMinMax minMax; @@ -43,12 +45,13 @@ public class PerformanceMetrics { * @param intervals the intervals for quantiles computation. Note, each * interval in 'intervals' increases memory usage, as it corresponds * to a separate quantile calculator. + * @return {@link PerformanceMetrics} instances created, mapped by field name */ - public static synchronized void initializeMetrics(T source, + public static synchronized Map initializeMetrics(T source, MetricsRegistry registry, String sampleName, String valueName, int[] intervals) { try { - PerformanceMetricsInitializer.initialize( + return PerformanceMetricsInitializer.initialize( source, registry, sampleName, valueName, intervals); } catch (IllegalAccessException e) { throw new RuntimeException("Failed to initialize PerformanceMetrics", e); @@ -73,6 +76,11 @@ public PerformanceMetrics( minMax = new MutableMinMax(registry, name, description, valueName); } + @Override + public void close() { + MetricUtil.stop(quantiles); + } + /** * Adds a value to all the aggregated metrics. * @@ -95,6 +103,5 @@ public void snapshot(MetricsRecordBuilder recordBuilder, boolean all) { this.quantiles.forEach(quantile -> quantile.snapshot(recordBuilder, all)); this.minMax.snapshot(recordBuilder, all); } - } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/PerformanceMetricsInitializer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/PerformanceMetricsInitializer.java index b2e83bb780c..cb6f77e9f5c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/PerformanceMetricsInitializer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/PerformanceMetricsInitializer.java @@ -21,6 +21,8 @@ import org.apache.hadoop.metrics2.lib.MetricsRegistry; import java.lang.reflect.Field; +import java.util.HashMap; +import java.util.Map; /** * Utility class for initializing PerformanceMetrics in a MetricsSource. 
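For context on the PerformanceMetrics and MetricUtil changes above: initializeMetrics() now returns the created PerformanceMetrics instances keyed by field name, so the owning MetricsSource can keep them and close them when it unregisters, which stops the underlying MutableQuantiles timers via MetricUtil.stop(). A minimal sketch of that lifecycle with a hypothetical metrics holder (class, field, and metric names are illustrative assumptions, not part of this patch):

import java.util.Map;

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.ozone.util.PerformanceMetrics;

// Hypothetical owner of PerformanceMetrics fields. The initializer assigns the
// fields reflectively and returns the created instances keyed by field name.
public class ExampleIoMetrics implements AutoCloseable {
  private PerformanceMetrics readLatencyNs;   // assigned by initializeMetrics()
  private PerformanceMetrics writeLatencyNs;  // assigned by initializeMetrics()

  private final Map<String, PerformanceMetrics> created;

  public ExampleIoMetrics(int[] intervals) {
    MetricsRegistry registry = new MetricsRegistry("ExampleIoMetrics");
    created = PerformanceMetrics.initializeMetrics(
        this, registry, "Ops", "Latency", intervals);
  }

  @Override
  public void close() {
    // PerformanceMetrics is now Closeable; closing stops its MutableQuantiles.
    created.values().forEach(PerformanceMetrics::close);
  }
}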
@@ -36,11 +38,13 @@ private PerformanceMetricsInitializer() { } * @param sampleName sample name * @param valueName value name * @param intervals intervals for quantiles + * @return {@link PerformanceMetrics} instances created, mapped by field name * @throws IllegalAccessException if unable to access the field */ - public static void initialize(T source, MetricsRegistry registry, + public static Map initialize(T source, MetricsRegistry registry, String sampleName, String valueName, int[] intervals) throws IllegalAccessException { + Map instances = new HashMap<>(); Field[] fields = source.getClass().getDeclaredFields(); for (Field field : fields) { @@ -54,8 +58,11 @@ public static void initialize(T source, MetricsRegistry registry, sampleName, valueName, intervals); field.setAccessible(true); field.set(source, performanceMetrics); + instances.put(name, performanceMetrics); } } } + + return instances; } } diff --git a/hadoop-hdds/common/src/main/resources/hdds-version-info.properties b/hadoop-hdds/common/src/main/resources/hdds-version-info.properties index bf887021c5b..3ba2c2cbfa2 100644 --- a/hadoop-hdds/common/src/main/resources/hdds-version-info.properties +++ b/hadoop-hdds/common/src/main/resources/hdds-version-info.properties @@ -18,9 +18,6 @@ version=${declared.hdds.version} revision=${version-info.scm.commit} -branch=${version-info.scm.branch} -user=${user.name} -date=${version-info.build.time} url=${version-info.scm.uri} srcChecksum=${version-info.source.md5} hadoopProtoc2Version=${proto2.hadooprpc.protobuf.version} diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 9b0ff0e9625..fdeb5c1c043 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -160,6 +160,13 @@ this not set. Ideally, this should be mapped to a fast disk like an SSD. + + ozone.scm.container.list.max.count + 4096 + OZONE, SCM, CONTAINER + The max number of containers info could be included in + response of ListContainer request. + hdds.datanode.dir @@ -272,16 +279,16 @@ hdds.ratis.snapshot.threshold - 10000 - OZONE, RATIS + 100000 + OZONE, CONTAINER, RATIS Number of transactions after which a ratis snapshot should be taken. hdds.container.ratis.statemachine.max.pending.apply-transactions - 10000 - OZONE, RATIS + 100000 + OZONE, CONTAINER, RATIS Maximum number of pending apply transactions in a data pipeline. The default value is kept same as default snapshot threshold hdds.ratis.snapshot.threshold. @@ -1570,7 +1577,7 @@ hdds.datanode.metadata.rocksdb.cache.size - 64MB + 1GB OZONE, DATANODE, MANAGEMENT Size of the block metadata cache shared among RocksDB instances on each @@ -3469,9 +3476,9 @@ ozone.s3g.client.buffer.size OZONE, S3GATEWAY - 4KB + 4MB - The size of the buffer which is for read block. (4KB by default). + The size of the buffer which is for read block. (4MB by default). @@ -4530,19 +4537,31 @@ - ozone.ec.grpc.zerocopy.enabled - true + ozone.om.max.buckets + 100000 + OZONE, OM + + maximum number of buckets across all volumes. + + + + + ozone.volume.io.percentiles.intervals.seconds + 60 OZONE, DATANODE - Specify if zero-copy should be enabled for EC GRPC protocol. + This setting specifies the interval (in seconds) for monitoring percentile performance metrics. + It helps in tracking the read and write performance of DataNodes in real-time, + allowing for better identification and analysis of performance issues. 
+ - ozone.om.max.buckets - 100000 + ozone.om.server.list.max.size + 1000 OZONE, OM - maximum number of buckets across all volumes. + Configuration property to configure the max server side response size for list calls on om. diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java index aeb1e207e70..78465fd2816 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java @@ -17,12 +17,16 @@ */ package org.apache.hadoop.hdds.protocol; +import com.google.common.collect.ImmutableSet; import org.apache.hadoop.hdds.DatanodeVersion; import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port; +import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.junit.jupiter.api.Test; import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name.ALL_PORTS; import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name.V0_PORTS; @@ -48,21 +52,36 @@ void protoIncludesNewPortsOnlyForV1() { subject.toProto(VERSION_HANDLES_UNKNOWN_DN_PORTS.toProtoValue()); assertPorts(protoV1, ALL_PORTS); } + @Test + void testRequiredPortsProto() { + DatanodeDetails subject = MockDatanodeDetails.randomDatanodeDetails(); + Set requiredPorts = Stream.of(Port.Name.STANDALONE, Port.Name.RATIS) + .collect(Collectors.toSet()); + HddsProtos.DatanodeDetailsProto proto = + subject.toProto(subject.getCurrentVersion(), requiredPorts); + assertPorts(proto, ImmutableSet.copyOf(requiredPorts)); + + HddsProtos.DatanodeDetailsProto ioPortProto = + subject.toProto(subject.getCurrentVersion(), Name.IO_PORTS); + assertPorts(ioPortProto, ImmutableSet.copyOf(Name.IO_PORTS)); + } @Test public void testNewBuilderCurrentVersion() { // test that if the current version is not set (Ozone 1.4.0 and earlier), // it falls back to SEPARATE_RATIS_PORTS_AVAILABLE DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + Set requiredPorts = Stream.of(Port.Name.STANDALONE, Port.Name.RATIS) + .collect(Collectors.toSet()); HddsProtos.DatanodeDetailsProto.Builder protoBuilder = - dn.toProtoBuilder(DEFAULT_VERSION.toProtoValue()); + dn.toProtoBuilder(DEFAULT_VERSION.toProtoValue(), requiredPorts); protoBuilder.clearCurrentVersion(); DatanodeDetails dn2 = DatanodeDetails.newBuilder(protoBuilder.build()).build(); assertEquals(DatanodeVersion.SEPARATE_RATIS_PORTS_AVAILABLE.toProtoValue(), dn2.getCurrentVersion()); // test that if the current version is set, it is used protoBuilder = - dn.toProtoBuilder(DEFAULT_VERSION.toProtoValue()); + dn.toProtoBuilder(DEFAULT_VERSION.toProtoValue(), requiredPorts); DatanodeDetails dn3 = DatanodeDetails.newBuilder(protoBuilder.build()).build(); assertEquals(DatanodeVersion.CURRENT.toProtoValue(), dn3.getCurrentVersion()); } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/common/helpers/TestExcludeList.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/common/helpers/TestExcludeList.java index 5571330ee64..d8af0c4d5ab 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/common/helpers/TestExcludeList.java +++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/common/helpers/TestExcludeList.java @@ -37,19 +37,19 @@ public class TestExcludeList { public void excludeNodesShouldBeCleanedBasedOnGivenTime() { ExcludeList list = new ExcludeList(10, clock); list.addDatanode(DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()) - .setIpAddress("127.0.0.1").setHostName("localhost").addPort( - DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, 2001)) + .setIpAddress("127.0.0.1").setHostName("localhost") + .addPort(DatanodeDetails.newStandalonePort(2001)) .build()); assertEquals(1, list.getDatanodes().size()); clock.fastForward(11); assertEquals(0, list.getDatanodes().size()); list.addDatanode(DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()) - .setIpAddress("127.0.0.2").setHostName("localhost").addPort( - DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, 2001)) + .setIpAddress("127.0.0.2").setHostName("localhost") + .addPort(DatanodeDetails.newStandalonePort(2001)) .build()); list.addDatanode(DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()) - .setIpAddress("127.0.0.3").setHostName("localhost").addPort( - DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, 2001)) + .setIpAddress("127.0.0.3").setHostName("localhost") + .addPort(DatanodeDetails.newStandalonePort(2001)) .build()); assertEquals(2, list.getDatanodes().size()); } @@ -58,8 +58,8 @@ public void excludeNodesShouldBeCleanedBasedOnGivenTime() { public void excludeNodeShouldNotBeCleanedIfExpiryTimeIsZero() { ExcludeList list = new ExcludeList(0, clock); list.addDatanode(DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()) - .setIpAddress("127.0.0.1").setHostName("localhost").addPort( - DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, 2001)) + .setIpAddress("127.0.0.1").setHostName("localhost") + .addPort(DatanodeDetails.newStandalonePort(2001)) .build()); assertEquals(1, list.getDatanodes().size()); clock.fastForward(1); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/MockGatheringChannel.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/MockGatheringChannel.java index ce6f58dadcb..83b68512380 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/MockGatheringChannel.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/MockGatheringChannel.java @@ -21,8 +21,11 @@ import java.nio.ByteBuffer; import java.nio.channels.GatheringByteChannel; import java.nio.channels.WritableByteChannel; +import java.util.concurrent.ThreadLocalRandom; import static com.google.common.base.Preconditions.checkElementIndex; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; /** * {@link GatheringByteChannel} implementation for testing. 
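The BufferUtils.writeFully() helpers added above exist because GatheringByteChannel.write() may perform a short write and consume only part of the remaining bytes; the MockGatheringChannel changes below deliberately simulate such short writes to exercise that loop. A standalone sketch of the same retry-until-drained pattern against a FileChannel (file name and payload are illustrative assumptions):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public final class WriteFullySketch {
  public static void main(String[] args) throws IOException {
    ByteBuffer[] buffers = {
        ByteBuffer.wrap("hello ".getBytes(StandardCharsets.UTF_8)),
        ByteBuffer.wrap("world".getBytes(StandardCharsets.UTF_8)),
    };
    try (FileChannel channel = FileChannel.open(Paths.get("example.bin"),
        StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
      long written = 0;
      // A single gathering write may stop early, so keep writing from the
      // first buffer that still has bytes remaining until all are drained.
      for (int i = 0; i < buffers.length; i++) {
        while (buffers[i].hasRemaining()) {
          written += channel.write(buffers, i, buffers.length - i);
        }
      }
      System.out.println("wrote " + written + " bytes");
    }
  }
}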
Delegates @@ -45,11 +48,32 @@ public long write(ByteBuffer[] srcs, int offset, int length) checkElementIndex(offset, srcs.length, "offset"); checkElementIndex(offset + length - 1, srcs.length, "offset+length"); - long bytes = 0; - for (ByteBuffer b : srcs) { - bytes += write(b); + long fullLength = 0; + for (int i = offset; i < srcs.length; i++) { + fullLength += srcs[i].remaining(); } - return bytes; + if (fullLength <= 0) { + return 0; + } + + // simulate partial write by setting a random partial length + final long partialLength = ThreadLocalRandom.current().nextLong(fullLength + 1); + + long written = 0; + for (int i = offset; i < srcs.length; i++) { + for (final ByteBuffer src = srcs[i]; src.hasRemaining();) { + final long n = partialLength - written; // write at most n bytes + assertThat(n).isGreaterThanOrEqualTo(0); + if (n == 0) { + return written; + } + + final int remaining = src.remaining(); + final int adjustment = remaining <= n ? 0 : Math.toIntExact(remaining - n); + written += adjustedWrite(src, adjustment); + } + } + return written; } @Override @@ -59,7 +83,40 @@ public long write(ByteBuffer[] srcs) throws IOException { @Override public int write(ByteBuffer src) throws IOException { - return delegate.write(src); + final int remaining = src.remaining(); + if (remaining <= 0) { + return 0; + } + // Simulate partial write by a random adjustment. + final int adjustment = ThreadLocalRandom.current().nextInt(remaining + 1); + return adjustedWrite(src, adjustment); + } + + /** Simulate partial write by the given adjustment. */ + private int adjustedWrite(ByteBuffer src, int adjustment) throws IOException { + assertThat(adjustment).isGreaterThanOrEqualTo(0); + final int remaining = src.remaining(); + if (remaining <= 0) { + return 0; + } + assertThat(adjustment).isLessThanOrEqualTo(remaining); + + final int oldLimit = src.limit(); + final int newLimit = oldLimit - adjustment; + src.limit(newLimit); + assertEquals(newLimit, src.limit()); + final int toWrite = remaining - adjustment; + assertEquals(toWrite, src.remaining()); + + final int written = delegate.write(src); + assertEquals(newLimit, src.limit()); + assertEquals(toWrite - written, src.remaining()); + + src.limit(oldLimit); + assertEquals(oldLimit, src.limit()); + assertEquals(remaining - written, src.remaining()); + + return written; } @Override diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java index 829f4bb150c..7ddb605c0f8 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java @@ -19,7 +19,10 @@ import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.nio.ByteBuffer; import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -35,23 +38,25 @@ public class TestChecksum { private static final ContainerProtos.ChecksumType CHECKSUM_TYPE_DEFAULT = ContainerProtos.ChecksumType.SHA256; - private Checksum getChecksum(ContainerProtos.ChecksumType type) { + private Checksum getChecksum(ContainerProtos.ChecksumType type, boolean allowChecksumCache) { if (type == null) { type = CHECKSUM_TYPE_DEFAULT; } - 
return new Checksum(type, BYTES_PER_CHECKSUM); + return new Checksum(type, BYTES_PER_CHECKSUM, allowChecksumCache); } /** * Tests {@link Checksum#verifyChecksum(byte[], ChecksumData)}. */ - @Test - public void testVerifyChecksum() throws Exception { - Checksum checksum = getChecksum(null); + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testVerifyChecksum(boolean useChecksumCache) throws Exception { + Checksum checksum = getChecksum(null, useChecksumCache); int dataLen = 55; byte[] data = RandomStringUtils.randomAlphabetic(dataLen).getBytes(UTF_8); + ByteBuffer byteBuffer = ByteBuffer.wrap(data); - ChecksumData checksumData = checksum.computeChecksum(data); + ChecksumData checksumData = checksum.computeChecksum(byteBuffer, useChecksumCache); // A checksum is calculate for each bytesPerChecksum number of bytes in // the data. Since that value is 10 here and the data length is 55, we @@ -65,11 +70,13 @@ public void testVerifyChecksum() throws Exception { /** * Tests that if data is modified, then the checksums should not match. */ - @Test - public void testIncorrectChecksum() throws Exception { - Checksum checksum = getChecksum(null); + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testIncorrectChecksum(boolean useChecksumCache) throws Exception { + Checksum checksum = getChecksum(null, useChecksumCache); byte[] data = RandomStringUtils.randomAlphabetic(55).getBytes(UTF_8); - ChecksumData originalChecksumData = checksum.computeChecksum(data); + ByteBuffer byteBuffer = ByteBuffer.wrap(data); + ChecksumData originalChecksumData = checksum.computeChecksum(byteBuffer, useChecksumCache); // Change the data and check if new checksum matches the original checksum. // Modifying one byte of data should be enough for the checksum data to @@ -83,13 +90,14 @@ public void testIncorrectChecksum() throws Exception { * Tests that checksum calculated using two different checksumTypes should * not match. */ - @Test - public void testChecksumMismatchForDifferentChecksumTypes() { + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testChecksumMismatchForDifferentChecksumTypes(boolean useChecksumCache) { // Checksum1 of type SHA-256 - Checksum checksum1 = getChecksum(null); + Checksum checksum1 = getChecksum(null, useChecksumCache); // Checksum2 of type CRC32 - Checksum checksum2 = getChecksum(ContainerProtos.ChecksumType.CRC32); + Checksum checksum2 = getChecksum(ContainerProtos.ChecksumType.CRC32, useChecksumCache); // The two checksums should not match as they have different types assertNotEquals(checksum1, checksum2, "Checksums should not match for different checksum types"); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumCache.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumCache.java new file mode 100644 index 00000000000..49e0b75127a --- /dev/null +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumCache.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.common; + +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType; +import org.apache.hadoop.ozone.common.Checksum.Algorithm; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.nio.ByteBuffer; +import java.util.List; +import java.util.function.Function; + +/** + * Test class for {@link ChecksumCache}. + */ +class TestChecksumCache { + public static final Logger LOG = LoggerFactory.getLogger(TestChecksumCache.class); + + @ParameterizedTest + @EnumSource(ChecksumType.class) + void testComputeChecksum(ChecksumType checksumType) throws Exception { + final int bytesPerChecksum = 16; + ChecksumCache checksumCache = new ChecksumCache(bytesPerChecksum); + + final int size = 66; + byte[] byteArray = new byte[size]; + // Fill byteArray with bytes from 0 to 127 for deterministic testing + for (int i = 0; i < size; i++) { + byteArray[i] = (byte) (i % 128); + } + + final Function function = Algorithm.valueOf(checksumType).newChecksumFunction(); + + int iEnd = size / bytesPerChecksum + (size % bytesPerChecksum == 0 ? 0 : 1); + List lastRes = null; + for (int i = 0; i < iEnd; i++) { + int byteBufferLength = Integer.min(byteArray.length, bytesPerChecksum * (i + 1)); + ByteBuffer byteBuffer = ByteBuffer.wrap(byteArray, 0, byteBufferLength); + + try (ChunkBuffer chunkBuffer = ChunkBuffer.wrap(byteBuffer.asReadOnlyBuffer())) { + List res = checksumCache.computeChecksum(chunkBuffer, function); + System.out.println(res); + // Verify that every entry in the res list except the last one is the same as the one in lastRes list + if (i > 0) { + for (int j = 0; j < res.size() - 1; j++) { + Assertions.assertEquals(lastRes.get(j), res.get(j)); + } + } + lastRes = res; + } + } + + // Sanity check + checksumCache.clear(); + } +} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java index 2b7592e1c35..20372dcc6ea 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java @@ -293,18 +293,31 @@ public static Builder newReadChunkRequestBuilder(Pipeline pipeline, */ public static ContainerCommandRequestProto getCreateContainerRequest( long containerID, Pipeline pipeline) throws IOException { + return getCreateContainerRequest(containerID, pipeline, ContainerProtos.ContainerDataProto.State.OPEN); + } + + + /** + * Returns a create container command for test purposes. There are a bunch of + * tests where we need to just send a request and get a reply. + * + * @return ContainerCommandRequestProto. 
+ */ + public static ContainerCommandRequestProto getCreateContainerRequest( + long containerID, Pipeline pipeline, ContainerProtos.ContainerDataProto.State state) throws IOException { LOG.trace("addContainer: {}", containerID); - return getContainerCommandRequestBuilder(containerID, pipeline).build(); + return getContainerCommandRequestBuilder(containerID, pipeline, state) + .build(); } private static Builder getContainerCommandRequestBuilder(long containerID, - Pipeline pipeline) throws IOException { + Pipeline pipeline, ContainerProtos.ContainerDataProto.State state) throws IOException { Builder request = ContainerCommandRequestProto.newBuilder(); request.setCmdType(ContainerProtos.Type.CreateContainer); request.setContainerID(containerID); request.setCreateContainer( - ContainerProtos.CreateContainerRequestProto.getDefaultInstance()); + ContainerProtos.CreateContainerRequestProto.getDefaultInstance().toBuilder().setState(state).build()); request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); return request; @@ -320,7 +333,8 @@ public static ContainerCommandRequestProto getCreateContainerSecureRequest( long containerID, Pipeline pipeline, Token token) throws IOException { LOG.trace("addContainer: {}", containerID); - Builder request = getContainerCommandRequestBuilder(containerID, pipeline); + Builder request = getContainerCommandRequestBuilder(containerID, pipeline, + ContainerProtos.ContainerDataProto.State.OPEN); if (token != null) { request.setEncodedToken(token.encodeToUrlString()); } diff --git a/hadoop-hdds/config/pom.xml b/hadoop-hdds/config/pom.xml index 1c71bf3d90a..60c63475ae3 100644 --- a/hadoop-hdds/config/pom.xml +++ b/hadoop-hdds/config/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-config - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Config Tools Apache Ozone HDDS Config jar diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml index d73bea95895..c21ca8203b5 100644 --- a/hadoop-hdds/container-service/pom.xml +++ b/hadoop-hdds/container-service/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-container-service - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Container Service Apache Ozone HDDS Container Service jar diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBean.java index d36fcdb6fc7..9c077a8e27b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBean.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBean.java @@ -26,4 +26,32 @@ */ @InterfaceAudience.Private public interface DNMXBean extends ServiceRuntimeInfo { + + /** + * Gets the datanode hostname. + * + * @return the datanode hostname for the datanode. + */ + String getHostname(); + + /** + * Gets the client rpc port. + * + * @return the client rpc port + */ + String getClientRpcPort(); + + /** + * Gets the http port. + * + * @return the http port + */ + String getHttpPort(); + + /** + * Gets the https port. 
+ * + * @return the http port + */ + String getHttpsPort(); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBeanImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBeanImpl.java index f7b484c6bb3..5a0a4556636 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBeanImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBeanImpl.java @@ -25,8 +25,53 @@ * This is the JMX management class for DN information. */ public class DNMXBeanImpl extends ServiceRuntimeInfoImpl implements DNMXBean { - public DNMXBeanImpl( - VersionInfo versionInfo) { + + private String hostName; + private String clientRpcPort; + private String httpPort; + private String httpsPort; + + public DNMXBeanImpl(VersionInfo versionInfo) { super(versionInfo); } + + @Override + public String getHostname() { + return hostName; + } + + @Override + public String getClientRpcPort() { + return clientRpcPort; + } + + @Override + public String getHttpPort() { + return httpPort; + } + + @Override + public String getHttpsPort() { + return httpsPort; + } + + public void setHttpPort(String httpPort) { + this.httpPort = httpPort; + } + + public void setHostName(String hostName) { + this.hostName = hostName; + } + + public void setClientRpcPort(String rpcPort) { + this.clientRpcPort = rpcPort; + } + + public String getHostName() { + return hostName; + } + + public void setHttpsPort(String httpsPort) { + this.httpsPort = httpsPort; + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index 6b32b74dc7c..de21e37503a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.SecretKeyProtocol; import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.symmetric.DefaultSecretKeyClient; import org.apache.hadoop.hdds.security.symmetric.SecretKeyClient; @@ -116,8 +117,7 @@ public class HddsDatanodeService extends GenericCli implements ServicePlugin { private final Map ratisMetricsMap = new ConcurrentHashMap<>(); private List ratisReporterList = null; - private DNMXBeanImpl serviceRuntimeInfo = - new DNMXBeanImpl(HddsVersionInfo.HDDS_VERSION_INFO) { }; + private DNMXBeanImpl serviceRuntimeInfo; private ObjectName dnInfoBeanName; private HddsDatanodeClientProtocolServer clientProtocolServer; private OzoneAdmins admins; @@ -210,6 +210,12 @@ public void start(OzoneConfiguration configuration) { } public void start() { + serviceRuntimeInfo = new DNMXBeanImpl(HddsVersionInfo.HDDS_VERSION_INFO) { + @Override + public String getNamespace() { + return SCMHAUtils.getScmServiceId(conf); + } + }; serviceRuntimeInfo.setStartTime(); ratisReporterList = RatisDropwizardExports @@ -222,13 +228,13 @@ public void start() { String ip = InetAddress.getByName(hostname).getHostAddress(); datanodeDetails = initializeDatanodeDetails(); datanodeDetails.setHostName(hostname); + serviceRuntimeInfo.setHostName(hostname); datanodeDetails.setIpAddress(ip); 
datanodeDetails.setVersion( HddsVersionInfo.HDDS_VERSION_INFO.getVersion()); datanodeDetails.setSetupTime(Time.now()); datanodeDetails.setRevision( HddsVersionInfo.HDDS_VERSION_INFO.getRevision()); - datanodeDetails.setBuildDate(HddsVersionInfo.HDDS_VERSION_INFO.getDate()); TracingUtil.initTracing( "HddsDatanodeService." + datanodeDetails.getUuidString() .substring(0, 8), conf); @@ -295,23 +301,30 @@ public void start() { httpServer = new HddsDatanodeHttpServer(conf); httpServer.start(); HttpConfig.Policy policy = HttpConfig.getHttpPolicy(conf); + if (policy.isHttpEnabled()) { - datanodeDetails.setPort(DatanodeDetails.newPort(HTTP, - httpServer.getHttpAddress().getPort())); + int httpPort = httpServer.getHttpAddress().getPort(); + datanodeDetails.setPort(DatanodeDetails.newPort(HTTP, httpPort)); + serviceRuntimeInfo.setHttpPort(String.valueOf(httpPort)); } + if (policy.isHttpsEnabled()) { - datanodeDetails.setPort(DatanodeDetails.newPort(HTTPS, - httpServer.getHttpsAddress().getPort())); + int httpsPort = httpServer.getHttpsAddress().getPort(); + datanodeDetails.setPort(DatanodeDetails.newPort(HTTPS, httpsPort)); + serviceRuntimeInfo.setHttpsPort(String.valueOf(httpsPort)); } + } catch (Exception ex) { LOG.error("HttpServer failed to start.", ex); } - clientProtocolServer = new HddsDatanodeClientProtocolServer( datanodeDetails, conf, HddsVersionInfo.HDDS_VERSION_INFO, reconfigurationHandler); + int clientRpcPort = clientProtocolServer.getClientRpcAddress().getPort(); + serviceRuntimeInfo.setClientRpcPort(String.valueOf(clientRpcPort)); + // Get admin list String starterUser = UserGroupInformation.getCurrentUser().getShortUserName(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java index eeed4fab5f7..52217ce7f83 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java @@ -24,7 +24,7 @@ import org.apache.hadoop.security.authorize.Service; import org.apache.ratis.util.MemoizedSupplier; -import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.function.Supplier; @@ -50,7 +50,7 @@ public static HddsPolicyProvider getInstance() { } private static final List DN_SERVICES = - Arrays.asList( + Collections.singletonList( new Service( OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL, ReconfigureProtocol.class) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/CommandHandlerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/CommandHandlerMetrics.java index a6e4d6258d9..e52565952a5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/CommandHandlerMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/CommandHandlerMetrics.java @@ -34,6 +34,7 @@ import static org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics.CommandMetricsMetricsInfo.TotalRunTimeMs; import static org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics.CommandMetricsMetricsInfo.QueueWaitingTaskCount; import static org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics.CommandMetricsMetricsInfo.InvocationCount; +import static
org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics.CommandMetricsMetricsInfo.AvgRunTimeMs; import static org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics.CommandMetricsMetricsInfo.ThreadPoolActivePoolSize; import static org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics.CommandMetricsMetricsInfo.ThreadPoolMaxPoolSize; import static org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics.CommandMetricsMetricsInfo.CommandReceivedCount; @@ -46,6 +47,7 @@ public final class CommandHandlerMetrics implements MetricsSource { enum CommandMetricsMetricsInfo implements MetricsInfo { Command("The type of the SCM command"), TotalRunTimeMs("The total runtime of the command handler in milliseconds"), + AvgRunTimeMs("Average run time of the command handler in milliseconds"), QueueWaitingTaskCount("The number of queued tasks waiting for execution"), InvocationCount("The number of times the command handler has been invoked"), ThreadPoolActivePoolSize("The number of active threads in the thread pool"), @@ -108,6 +110,7 @@ public void getMetrics(MetricsCollector collector, boolean all) { commandHandler.getCommandType().name()); builder.addGauge(TotalRunTimeMs, commandHandler.getTotalRunTime()); + builder.addGauge(AvgRunTimeMs, commandHandler.getAverageRunTime()); builder.addGauge(QueueWaitingTaskCount, commandHandler.getQueuedCount()); builder.addGauge(InvocationCount, commandHandler.getInvocationCount()); int activePoolSize = commandHandler.getThreadPoolActivePoolSize(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java index 91bdb17cda9..03dbce061bb 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java @@ -30,7 +30,9 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong; import org.apache.hadoop.metrics2.lib.MutableQuantiles; import org.apache.hadoop.metrics2.lib.MutableRate; +import org.apache.hadoop.ozone.util.MetricUtil; +import java.io.Closeable; import java.util.EnumMap; /** @@ -47,7 +49,7 @@ */ @InterfaceAudience.Private @Metrics(about = "Storage Container DataNode Metrics", context = "dfs") -public class ContainerMetrics { +public class ContainerMetrics implements Closeable { public static final String STORAGE_CONTAINER_METRICS = "StorageContainerMetrics"; @Metric private MutableCounterLong numOps; @@ -109,6 +111,11 @@ public static void remove() { ms.unregisterSource(STORAGE_CONTAINER_METRICS); } + @Override + public void close() { + opsLatQuantiles.values().forEach(MetricUtil::stop); + } + public void incContainerOpsMetrics(ContainerProtos.Type type) { numOps.incr(); numOpsArray.get(type).incr(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java index 15cc6245ddb..8dd35064e6b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java @@ -23,8 +23,12 @@ import 
com.google.common.collect.ImmutableMap; import com.google.protobuf.Message; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State; + import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.hdds.utils.db.InMemoryTestTable; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.common.utils.ContainerLogger; @@ -65,10 +69,24 @@ public class ContainerSet implements Iterable> { new ConcurrentSkipListMap<>(); private Clock clock; private long recoveringTimeout; + private final Table containerIdsTable; + @VisibleForTesting public ContainerSet(long recoveringTimeout) { + this(new InMemoryTestTable<>(), recoveringTimeout); + } + + public ContainerSet(Table continerIdsTable, long recoveringTimeout) { + this(continerIdsTable, recoveringTimeout, false); + } + + public ContainerSet(Table continerIdsTable, long recoveringTimeout, boolean readOnly) { this.clock = Clock.system(ZoneOffset.UTC); + this.containerIdsTable = continerIdsTable; this.recoveringTimeout = recoveringTimeout; + if (!readOnly && containerIdsTable == null) { + throw new IllegalArgumentException("Container table cannot be null when container set is not read only"); + } } public long getCurrentTime() { @@ -85,22 +103,64 @@ public void setRecoveringTimeout(long recoveringTimeout) { this.recoveringTimeout = recoveringTimeout; } + /** + * Add Container to container map. This would fail if the container is already present or has been marked as missing. + * @param container container to be added + * @return If container is added to containerMap returns true, otherwise + * false + */ + public boolean addContainer(Container container) throws StorageContainerException { + return addContainer(container, false); + } + + /** + * Add Container to container map. This would overwrite the container even if it is missing. But would fail if the + * container is already present. + * @param container container to be added + * @return If container is added to containerMap returns true, otherwise + * false + */ + public boolean addContainerByOverwriteMissingContainer(Container container) throws StorageContainerException { + return addContainer(container, true); + } + + public void ensureContainerNotMissing(long containerId, State state) throws StorageContainerException { + if (missingContainerSet.contains(containerId)) { + throw new StorageContainerException(String.format("Container with container Id %d with state : %s is missing in" + + " the DN.", containerId, state), + ContainerProtos.Result.CONTAINER_MISSING); + } + } + /** * Add Container to container map. * @param container container to be added + * @param overwrite if true should overwrite the container if the container was missing. 
* @return If container is added to containerMap returns true, otherwise * false */ - public boolean addContainer(Container container) throws + private boolean addContainer(Container container, boolean overwrite) throws StorageContainerException { Preconditions.checkNotNull(container, "container cannot be null"); long containerId = container.getContainerData().getContainerID(); + State containerState = container.getContainerData().getState(); + if (!overwrite) { + ensureContainerNotMissing(containerId, containerState); + } if (containerMap.putIfAbsent(containerId, container) == null) { if (LOG.isDebugEnabled()) { LOG.debug("Container with container Id {} is added to containerMap", containerId); } + try { + if (containerIdsTable != null) { + containerIdsTable.put(containerId, containerState.toString()); + } + } catch (IOException e) { + throw new StorageContainerException(e, ContainerProtos.Result.IO_EXCEPTION); + } + missingContainerSet.remove(containerId); // wish we could have done this from ContainerData.setState container.getContainerData().commitSpace(); if (container.getContainerData().getState() == RECOVERING) { @@ -122,21 +182,69 @@ public boolean addContainer(Container container) throws * @return Container */ public Container getContainer(long containerId) { - Preconditions.checkState(containerId >= 0, - "Container Id cannot be negative."); + Preconditions.checkState(containerId >= 0, "Container Id cannot be negative."); return containerMap.get(containerId); } + /** + * Removes container from both memory and database. This should be used when the containerData on disk has been + * removed completely from the node. + * @param containerId + * @return True if container is removed from containerMap. + * @throws StorageContainerException + */ + public boolean removeContainer(long containerId) throws StorageContainerException { + return removeContainer(containerId, false, true); + } + + /** + * Removes containerId from memory. This needs to be used when the container is still present on disk, and the + * inmemory state of the container needs to be updated. + * @param containerId + * @return True if container is removed from containerMap. + * @throws StorageContainerException + */ + public boolean removeContainerOnlyFromMemory(long containerId) throws StorageContainerException { + return removeContainer(containerId, false, false); + } + + /** + * Marks a container to be missing, thus it removes the container from inmemory containerMap and marks the + * container as missing. + * @param containerId + * @return True if container is removed from containerMap. + * @throws StorageContainerException + */ + public boolean removeMissingContainer(long containerId) throws StorageContainerException { + return removeContainer(containerId, true, false); + } + /** * Removes the Container matching with specified containerId. * @param containerId ID of the container to remove * @return If container is removed from containerMap returns true, otherwise * false */ - public boolean removeContainer(long containerId) { + private boolean removeContainer(long containerId, boolean markMissing, boolean removeFromDB) + throws StorageContainerException { Preconditions.checkState(containerId >= 0, "Container Id cannot be negative."); + //We need to add to missing container set before removing containerMap since there could be write chunk operation + // that could recreate the container in another volume if we remove it from the map before adding to missing + // container. 
+ if (markMissing) { + missingContainerSet.add(containerId); + } Container removed = containerMap.remove(containerId); + if (removeFromDB) { + try { + if (containerIdsTable != null) { + containerIdsTable.delete(containerId); + } + } catch (IOException e) { + throw new StorageContainerException(e, ContainerProtos.Result.IO_EXCEPTION); + } + } if (removed == null) { LOG.debug("Container with containerId {} is not present in " + "containerMap", containerId); @@ -190,20 +298,20 @@ public int containerCount() { * * @param context StateContext */ - public void handleVolumeFailures(StateContext context) { + public void handleVolumeFailures(StateContext context) throws StorageContainerException { AtomicBoolean failedVolume = new AtomicBoolean(false); AtomicInteger containerCount = new AtomicInteger(0); - containerMap.values().forEach(c -> { + for (Container c : containerMap.values()) { ContainerData data = c.getContainerData(); if (data.getVolume().isFailed()) { - removeContainer(data.getContainerID()); + removeMissingContainer(data.getContainerID()); LOG.debug("Removing Container {} as the Volume {} " + - "has failed", data.getContainerID(), data.getVolume()); + "has failed", data.getContainerID(), data.getVolume()); failedVolume.set(true); containerCount.incrementAndGet(); ContainerLogger.logLost(data, "Volume failure"); } - }); + } if (failedVolume.get()) { try { @@ -251,6 +359,21 @@ public Iterator> getContainerIterator(HddsVolume volume) { .iterator(); } + /** + * Get the number of containers based on the given volume. + * + * @param volume hdds volume. + * @return number of containers + */ + public long containerCount(HddsVolume volume) { + Preconditions.checkNotNull(volume); + Preconditions.checkNotNull(volume.getStorageID()); + String volumeUuid = volume.getStorageID(); + return containerMap.values().stream() + .filter(x -> volumeUuid.equals(x.getContainerData().getVolume() + .getStorageID())).count(); + } + /** * Return an containerMap iterator over {@link ContainerSet#containerMap}. * @return containerMap Iterator @@ -347,6 +470,10 @@ public Set getMissingContainerSet() { return missingContainerSet; } + public Table getContainerIdsTable() { + return containerIdsTable; + } + /** * Builds the missing container set by taking a diff between total no * containers actually found and number of containers which actually diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java index 28aa3d8588f..cd99b909231 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java @@ -177,7 +177,8 @@ private boolean canIgnoreException(Result result) { case CONTAINER_UNHEALTHY: case CLOSED_CONTAINER_IO: case DELETE_ON_OPEN_CONTAINER: - case UNSUPPORTED_REQUEST: // Blame client for sending unsupported request. + case UNSUPPORTED_REQUEST:// Blame client for sending unsupported request. 
+ case CONTAINER_MISSING: return true; default: return false; @@ -278,7 +279,8 @@ private ContainerCommandResponseProto dispatchRequest( getMissingContainerSet().remove(containerID); } } - if (getMissingContainerSet().contains(containerID)) { + if (cmdType != Type.CreateContainer && !HddsUtils.isReadOnly(msg) + && getMissingContainerSet().contains(containerID)) { StorageContainerException sce = new StorageContainerException( "ContainerID " + containerID + " has been lost and cannot be recreated on this DataNode", diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java index 77a4d97878d..fb9dc49071b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java @@ -23,6 +23,7 @@ import java.io.OutputStream; import java.util.Set; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; @@ -96,7 +97,8 @@ public abstract StateMachine.DataChannel getStreamDataChannel( * * @return datanode Id */ - protected String getDatanodeId() { + @VisibleForTesting + public String getDatanodeId() { return datanodeId; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java index a6c3b11de92..b3854e7ecd2 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java @@ -234,12 +234,17 @@ public void logIfNeeded(Exception ex) { } if (missCounter == 0) { + long missedDurationSeconds = TimeUnit.MILLISECONDS.toSeconds( + this.getMissedCount() * getScmHeartbeatInterval(this.conf) + ); LOG.warn( - "Unable to communicate to {} server at {} for past {} seconds.", - serverName, - getAddress().getHostString() + ":" + getAddress().getPort(), - TimeUnit.MILLISECONDS.toSeconds(this.getMissedCount() * - getScmHeartbeatInterval(this.conf)), ex); + "Unable to communicate to {} server at {}:{} for past {} seconds.", + serverName, + address.getAddress(), + address.getPort(), + missedDurationSeconds, + ex + ); } if (LOG.isTraceEnabled()) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java index bc703ac6a55..cd032d4b275 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java @@ -31,6 +31,8 @@ import org.apache.hadoop.hdds.protocol.proto 
.StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto; import org.apache.hadoop.hdds.tracing.TracingUtil; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.statemachine .SCMConnectionManager; @@ -58,7 +60,7 @@ public class CloseContainerCommandHandler implements CommandHandler { private final AtomicLong invocationCount = new AtomicLong(0); private final AtomicInteger queuedCount = new AtomicInteger(0); private final ThreadPoolExecutor executor; - private long totalTime; + private final MutableRate opsLatencyMs; /** * Constructs a close container command handler. @@ -72,6 +74,9 @@ public CloseContainerCommandHandler( new ThreadFactoryBuilder() .setNameFormat(threadNamePrefix + "CloseContainerThread-%d") .build()); + MetricsRegistry registry = new MetricsRegistry( + CloseContainerCommandHandler.class.getSimpleName()); + this.opsLatencyMs = registry.newRate(SCMCommandProto.Type.closeContainerCommand + "Ms"); } /** @@ -155,7 +160,7 @@ public void handle(SCMCommand command, OzoneContainer ozoneContainer, LOG.error("Can't close container #{}", containerId, e); } finally { long endTime = Time.monotonicNow(); - totalTime += endTime - startTime; + this.opsLatencyMs.add(endTime - startTime); } }, executor).whenComplete((v, e) -> queuedCount.decrementAndGet()); } @@ -204,15 +209,12 @@ public int getInvocationCount() { */ @Override public long getAverageRunTime() { - if (invocationCount.get() > 0) { - return totalTime / invocationCount.get(); - } - return 0; + return (long) this.opsLatencyMs.lastStat().mean(); } @Override public long getTotalRunTime() { - return totalTime; + return (long) this.opsLatencyMs.lastStat().total(); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ClosePipelineCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ClosePipelineCommandHandler.java index 241abb6f4ae..be39277fdfa 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ClosePipelineCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ClosePipelineCommandHandler.java @@ -24,6 +24,8 @@ import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.statemachine .SCMConnectionManager; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; @@ -60,9 +62,9 @@ public class ClosePipelineCommandHandler implements CommandHandler { private final AtomicLong invocationCount = new AtomicLong(0); private final AtomicInteger queuedCount = new AtomicInteger(0); - private long totalTime; private final Executor executor; private final BiFunction newRaftClient; + private final MutableRate opsLatencyMs; /** * Constructs a closePipelineCommand handler. 
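The command handlers in this patch drop their hand-maintained totalTime counters and record per-invocation latency in a MutableRate instead, deriving both the average and the total runtime from its last sampled statistics. A minimal sketch of that pattern outside any real handler (class and metric names are illustrative assumptions):

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableRate;
import org.apache.hadoop.util.Time;

// Illustrative skeleton: record each run's latency in a MutableRate and report
// mean/total from its last sampled statistics, mirroring the handlers above.
class ExampleCommandHandler {
  private final MutableRate opsLatencyMs;

  ExampleCommandHandler() {
    MetricsRegistry registry = new MetricsRegistry("ExampleCommandHandler");
    this.opsLatencyMs = registry.newRate("exampleCommandMs");
  }

  void handle(Runnable work) {
    long startTime = Time.monotonicNow();
    try {
      work.run();
    } finally {
      opsLatencyMs.add(Time.monotonicNow() - startTime);
    }
  }

  long getAverageRunTime() {
    return (long) opsLatencyMs.lastStat().mean();
  }

  long getTotalRunTime() {
    return (long) opsLatencyMs.lastStat().total();
  }
}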
@@ -80,6 +82,9 @@ public ClosePipelineCommandHandler( Executor executor) { this.newRaftClient = newRaftClient; this.executor = executor; + MetricsRegistry registry = new MetricsRegistry( + ClosePipelineCommandHandler.class.getSimpleName()); + this.opsLatencyMs = registry.newRate(SCMCommandProto.Type.closePipelineCommand + "Ms"); } /** @@ -155,7 +160,7 @@ public void handle(SCMCommand command, OzoneContainer ozoneContainer, } } finally { long endTime = Time.monotonicNow(); - totalTime += endTime - startTime; + this.opsLatencyMs.add(endTime - startTime); } }, executor).whenComplete((v, e) -> queuedCount.decrementAndGet()); } @@ -187,15 +192,12 @@ public int getInvocationCount() { */ @Override public long getAverageRunTime() { - if (invocationCount.get() > 0) { - return totalTime / invocationCount.get(); - } - return 0; + return (long) this.opsLatencyMs.lastStat().mean(); } @Override public long getTotalRunTime() { - return totalTime; + return (long) this.opsLatencyMs.lastStat().total(); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java index 4a36a1987de..62fc8a919d8 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java @@ -30,6 +30,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; @@ -59,8 +61,8 @@ public class CreatePipelineCommandHandler implements CommandHandler { private final AtomicInteger queuedCount = new AtomicInteger(0); private final BiFunction newRaftClient; - private long totalTime; private final Executor executor; + private final MutableRate opsLatencyMs; /** * Constructs a createPipelineCommand handler. 
@@ -75,6 +77,9 @@ public CreatePipelineCommandHandler(ConfigurationSource conf, Executor executor) { this.newRaftClient = newRaftClient; this.executor = executor; + MetricsRegistry registry = new MetricsRegistry( + CreatePipelineCommandHandler.class.getSimpleName()); + this.opsLatencyMs = registry.newRate(SCMCommandProto.Type.createPipelineCommand + "Ms"); } /** @@ -135,7 +140,7 @@ public void handle(SCMCommand command, OzoneContainer ozoneContainer, } } finally { long endTime = Time.monotonicNow(); - totalTime += endTime - startTime; + this.opsLatencyMs.add(endTime - startTime); } }, executor).whenComplete((v, e) -> queuedCount.decrementAndGet()); } @@ -167,15 +172,12 @@ public int getInvocationCount() { */ @Override public long getAverageRunTime() { - if (invocationCount.get() > 0) { - return totalTime / invocationCount.get(); - } - return 0; + return (long) this.opsLatencyMs.lastStat().mean(); } @Override public long getTotalRunTime() { - return totalTime; + return (long) this.opsLatencyMs.lastStat().total(); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java index bd7431c6145..136c5805821 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java @@ -32,6 +32,8 @@ import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.BlockDeletingServiceMetrics; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList; @@ -91,7 +93,6 @@ public class DeleteBlocksCommandHandler implements CommandHandler { private final ContainerSet containerSet; private final ConfigurationSource conf; private int invocationCount; - private long totalTime; private final ThreadPoolExecutor executor; private final LinkedBlockingQueue deleteCommandQueues; private final Daemon handlerThread; @@ -99,6 +100,7 @@ public class DeleteBlocksCommandHandler implements CommandHandler { private final BlockDeletingServiceMetrics blockDeleteMetrics; private final long tryLockTimeoutMs; private final Map schemaHandlers; + private final MutableRate opsLatencyMs; public DeleteBlocksCommandHandler(OzoneContainer container, ConfigurationSource conf, DatanodeConfiguration dnConf, @@ -121,6 +123,9 @@ public DeleteBlocksCommandHandler(OzoneContainer container, dnConf.getBlockDeleteThreads(), threadFactory); this.deleteCommandQueues = new LinkedBlockingQueue<>(dnConf.getBlockDeleteQueueLimit()); + MetricsRegistry registry = new MetricsRegistry( + DeleteBlocksCommandHandler.class.getSimpleName()); + this.opsLatencyMs = registry.newRate(SCMCommandProto.Type.deleteBlocksCommand + "Ms"); long interval = dnConf.getBlockDeleteCommandWorkerInterval().toMillis(); handlerThread = new Daemon(new DeleteCmdWorker(interval)); handlerThread.start(); @@ -354,10 +359,11 @@ private void processCmd(DeleteCmdInfo cmd) 
{ DeletedContainerBlocksSummary summary = DeletedContainerBlocksSummary.getFrom(containerBlocks); LOG.info("Summary of deleting container blocks, numOfTransactions={}, " - + "numOfContainers={}, numOfBlocks={}", + + "numOfContainers={}, numOfBlocks={}, commandId={}.", summary.getNumOfTxs(), summary.getNumOfContainers(), - summary.getNumOfBlocks()); + summary.getNumOfBlocks(), + cmd.getCmd().getId()); if (LOG.isDebugEnabled()) { LOG.debug("Start to delete container blocks, TXIDs={}", summary.getTxIDSummary()); @@ -384,7 +390,8 @@ private void processCmd(DeleteCmdInfo cmd) { LOG.debug("Sending following block deletion ACK to SCM"); for (DeleteBlockTransactionResult result : blockDeletionACK .getResultsList()) { - LOG.debug("{} : {}", result.getTxID(), result.getSuccess()); + LOG.debug("TxId = {} : ContainerId = {} : {}", + result.getTxID(), result.getContainerID(), result.getSuccess()); } } } @@ -403,7 +410,7 @@ private void processCmd(DeleteCmdInfo cmd) { }; updateCommandStatus(cmd.getContext(), cmd.getCmd(), statusUpdater, LOG); long endTime = Time.monotonicNow(); - totalTime += endTime - startTime; + this.opsLatencyMs.add(endTime - startTime); invocationCount++; } } @@ -666,15 +673,12 @@ public int getInvocationCount() { @Override public long getAverageRunTime() { - if (invocationCount > 0) { - return totalTime / invocationCount; - } - return 0; + return (long) this.opsLatencyMs.lastStat().mean(); } @Override public long getTotalRunTime() { - return totalTime; + return (long) this.opsLatencyMs.lastStat().total(); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java index b76e306e1c0..59aaacc1c80 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java @@ -22,6 +22,8 @@ import java.util.concurrent.RejectedExecutionException; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMCommandProto; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.statemachine .SCMConnectionManager; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; @@ -39,7 +41,6 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; /** * Handler to process the DeleteContainerCommand from SCM. 
@@ -51,10 +52,10 @@ public class DeleteContainerCommandHandler implements CommandHandler { private final AtomicInteger invocationCount = new AtomicInteger(0); private final AtomicInteger timeoutCount = new AtomicInteger(0); - private final AtomicLong totalTime = new AtomicLong(0); private final ThreadPoolExecutor executor; private final Clock clock; private int maxQueueSize; + private final MutableRate opsLatencyMs; public DeleteContainerCommandHandler( int threadPoolSize, Clock clock, int queueSize, String threadNamePrefix) { @@ -73,6 +74,9 @@ protected DeleteContainerCommandHandler(Clock clock, this.executor = executor; this.clock = clock; maxQueueSize = queueSize; + MetricsRegistry registry = new MetricsRegistry( + DeleteContainerCommandHandler.class.getSimpleName()); + this.opsLatencyMs = registry.newRate(SCMCommandProto.Type.deleteContainerCommand + "Ms"); } @Override public void handle(final SCMCommand command, @@ -124,7 +128,7 @@ private void handleInternal(SCMCommand command, StateContext context, } catch (IOException e) { LOG.error("Exception occurred while deleting the container.", e); } finally { - totalTime.getAndAdd(Time.monotonicNow() - startTime); + this.opsLatencyMs.add(Time.monotonicNow() - startTime); } } @@ -149,14 +153,12 @@ public int getTimeoutCount() { @Override public long getAverageRunTime() { - final int invocations = invocationCount.get(); - return invocations == 0 ? - 0 : totalTime.get() / invocations; + return (long) this.opsLatencyMs.lastStat().mean(); } @Override public long getTotalRunTime() { - return totalTime.get(); + return (long) this.opsLatencyMs.lastStat().total(); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/FinalizeNewLayoutVersionCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/FinalizeNewLayoutVersionCommandHandler.java index bd7ec5710d9..77e152447b9 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/FinalizeNewLayoutVersionCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/FinalizeNewLayoutVersionCommandHandler.java @@ -20,6 +20,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.FinalizeNewLayoutVersionCommandProto; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMCommandProto; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine .SCMConnectionManager; @@ -42,12 +44,15 @@ public class FinalizeNewLayoutVersionCommandHandler implements CommandHandler { LoggerFactory.getLogger(FinalizeNewLayoutVersionCommandHandler.class); private AtomicLong invocationCount = new AtomicLong(0); - private long totalTime; + private final MutableRate opsLatencyMs; /** * Constructs a FinalizeNewLayoutVersionCommandHandler. 
*/ public FinalizeNewLayoutVersionCommandHandler() { + MetricsRegistry registry = new MetricsRegistry( + FinalizeNewLayoutVersionCommandHandler.class.getSimpleName()); + this.opsLatencyMs = registry.newRate(SCMCommandProto.Type.finalizeNewLayoutVersionCommand + "Ms"); } /** @@ -82,7 +87,7 @@ public void handle(SCMCommand command, OzoneContainer ozoneContainer, LOG.error("Exception during finalization.", e); } finally { long endTime = Time.monotonicNow(); - totalTime += endTime - startTime; + this.opsLatencyMs.add(endTime - startTime); } } @@ -113,15 +118,12 @@ public int getInvocationCount() { */ @Override public long getAverageRunTime() { - if (invocationCount.get() > 0) { - return totalTime / invocationCount.get(); - } - return 0; + return (long) this.opsLatencyMs.lastStat().mean(); } @Override public long getTotalRunTime() { - return totalTime; + return (long) this.opsLatencyMs.lastStat().total(); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReconstructECContainersCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReconstructECContainersCommandHandler.java index 602687d7a00..030d169e9b8 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReconstructECContainersCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReconstructECContainersCommandHandler.java @@ -36,6 +36,7 @@ public class ReconstructECContainersCommandHandler implements CommandHandler { private final ReplicationSupervisor supervisor; private final ECReconstructionCoordinator coordinator; private final ConfigurationSource conf; + private String metricsName; public ReconstructECContainersCommandHandler(ConfigurationSource conf, ReplicationSupervisor supervisor, @@ -52,8 +53,16 @@ public void handle(SCMCommand command, OzoneContainer container, (ReconstructECContainersCommand) command; ECReconstructionCommandInfo reconstructionCommandInfo = new ECReconstructionCommandInfo(ecContainersCommand); - this.supervisor.addTask(new ECReconstructionCoordinatorTask( - coordinator, reconstructionCommandInfo)); + ECReconstructionCoordinatorTask task = new ECReconstructionCoordinatorTask( + coordinator, reconstructionCommandInfo); + if (this.metricsName == null) { + this.metricsName = task.getMetricName(); + } + this.supervisor.addTask(task); + } + + public String getMetricsName() { + return this.metricsName; } @Override @@ -63,23 +72,26 @@ public Type getCommandType() { @Override public int getInvocationCount() { - return 0; + return this.metricsName == null ? 0 : (int) this.supervisor + .getReplicationRequestCount(metricsName); } @Override public long getAverageRunTime() { - return 0; + return this.metricsName == null ? 0 : (int) this.supervisor + .getReplicationRequestAvgTime(metricsName); } @Override public long getTotalRunTime() { - return 0; + return this.metricsName == null ? 0 : this.supervisor + .getReplicationRequestTotalTime(metricsName); } @Override public int getQueuedCount() { - return supervisor - .getInFlightReplications(ECReconstructionCoordinatorTask.class); + return this.metricsName == null ? 
0 : (int) this.supervisor + .getReplicationQueuedCount(metricsName); } public ConfigurationSource getConf() { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/RefreshVolumeUsageCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/RefreshVolumeUsageCommandHandler.java index 3c14b2fb161..1ab31ba1c41 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/RefreshVolumeUsageCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/RefreshVolumeUsageCommandHandler.java @@ -18,6 +18,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; @@ -27,7 +29,6 @@ import org.slf4j.LoggerFactory; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; /** * Command handler to refresh usage info of all volumes. @@ -38,9 +39,12 @@ public class RefreshVolumeUsageCommandHandler implements CommandHandler { LoggerFactory.getLogger(RefreshVolumeUsageCommandHandler.class); private final AtomicInteger invocationCount = new AtomicInteger(0); - private final AtomicLong totalTime = new AtomicLong(0); + private final MutableRate opsLatencyMs; public RefreshVolumeUsageCommandHandler() { + MetricsRegistry registry = new MetricsRegistry( + RefreshVolumeUsageCommandHandler.class.getSimpleName()); + this.opsLatencyMs = registry.newRate(Type.refreshVolumeUsageInfo + "Ms"); } @Override @@ -50,7 +54,7 @@ public void handle(SCMCommand command, OzoneContainer container, invocationCount.incrementAndGet(); final long startTime = Time.monotonicNow(); container.getVolumeSet().refreshAllVolumeUsage(); - totalTime.getAndAdd(Time.monotonicNow() - startTime); + this.opsLatencyMs.add(Time.monotonicNow() - startTime); } @Override @@ -66,14 +70,12 @@ public int getInvocationCount() { @Override public long getAverageRunTime() { - final int invocations = invocationCount.get(); - return invocations == 0 ? 
- 0 : totalTime.get() / invocations; + return (long) this.opsLatencyMs.lastStat().mean(); } @Override public long getTotalRunTime() { - return totalTime.get(); + return (long) this.opsLatencyMs.lastStat().total(); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java index 21b26339e23..242a4eb74be 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java @@ -43,29 +43,28 @@ public class ReplicateContainerCommandHandler implements CommandHandler { static final Logger LOG = LoggerFactory.getLogger(ReplicateContainerCommandHandler.class); - private int invocationCount; - - private long totalTime; - - private ConfigurationSource conf; - private ReplicationSupervisor supervisor; private ContainerReplicator downloadReplicator; private ContainerReplicator pushReplicator; + private String metricsName; + public ReplicateContainerCommandHandler( ConfigurationSource conf, ReplicationSupervisor supervisor, ContainerReplicator downloadReplicator, ContainerReplicator pushReplicator) { - this.conf = conf; this.supervisor = supervisor; this.downloadReplicator = downloadReplicator; this.pushReplicator = pushReplicator; } + public String getMetricsName() { + return this.metricsName; + } + @Override public void handle(SCMCommand command, OzoneContainer container, StateContext context, SCMConnectionManager connectionManager) { @@ -86,12 +85,16 @@ public void handle(SCMCommand command, OzoneContainer container, downloadReplicator : pushReplicator; ReplicationTask task = new ReplicationTask(replicateCommand, replicator); + if (metricsName == null) { + metricsName = task.getMetricName(); + } supervisor.addTask(task); } @Override public int getQueuedCount() { - return supervisor.getInFlightReplications(ReplicationTask.class); + return this.metricsName == null ? 0 : (int) this.supervisor + .getReplicationQueuedCount(metricsName); } @Override @@ -101,19 +104,19 @@ public SCMCommandProto.Type getCommandType() { @Override public int getInvocationCount() { - return this.invocationCount; + return this.metricsName == null ? 0 : (int) this.supervisor + .getReplicationRequestCount(metricsName); } @Override public long getAverageRunTime() { - if (invocationCount > 0) { - return totalTime / invocationCount; - } - return 0; + return this.metricsName == null ? 0 : (int) this.supervisor + .getReplicationRequestAvgTime(metricsName); } @Override public long getTotalRunTime() { - return totalTime; + return this.metricsName == null ? 
0 : this.supervisor + .getReplicationRequestTotalTime(metricsName); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java index 6f7f4414eeb..33563624795 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java @@ -21,8 +21,10 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SetNodeOperationalStateCommandProto; import org.apache.hadoop.hdds.utils.HddsServerUtil; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; @@ -39,7 +41,6 @@ import java.io.File; import java.io.IOException; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; @@ -54,7 +55,7 @@ public class SetNodeOperationalStateCommandHandler implements CommandHandler { private final ConfigurationSource conf; private final Consumer replicationSupervisor; private final AtomicInteger invocationCount = new AtomicInteger(0); - private final AtomicLong totalTime = new AtomicLong(0); + private final MutableRate opsLatencyMs; /** * Set Node State command handler. @@ -65,6 +66,9 @@ public SetNodeOperationalStateCommandHandler(ConfigurationSource conf, Consumer replicationSupervisor) { this.conf = conf; this.replicationSupervisor = replicationSupervisor; + MetricsRegistry registry = new MetricsRegistry( + SetNodeOperationalStateCommandHandler.class.getSimpleName()); + this.opsLatencyMs = registry.newRate(Type.setNodeOperationalStateCommand + "Ms"); } /** @@ -80,9 +84,6 @@ public void handle(SCMCommand command, OzoneContainer container, StateContext context, SCMConnectionManager connectionManager) { long startTime = Time.monotonicNow(); invocationCount.incrementAndGet(); - StorageContainerDatanodeProtocolProtos.SetNodeOperationalStateCommandProto - setNodeCmdProto = null; - if (command.getType() != Type.setNodeOperationalStateCommand) { LOG.warn("Skipping handling command, expected command " + "type {} but found {}", @@ -91,7 +92,7 @@ public void handle(SCMCommand command, OzoneContainer container, } SetNodeOperationalStateCommand setNodeCmd = (SetNodeOperationalStateCommand) command; - setNodeCmdProto = setNodeCmd.getProto(); + SetNodeOperationalStateCommandProto setNodeCmdProto = setNodeCmd.getProto(); DatanodeDetails dni = context.getParent().getDatanodeDetails(); HddsProtos.NodeOperationalState state = setNodeCmdProto.getNodeOperationalState(); @@ -106,7 +107,7 @@ public void handle(SCMCommand command, OzoneContainer container, // handler interface. 
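ReplicateContainerCommandHandler and ReconstructECContainersCommandHandler take a different route from the other handlers: instead of keeping their own counters, they cache the metric name of the first task they enqueue and delegate invocation count, queued count, and run-time figures to the ReplicationSupervisor. The sketch below shows that delegation; Task and Supervisor are simplified stand-ins for ReplicationTask/ReplicationSupervisor, and only the getReplicationRequest*/getReplicationQueuedCount accessor names are taken from the patch.

// Sketch of the supervisor-delegated metrics pattern used by the replication handlers.
interface Task {
  String getMetricName();
}

interface Supervisor {
  void addTask(Task task);
  long getReplicationRequestCount(String metricsName);
  long getReplicationRequestAvgTime(String metricsName);
  long getReplicationQueuedCount(String metricsName);
}

class DelegatingHandlerSketch {
  private final Supervisor supervisor;
  private volatile String metricsName;   // lazily captured from the first task

  DelegatingHandlerSketch(Supervisor supervisor) {
    this.supervisor = supervisor;
  }

  void handle(Task task) {
    if (metricsName == null) {
      metricsName = task.getMetricName();
    }
    supervisor.addTask(task);
  }

  // Before any task has been handled there is nothing to report, hence the null checks.
  int getInvocationCount() {
    return metricsName == null ? 0 : (int) supervisor.getReplicationRequestCount(metricsName);
  }

  long getAverageRunTime() {
    return metricsName == null ? 0 : supervisor.getReplicationRequestAvgTime(metricsName);
  }

  int getQueuedCount() {
    return metricsName == null ? 0 : (int) supervisor.getReplicationQueuedCount(metricsName);
  }
}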
} replicationSupervisor.accept(state); - totalTime.addAndGet(Time.monotonicNow() - startTime); + this.opsLatencyMs.add(Time.monotonicNow() - startTime); } // TODO - this duplicates code in HddsDatanodeService and InitDatanodeState @@ -125,8 +126,7 @@ private void persistDatanodeDetails(DatanodeDetails dnDetails) * @return Type */ @Override - public StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type - getCommandType() { + public Type getCommandType() { return Type.setNodeOperationalStateCommand; } @@ -147,14 +147,12 @@ public int getInvocationCount() { */ @Override public long getAverageRunTime() { - final int invocations = invocationCount.get(); - return invocations == 0 ? - 0 : totalTime.get() / invocations; + return (long) this.opsLatencyMs.lastStat().mean(); } @Override public long getTotalRunTime() { - return totalTime.get(); + return (long) this.opsLatencyMs.lastStat().total(); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java index e702b1e6e15..968c9b9a6e6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.container.common.states.endpoint; import java.io.IOException; +import java.net.BindException; import java.util.concurrent.Callable; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -104,7 +105,7 @@ public EndpointStateMachine.EndPointStates call() throws Exception { LOG.debug("Cannot execute GetVersion task as endpoint state machine " + "is in {} state", rpcEndPoint.getState()); } - } catch (DiskOutOfSpaceException ex) { + } catch (DiskOutOfSpaceException | BindException ex) { rpcEndPoint.setState(EndpointStateMachine.EndPointStates.SHUTDOWN); } catch (IOException ex) { rpcEndPoint.logIfNeeded(ex); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java index 9c3f29d0f0c..5f1914402d0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java @@ -20,7 +20,6 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; import org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc; -import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.ratis.grpc.util.ZeroCopyMessageMarshaller; import org.apache.ratis.thirdparty.com.google.protobuf.MessageLite; @@ -31,7 +30,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.InputStream; import java.util.concurrent.atomic.AtomicBoolean; import static org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc.getSendMethod; @@ 
-45,28 +43,20 @@ public class GrpcXceiverService extends LOG = LoggerFactory.getLogger(GrpcXceiverService.class); private final ContainerDispatcher dispatcher; - private final boolean zeroCopyEnabled; private final ZeroCopyMessageMarshaller zeroCopyMessageMarshaller = new ZeroCopyMessageMarshaller<>( ContainerCommandRequestProto.getDefaultInstance()); - public GrpcXceiverService(ContainerDispatcher dispatcher, - boolean zeroCopyEnabled) { + public GrpcXceiverService(ContainerDispatcher dispatcher) { this.dispatcher = dispatcher; - this.zeroCopyEnabled = zeroCopyEnabled; } /** - * Bind service with zerocopy marshaller equipped for the `send` API if - * zerocopy is enabled. + * Bind service with zerocopy marshaller equipped for the `send` API. * @return service definition. */ public ServerServiceDefinition bindServiceWithZeroCopy() { ServerServiceDefinition orig = super.bindService(); - if (!zeroCopyEnabled) { - LOG.info("Zerocopy is not enabled."); - return orig; - } ServerServiceDefinition.Builder builder = ServerServiceDefinition.builder(orig.getServiceDescriptor().getName()); @@ -117,10 +107,7 @@ public void onNext(ContainerCommandRequestProto request) { isClosed.set(true); responseObserver.onError(e); } finally { - InputStream popStream = zeroCopyMessageMarshaller.popStream(request); - if (popStream != null) { - IOUtils.close(LOG, popStream); - } + zeroCopyMessageMarshaller.release(request); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java index ad9c5c9d9ca..0d95ac25eda 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.container.common.transport.server; import java.io.IOException; +import java.net.BindException; import java.util.Collections; import java.util.List; import java.util.UUID; @@ -29,7 +30,6 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -66,9 +66,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_EC_GRPC_ZERO_COPY_ENABLED; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_EC_GRPC_ZERO_COPY_ENABLED_DEFAULT; - /** * Creates a Grpc server endpoint that acts as the communication layer for * Ozone containers. 
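The VersionEndpointTask hunk above now treats BindException like DiskOutOfSpaceException and moves the endpoint to SHUTDOWN instead of retrying; the XceiverServerGrpc hunks that follow supply that exception by rethrowing a gRPC bind failure as java.net.BindException. The sketch below illustrates that translation under simplified names; GrpcServer is an illustrative stand-in for the real gRPC server type, and only the "Failed to bind to address" message check mirrors the patch.

import java.io.IOException;
import java.net.BindException;

// Sketch: surface a port conflict from server start as BindException so callers
// (such as the endpoint task above) can treat it as fatal rather than transient.
final class GrpcStartSketch {
  interface GrpcServer {          // stand-in for the underlying gRPC server
    void start() throws IOException;
  }

  static void startOrFailFast(GrpcServer server) throws IOException {
    try {
      server.start();
    } catch (IOException e) {
      if (e.getMessage() != null && e.getMessage().contains("Failed to bind to address")) {
        throw new BindException(e.getMessage());
      }
      throw e;
    }
  }
}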
@@ -134,13 +131,9 @@ public XceiverServerGrpc(DatanodeDetails datanodeDetails, eventLoopGroup = new NioEventLoopGroup(poolSize / 10, factory); channelType = NioServerSocketChannel.class; } - final boolean zeroCopyEnabled = conf.getBoolean( - OZONE_EC_GRPC_ZERO_COPY_ENABLED, - OZONE_EC_GRPC_ZERO_COPY_ENABLED_DEFAULT); LOG.info("GrpcServer channel type {}", channelType.getSimpleName()); - GrpcXceiverService xceiverService = new GrpcXceiverService(dispatcher, - zeroCopyEnabled); + GrpcXceiverService xceiverService = new GrpcXceiverService(dispatcher); NettyServerBuilder nettyServerBuilder = NettyServerBuilder.forPort(port) .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE) .bossEventLoopGroup(eventLoopGroup) @@ -185,7 +178,16 @@ public HddsProtos.ReplicationType getServerType() { @Override public void start() throws IOException { if (!isStarted) { - server.start(); + try { + server.start(); + } catch (IOException e) { + LOG.error("Error while starting the server", e); + if (e.getMessage().contains("Failed to bind to address")) { + throw new BindException(e.getMessage()); + } else { + throw e; + } + } int realPort = server.getPort(); if (port == 0) { @@ -195,9 +197,7 @@ public void start() throws IOException { } //register the real port to the datanode details. - datanodeDetails.setPort(DatanodeDetails - .newPort(Name.STANDALONE, - realPort)); + datanodeDetails.setPort(DatanodeDetails.newStandalonePort(realPort)); isStarted = true; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index be566f84fc9..23be4138b60 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -41,8 +41,9 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; -import java.util.stream.Collectors; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -64,26 +65,25 @@ import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.common.utils.BufferUtils; +import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.keyvalue.impl.KeyValueStreamDataChannel; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; import org.apache.hadoop.util.Time; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; import org.apache.ratis.proto.RaftProtos; -import org.apache.ratis.proto.RaftProtos.StateMachineEntryProto; import org.apache.ratis.proto.RaftProtos.LogEntryProto; import org.apache.ratis.proto.RaftProtos.RaftPeerRole; import org.apache.ratis.proto.RaftProtos.RoleInfoProto; +import 
org.apache.ratis.proto.RaftProtos.StateMachineEntryProto; import org.apache.ratis.proto.RaftProtos.StateMachineLogEntryProto; import org.apache.ratis.protocol.Message; import org.apache.ratis.protocol.RaftClientRequest; +import org.apache.ratis.protocol.RaftGroup; import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.protocol.RaftGroupMemberId; -import org.apache.ratis.protocol.RaftPeerId; import org.apache.ratis.protocol.RaftPeer; +import org.apache.ratis.protocol.RaftPeerId; import org.apache.ratis.protocol.exceptions.StateMachineException; import org.apache.ratis.server.RaftServer; import org.apache.ratis.server.protocol.TermIndex; @@ -98,10 +98,10 @@ import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferException; import org.apache.ratis.thirdparty.com.google.protobuf.TextFormat; +import org.apache.ratis.util.JavaUtils; import org.apache.ratis.util.LifeCycle; import org.apache.ratis.util.TaskQueue; import org.apache.ratis.util.function.CheckedSupplier; -import org.apache.ratis.util.JavaUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -185,7 +185,6 @@ long getStartTime() { private final SimpleStateMachineStorage storage = new SimpleStateMachineStorage(); - private final RaftGroupId gid; private final ContainerDispatcher dispatcher; private final ContainerController containerController; private final XceiverServerRatis ratisServer; @@ -205,6 +204,7 @@ long getStartTime() { private final boolean waitOnBothFollowers; private final HddsDatanodeService datanodeService; private static Semaphore semaphore = new Semaphore(1); + private final AtomicBoolean peersValidated; /** * CSM metrics. @@ -220,7 +220,6 @@ public ContainerStateMachine(HddsDatanodeService hddsDatanodeService, RaftGroupI ConfigurationSource conf, String threadNamePrefix) { this.datanodeService = hddsDatanodeService; - this.gid = gid; this.dispatcher = dispatcher; this.containerController = containerController; this.ratisServer = ratisServer; @@ -256,6 +255,7 @@ public ContainerStateMachine(HddsDatanodeService hddsDatanodeService, RaftGroupI HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT); applyTransactionSemaphore = new Semaphore(maxPendingApplyTransactions); stateMachineHealthy = new AtomicBoolean(true); + this.peersValidated = new AtomicBoolean(false); ThreadFactory threadFactory = new ThreadFactoryBuilder() .setNameFormat( @@ -269,6 +269,19 @@ public ContainerStateMachine(HddsDatanodeService hddsDatanodeService, RaftGroupI } + private void validatePeers() throws IOException { + if (this.peersValidated.get()) { + return; + } + final RaftGroup group = ratisServer.getServerDivision(getGroupId()).getGroup(); + final RaftPeerId selfId = ratisServer.getServer().getId(); + if (group.getPeer(selfId) == null) { + throw new StorageContainerException("Current datanode " + selfId + " is not a member of " + group, + ContainerProtos.Result.INVALID_CONFIG); + } + peersValidated.set(true); + } + @Override public StateMachineStorage getStateMachineStorage() { return storage; @@ -284,8 +297,9 @@ public void initialize( throws IOException { super.initialize(server, id, raftStorage); storage.init(raftStorage); - ratisServer.notifyGroupAdd(gid); + ratisServer.notifyGroupAdd(id); + LOG.info("{}: initialize {}", server.getId(), id); loadSnapshot(storage.getLatestSnapshot()); } @@ -294,7 +308,7 @@ private long loadSnapshot(SingleFileSnapshotInfo snapshot) if (snapshot == null) { TermIndex empty = 
TermIndex.valueOf(0, RaftLog.INVALID_LOG_INDEX); LOG.info("{}: The snapshot info is null. Setting the last applied index " + - "to:{}", gid, empty); + "to:{}", getGroupId(), empty); setLastAppliedTermIndex(empty); return empty.getIndex(); } @@ -302,7 +316,7 @@ private long loadSnapshot(SingleFileSnapshotInfo snapshot) final File snapshotFile = snapshot.getFile().getPath().toFile(); final TermIndex last = SimpleStateMachineStorage.getTermIndexFromSnapshotFile(snapshotFile); - LOG.info("{}: Setting the last applied index to {}", gid, last); + LOG.info("{}: Setting the last applied index to {}", getGroupId(), last); setLastAppliedTermIndex(last); // initialize the dispatcher with snapshot so that it build the missing @@ -352,7 +366,7 @@ public long takeSnapshot() throws IOException { long startTime = Time.monotonicNow(); if (!isStateMachineHealthy()) { String msg = - "Failed to take snapshot " + " for " + gid + " as the stateMachine" + "Failed to take snapshot " + " for " + getGroupId() + " as the stateMachine" + " is unhealthy. The last applied index is at " + ti; StateMachineException sme = new StateMachineException(msg); LOG.error(msg); @@ -361,19 +375,19 @@ public long takeSnapshot() throws IOException { if (ti != null && ti.getIndex() != RaftLog.INVALID_LOG_INDEX) { final File snapshotFile = storage.getSnapshotFile(ti.getTerm(), ti.getIndex()); - LOG.info("{}: Taking a snapshot at:{} file {}", gid, ti, snapshotFile); + LOG.info("{}: Taking a snapshot at:{} file {}", getGroupId(), ti, snapshotFile); try (FileOutputStream fos = new FileOutputStream(snapshotFile)) { persistContainerSet(fos); fos.flush(); // make sure the snapshot file is synced fos.getFD().sync(); } catch (IOException ioe) { - LOG.error("{}: Failed to write snapshot at:{} file {}", gid, ti, + LOG.error("{}: Failed to write snapshot at:{} file {}", getGroupId(), ti, snapshotFile); throw ioe; } LOG.info("{}: Finished taking a snapshot at:{} file:{} took: {} ms", - gid, ti, snapshotFile, (Time.monotonicNow() - startTime)); + getGroupId(), ti, snapshotFile, (Time.monotonicNow() - startTime)); return ti.getIndex(); } return -1; @@ -387,7 +401,7 @@ public TransactionContext startTransaction(LogEntryProto entry, RaftPeerRole rol final StateMachineLogEntryProto stateMachineLogEntry = entry.getStateMachineLogEntry(); final ContainerCommandRequestProto logProto; try { - logProto = getContainerCommandRequestProto(gid, stateMachineLogEntry.getLogData()); + logProto = getContainerCommandRequestProto(getGroupId(), stateMachineLogEntry.getLogData()); } catch (InvalidProtocolBufferException e) { trx.setException(e); return trx; @@ -414,7 +428,7 @@ public TransactionContext startTransaction(RaftClientRequest request) long startTime = Time.monotonicNowNanos(); final ContainerCommandRequestProto proto = message2ContainerCommandRequestProto(request.getMessage()); - Preconditions.checkArgument(request.getRaftGroupId().equals(gid)); + Preconditions.checkArgument(request.getRaftGroupId().equals(getGroupId())); final TransactionContext.Builder builder = TransactionContext.newBuilder() .setClientRequest(request) @@ -450,7 +464,7 @@ public TransactionContext startTransaction(RaftClientRequest request) final WriteChunkRequestProto.Builder commitWriteChunkProto = WriteChunkRequestProto.newBuilder(write) .clearData(); protoBuilder.setWriteChunk(commitWriteChunkProto) - .setPipelineID(gid.getUuid().toString()) + .setPipelineID(getGroupId().getUuid().toString()) .setTraceID(proto.getTraceID()); builder.setStateMachineData(write.getData()); @@ -492,20 
+506,20 @@ private static ContainerCommandRequestProto getContainerCommandRequestProto( private ContainerCommandRequestProto message2ContainerCommandRequestProto( Message message) throws InvalidProtocolBufferException { - return ContainerCommandRequestMessage.toProto(message.getContent(), gid); + return ContainerCommandRequestMessage.toProto(message.getContent(), getGroupId()); } private ContainerCommandResponseProto dispatchCommand( ContainerCommandRequestProto requestProto, DispatcherContext context) { if (LOG.isTraceEnabled()) { - LOG.trace("{}: dispatch {} containerID={} pipelineID={} traceID={}", gid, + LOG.trace("{}: dispatch {} containerID={} pipelineID={} traceID={}", getGroupId(), requestProto.getCmdType(), requestProto.getContainerID(), requestProto.getPipelineID(), requestProto.getTraceID()); } ContainerCommandResponseProto response = dispatcher.dispatch(requestProto, context); if (LOG.isTraceEnabled()) { - LOG.trace("{}: response {}", gid, response); + LOG.trace("{}: response {}", getGroupId(), response); } return response; } @@ -532,7 +546,7 @@ private CompletableFuture writeStateMachineData( RaftServer server = ratisServer.getServer(); Preconditions.checkArgument(!write.getData().isEmpty()); try { - if (server.getDivision(gid).getInfo().isLeader()) { + if (server.getDivision(getGroupId()).getInfo().isLeader()) { stateMachineDataCache.put(entryIndex, write.getData()); } } catch (InterruptedException ioe) { @@ -560,7 +574,7 @@ private CompletableFuture writeStateMachineData( return dispatchCommand(requestProto, context); } catch (Exception e) { LOG.error("{}: writeChunk writeStateMachineData failed: blockId" + - "{} logIndex {} chunkName {}", gid, write.getBlockID(), + "{} logIndex {} chunkName {}", getGroupId(), write.getBlockID(), entryIndex, write.getChunkData().getChunkName(), e); metrics.incNumWriteDataFails(); // write chunks go in parallel. 
It's possible that one write chunk @@ -574,7 +588,7 @@ private CompletableFuture writeStateMachineData( writeChunkFutureMap.put(entryIndex, writeChunkFuture); if (LOG.isDebugEnabled()) { LOG.debug("{}: writeChunk writeStateMachineData : blockId" + - "{} logIndex {} chunkName {}", gid, write.getBlockID(), + "{} logIndex {} chunkName {}", getGroupId(), write.getBlockID(), entryIndex, write.getChunkData().getChunkName()); } // Remove the future once it finishes execution from the @@ -588,7 +602,7 @@ private CompletableFuture writeStateMachineData( && r.getResult() != ContainerProtos.Result.CHUNK_FILE_INCONSISTENCY) { StorageContainerException sce = new StorageContainerException(r.getMessage(), r.getResult()); - LOG.error(gid + ": writeChunk writeStateMachineData failed: blockId" + + LOG.error(getGroupId() + ": writeChunk writeStateMachineData failed: blockId" + write.getBlockID() + " logIndex " + entryIndex + " chunkName " + write.getChunkData().getChunkName() + " Error message: " + r.getMessage() + " Container Result: " + r.getResult()); @@ -602,7 +616,7 @@ private CompletableFuture writeStateMachineData( metrics.incNumBytesWrittenCount( requestProto.getWriteChunk().getChunkData().getLen()); if (LOG.isDebugEnabled()) { - LOG.debug(gid + + LOG.debug(getGroupId() + ": writeChunk writeStateMachineData completed: blockId" + write.getBlockID() + " logIndex " + entryIndex + " chunkName " + write.getChunkData().getChunkName()); @@ -623,7 +637,7 @@ private StateMachine.DataChannel getStreamDataChannel( DispatcherContext context) throws StorageContainerException { if (LOG.isDebugEnabled()) { LOG.debug("{}: getStreamDataChannel {} containerID={} pipelineID={} " + - "traceID={}", gid, requestProto.getCmdType(), + "traceID={}", getGroupId(), requestProto.getCmdType(), requestProto.getContainerID(), requestProto.getPipelineID(), requestProto.getTraceID()); } @@ -782,7 +796,7 @@ private ByteString readStateMachineData( new StorageContainerException(response.getMessage(), response.getResult()); LOG.error("gid {} : ReadStateMachine failed. cmd {} logIndex {} msg : " - + "{} Container Result: {}", gid, response.getCmdType(), index, + + "{} Container Result: {}", getGroupId(), response.getCmdType(), index, response.getMessage(), response.getResult()); stateMachineHealthy.set(false); throw sce; @@ -818,11 +832,9 @@ private ByteString readStateMachineData( */ @Override public CompletableFuture flush(long index) { - List> futureList = - writeChunkFutureMap.entrySet().stream().filter(x -> x.getKey() <= index) - .map(Map.Entry::getValue).collect(Collectors.toList()); return CompletableFuture.allOf( - futureList.toArray(new CompletableFuture[futureList.size()])); + writeChunkFutureMap.entrySet().stream().filter(x -> x.getKey() <= index) + .map(Map.Entry::getValue).toArray(CompletableFuture[]::new)); } /** @@ -859,7 +871,7 @@ public CompletableFuture read(LogEntryProto entry, TransactionContex .map(TransactionContext::getStateMachineContext) .orElse(null); final ContainerCommandRequestProto requestProto = context != null ? 
context.getLogProto() - : getContainerCommandRequestProto(gid, entry.getStateMachineLogEntry().getLogData()); + : getContainerCommandRequestProto(getGroupId(), entry.getStateMachineLogEntry().getLogData()); if (requestProto.getCmdType() != Type.WriteChunk) { throw new IllegalStateException("Cmd type:" + requestProto.getCmdType() @@ -877,7 +889,7 @@ public CompletableFuture read(LogEntryProto entry, TransactionContex return future; } catch (Exception e) { metrics.incNumReadStateMachineFails(); - LOG.error("{} unable to read stateMachineData:", gid, e); + LOG.error("{} unable to read stateMachineData:", getGroupId(), e); return completeExceptionally(e); } } @@ -923,7 +935,7 @@ public void notifyServerShutdown(RaftProtos.RoleInfoProto roleInfo, boolean allS // from `HddsDatanodeService.stop()`, otherwise, it indicates this `close` originates from ratis. if (allServer) { if (datanodeService != null && !datanodeService.isStopped()) { - LOG.info("{} is closed by ratis", gid); + LOG.info("{} is closed by ratis", getGroupId()); if (semaphore.tryAcquire()) { // run with a different thread, so this raft group can be closed Runnable runnable = () -> { @@ -955,7 +967,7 @@ public void notifyServerShutdown(RaftProtos.RoleInfoProto roleInfo, boolean allS CompletableFuture.runAsync(runnable); } } else { - LOG.info("{} is closed by HddsDatanodeService", gid); + LOG.info("{} is closed by HddsDatanodeService", getGroupId()); } } } @@ -967,6 +979,11 @@ private CompletableFuture applyTransaction( final CheckedSupplier task = () -> { try { + try { + this.validatePeers(); + } catch (StorageContainerException e) { + return ContainerUtils.logAndReturnError(LOG, e, request); + } long timeNow = Time.monotonicNowNanos(); long queueingDelay = timeNow - context.getStartTime(); metrics.recordQueueingDelay(request.getCmdType(), queueingDelay); @@ -986,14 +1003,17 @@ private CompletableFuture applyTransaction( private void removeStateMachineDataIfNeeded(long index) { if (waitOnBothFollowers) { try { - RaftServer.Division division = ratisServer.getServer().getDivision(gid); + RaftServer.Division division = ratisServer.getServer().getDivision(getGroupId()); if (division.getInfo().isLeader()) { - long minIndex = Arrays.stream(division.getInfo() - .getFollowerNextIndices()).min().getAsLong(); - LOG.debug("Removing data corresponding to log index {} min index {} " - + "from cache", index, minIndex); - removeCacheDataUpTo(Math.min(minIndex, index)); + Arrays.stream(division.getInfo() + .getFollowerNextIndices()).min().ifPresent(minIndex -> { + removeCacheDataUpTo(Math.min(minIndex, index)); + LOG.debug("Removing data corresponding to log index {} min index {} " + + "from cache", index, minIndex); + }); } + } catch (RuntimeException e) { + throw e; } catch (Exception e) { throw new RuntimeException(e); } @@ -1044,7 +1064,7 @@ public CompletableFuture applyTransaction(TransactionContext trx) { CompletableFuture applyTransactionFuture = new CompletableFuture<>(); final Consumer exceptionHandler = e -> { - LOG.error(gid + ": failed to applyTransaction at logIndex " + index + LOG.error(getGroupId() + ": failed to applyTransaction at logIndex " + index + " for " + requestProto.getCmdType(), e); stateMachineHealthy.compareAndSet(true, false); metrics.incNumApplyTransactionsFails(); @@ -1072,7 +1092,7 @@ public CompletableFuture applyTransaction(TransactionContext trx) { new StorageContainerException(r.getMessage(), r.getResult()); LOG.error( "gid {} : ApplyTransaction failed. 
cmd {} logIndex {} msg : " - + "{} Container Result: {}", gid, r.getCmdType(), index, + + "{} Container Result: {}", getGroupId(), r.getCmdType(), index, r.getMessage(), r.getResult()); metrics.incNumApplyTransactionsFails(); // Since the applyTransaction now is completed exceptionally, @@ -1081,12 +1101,12 @@ public CompletableFuture applyTransaction(TransactionContext trx) { // shutdown. applyTransactionFuture.completeExceptionally(sce); stateMachineHealthy.compareAndSet(true, false); - ratisServer.handleApplyTransactionFailure(gid, trx.getServerRole()); + ratisServer.handleApplyTransactionFailure(getGroupId(), trx.getServerRole()); } else { if (LOG.isDebugEnabled()) { LOG.debug( "gid {} : ApplyTransaction completed. cmd {} logIndex {} msg : " - + "{} Container Result: {}", gid, r.getCmdType(), index, + + "{} Container Result: {}", getGroupId(), r.getCmdType(), index, r.getMessage(), r.getResult()); } if (cmdType == Type.WriteChunk || cmdType == Type.PutSmallFile) { @@ -1164,25 +1184,25 @@ public void evictStateMachineCache() { @Override public void notifyFollowerSlowness(RoleInfoProto roleInfoProto, RaftPeer follower) { - ratisServer.handleFollowerSlowness(gid, roleInfoProto, follower); + ratisServer.handleFollowerSlowness(getGroupId(), roleInfoProto, follower); } @Override public void notifyExtendedNoLeader(RoleInfoProto roleInfoProto) { - ratisServer.handleNoLeader(gid, roleInfoProto); + ratisServer.handleNoLeader(getGroupId(), roleInfoProto); } @Override public void notifyLogFailed(Throwable t, LogEntryProto failedEntry) { - LOG.error("{}: {} {}", gid, TermIndex.valueOf(failedEntry), + LOG.error("{}: {} {}", getGroupId(), TermIndex.valueOf(failedEntry), toStateMachineLogEntryString(failedEntry.getStateMachineLogEntry()), t); - ratisServer.handleNodeLogFailure(gid, t); + ratisServer.handleNodeLogFailure(getGroupId(), t); } @Override public CompletableFuture notifyInstallSnapshotFromLeader( RoleInfoProto roleInfoProto, TermIndex firstTermIndexInLog) { - ratisServer.handleInstallSnapshotFromLeader(gid, roleInfoProto, + ratisServer.handleInstallSnapshotFromLeader(getGroupId(), roleInfoProto, firstTermIndexInLog); final CompletableFuture future = new CompletableFuture<>(); future.complete(firstTermIndexInLog); @@ -1191,7 +1211,7 @@ public CompletableFuture notifyInstallSnapshotFromLeader( @Override public void notifyGroupRemove() { - ratisServer.notifyGroupRemove(gid); + ratisServer.notifyGroupRemove(getGroupId()); // Make best effort to quasi-close all the containers on group removal. // Containers already in terminal state like CLOSED or UNHEALTHY will not // be affected. @@ -1199,7 +1219,7 @@ public void notifyGroupRemove() { try { containerController.markContainerForClose(cid); containerController.quasiCloseContainer(cid, - "Ratis group removed. Group id: " + gid); + "Ratis group removed. 
Group id: " + getGroupId()); } catch (IOException e) { LOG.debug("Failed to quasi-close container {}", cid); } @@ -1221,7 +1241,7 @@ public void notifyLeaderChanged(RaftGroupMemberId groupMemberId, @Override public String toStateMachineLogEntryString(StateMachineLogEntryProto proto) { - return smProtoToString(gid, containerController, proto); + return smProtoToString(getGroupId(), containerController, proto); } public static String smProtoToString(RaftGroupId gid, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java index c58aab2e5ba..5fced0e39b3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java @@ -29,6 +29,7 @@ import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; @@ -36,6 +37,7 @@ import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; import org.apache.hadoop.ozone.container.common.utils.RawDB; import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; +import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures; import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures.SchemaV3; import org.apache.hadoop.util.Time; @@ -44,6 +46,7 @@ import jakarta.annotation.Nullable; +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_DATANODE_IO_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY; import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_NAME; import static org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil.initPerDiskDBStore; @@ -80,6 +83,8 @@ public class HddsVolume extends StorageVolume { private final VolumeIOStats volumeIOStats; private final VolumeInfoMetrics volumeInfoMetrics; + private ContainerController controller; + private final AtomicLong committedBytes = new AtomicLong(); // till Open containers become full // Mentions the type of volume @@ -119,8 +124,10 @@ private HddsVolume(Builder b) throws IOException { if (!b.getFailedVolume() && getVolumeInfo().isPresent()) { this.setState(VolumeState.NOT_INITIALIZED); + ConfigurationSource conf = getConf(); + int[] intervals = conf.getInts(OZONE_DATANODE_IO_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY); this.volumeIOStats = new VolumeIOStats(b.getVolumeRootStr(), - this.getStorageDir().toString()); + this.getStorageDir().toString(), intervals); this.volumeInfoMetrics = new VolumeInfoMetrics(b.getVolumeRootStr(), this); @@ -382,6 +389,17 @@ public void loadDbStore(boolean readOnly) throws IOException { getStorageID()); } + public void setController(ContainerController controller) { + this.controller = controller; + } + + public long getContainers() { + if (controller != null) { + return controller.getContainerCount(this); + } + return 0; + } + /** * Pick a DbVolume for HddsVolume and init db instance. * Use the HddsVolume directly if no DbVolume found. 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java index e195b127d49..9afea8e6b0c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java @@ -44,6 +44,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; +import org.apache.ratis.util.function.CheckedRunnable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -84,7 +85,7 @@ public class MutableVolumeSet implements VolumeSet { private String clusterID; private final StorageVolumeChecker volumeChecker; - private Runnable failedVolumeListener; + private CheckedRunnable failedVolumeListener; private StateContext context; private final StorageVolumeFactory volumeFactory; private final StorageVolume.VolumeType volumeType; @@ -132,7 +133,7 @@ public MutableVolumeSet(String dnUuid, String clusterID, initializeVolumeSet(); } - public void setFailedVolumeListener(Runnable runnable) { + public void setFailedVolumeListener(CheckedRunnable runnable) { failedVolumeListener = runnable; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java index e22addd354f..2ce19c3bf19 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java @@ -21,7 +21,10 @@ import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; import org.apache.hadoop.metrics2.lib.MutableCounterLong; +import org.apache.hadoop.metrics2.lib.MutableQuantiles; +import org.apache.hadoop.metrics2.lib.MutableRate; /** * This class is used to track Volume IO stats for each HDDS Volume. @@ -29,12 +32,23 @@ public class VolumeIOStats { private String metricsSourceName = VolumeIOStats.class.getSimpleName(); private String storageDirectory; - private @Metric MutableCounterLong readBytes; - private @Metric MutableCounterLong readOpCount; - private @Metric MutableCounterLong writeBytes; - private @Metric MutableCounterLong writeOpCount; - private @Metric MutableCounterLong readTime; - private @Metric MutableCounterLong writeTime; + private final MetricsRegistry registry = new MetricsRegistry("VolumeIOStats"); + @Metric + private MutableCounterLong readBytes; + @Metric + private MutableCounterLong readOpCount; + @Metric + private MutableCounterLong writeBytes; + @Metric + private MutableCounterLong writeOpCount; + @Metric + private MutableRate readTime; + @Metric + private MutableQuantiles[] readLatencyQuantiles; + @Metric + private MutableRate writeTime; + @Metric + private MutableQuantiles[] writeLatencyQuantiles; @Deprecated public VolumeIOStats() { @@ -44,9 +58,24 @@ public VolumeIOStats() { /** * @param identifier Typically, path to volume root. e.g. 
/data/hdds */ - public VolumeIOStats(String identifier, String storageDirectory) { + public VolumeIOStats(String identifier, String storageDirectory, int[] intervals) { this.metricsSourceName += '-' + identifier; this.storageDirectory = storageDirectory; + + // Try initializing `readLatencyQuantiles` and `writeLatencyQuantiles` + if (intervals != null && intervals.length > 0) { + final int length = intervals.length; + readLatencyQuantiles = new MutableQuantiles[intervals.length]; + writeLatencyQuantiles = new MutableQuantiles[intervals.length]; + for (int i = 0; i < length; i++) { + readLatencyQuantiles[i] = registry.newQuantiles( + "readLatency" + intervals[i] + "s", + "Read Data File Io Latency in ms", "ops", "latency", intervals[i]); + writeLatencyQuantiles[i] = registry.newQuantiles( + "writeLatency" + intervals[i] + "s", + "Write Data File Io Latency in ms", "ops", "latency", intervals[i]); + } + } init(); } @@ -99,7 +128,10 @@ public void incWriteOpCount() { * @param time */ public void incReadTime(long time) { - readTime.incr(time); + readTime.add(time); + for (MutableQuantiles q : readLatencyQuantiles) { + q.add(time); + } } /** @@ -107,7 +139,10 @@ public void incReadTime(long time) { * @param time */ public void incWriteTime(long time) { - writeTime.incr(time); + writeTime.add(time); + for (MutableQuantiles q : writeLatencyQuantiles) { + q.add(time); + } } /** @@ -147,7 +182,7 @@ public long getWriteOpCount() { * @return long */ public long getReadTime() { - return readTime.value(); + return (long) readTime.lastStat().total(); } /** @@ -155,7 +190,7 @@ public long getReadTime() { * @return long */ public long getWriteTime() { - return writeTime.value(); + return (long) writeTime.lastStat().total(); } @Metric diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java index 68140600db9..cd31b8063d3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java @@ -37,6 +37,7 @@ public class VolumeInfoMetrics { private final HddsVolume volume; @Metric("Returns the RocksDB compact times of the Volume") private MutableRate dbCompactLatency; + private long containers; /** * @param identifier Typically, path to volume root. E.g. /data/hdds @@ -153,4 +154,11 @@ public void dbCompactTimesNanoSecondsIncr(long time) { dbCompactLatency.add(time); } + /** + * Return the Container Count of the Volume. 
+ */ + @Metric("Returns the Container Count of the Volume") + public long getContainers() { + return volume.getContainers(); + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java index 733dc7964f1..34ba66c91bb 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.container.common.volume; import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hdds.conf.ConfigurationException; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.StorageSize; import org.apache.hadoop.hdds.conf.StorageUnit; @@ -220,9 +221,8 @@ private static long getReserved(ConfigurationSource conf, String rootDir, for (String reserve : reserveList) { String[] words = reserve.split(":"); if (words.length < 2) { - LOG.error("Reserved space should be configured in a pair, but current value is {}", - reserve); - continue; + throw new ConfigurationException("hdds.datanode.dir.du.reserved - " + + "Reserved space should be configured in a pair, but current value is " + reserve); } try { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java index 487e6d37b28..95b7d06167f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java @@ -18,11 +18,13 @@ package org.apache.hadoop.ozone.container.ec.reconstruction; import com.google.common.collect.ImmutableList; +import jakarta.annotation.Nonnull; import org.apache.commons.collections.map.SingletonMap; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.client.ClientTrustManager; @@ -34,8 +36,6 @@ import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State; -import jakarta.annotation.Nonnull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -44,7 +44,6 @@ import java.util.List; import java.util.Objects; import java.util.Set; -import java.util.stream.Collectors; /** * This class wraps necessary container-level rpc calls @@ -93,14 +92,11 @@ public BlockData[] listBlock(long containerId, DatanodeDetails dn, try { return BlockData.getFromProtoBuf(i); } catch (IOException e) { - LOG.debug("Failed while converting to protobuf BlockData. 
Returning" - + " null for listBlock from DN: " + dn, - e); + LOG.debug("Failed while converting to protobuf BlockData. Returning null for listBlock from DN: {}", dn, e); // TODO: revisit here. return null; } - }).collect(Collectors.toList()) - .toArray(new BlockData[blockDataList.size()]); + }).toArray(BlockData[]::new); } finally { this.xceiverClientManager.releaseClient(xceiverClient, false); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index d587748e6f8..716eb440530 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -103,6 +103,7 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.CLOSED; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.QUASI_CLOSED; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.UNHEALTHY; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.RECOVERING; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CLOSED_CONTAINER_IO; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_ALREADY_EXISTS; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_INTERNAL_ERROR; @@ -110,6 +111,7 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.DELETE_ON_NON_EMPTY_CONTAINER; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.DELETE_ON_OPEN_CONTAINER; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.GET_SMALL_FILE_ERROR; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.INVALID_ARGUMENT; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.INVALID_CONTAINER_STATE; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.IO_EXCEPTION; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.PUT_SMALL_FILE_ERROR; @@ -132,12 +134,8 @@ import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.putBlockResponseSuccess; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.unsupportedRequest; import static org.apache.hadoop.hdds.scm.utils.ClientCommandsUtils.getReadChunkVersion; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerDataProto.State.RECOVERING; - -import org.apache.hadoop.ozone.container.common.interfaces.ScanResult; -import static org.apache.hadoop.ozone.ClientVersion.EC_REPLICA_INDEX_REQUIRED_IN_BLOCK_REQUEST; import static org.apache.hadoop.ozone.OzoneConsts.INCREMENTAL_CHUNK_LIST; +import org.apache.hadoop.ozone.container.common.interfaces.ScanResult; import org.apache.hadoop.util.Time; import org.apache.ratis.statemachine.StateMachine; @@ -260,6 +258,15 @@ static ContainerCommandResponseProto dispatchRequest(KeyValueHandler handler, ContainerCommandRequestProto request, KeyValueContainer kvContainer, DispatcherContext dispatcherContext) { Type cmdType = request.getCmdType(); + 
// Validate the request has been made to the correct datanode with the node id matching. + if (kvContainer != null) { + try { + handler.validateRequestDatanodeId(kvContainer.getContainerData().getReplicaIndex(), + request.getDatanodeUuid()); + } catch (StorageContainerException e) { + return ContainerUtils.logAndReturnError(LOG, e, request); + } + } switch (cmdType) { case CreateContainer: @@ -378,7 +385,23 @@ ContainerCommandResponseProto handleCreateContainer( " already exists", null, CONTAINER_ALREADY_EXISTS), request); } + try { + this.validateRequestDatanodeId(request.getCreateContainer().hasReplicaIndex() ? + request.getCreateContainer().getReplicaIndex() : null, request.getDatanodeUuid()); + } catch (StorageContainerException e) { + return ContainerUtils.logAndReturnError(LOG, e, request); + } + long containerID = request.getContainerID(); + State containerState = request.getCreateContainer().getState(); + + if (containerState != RECOVERING) { + try { + containerSet.ensureContainerNotMissing(containerID, containerState); + } catch (StorageContainerException ex) { + return ContainerUtils.logAndReturnError(LOG, ex, request); + } + } ContainerLayoutVersion layoutVersion = ContainerLayoutVersion.getConfiguredVersion(conf); @@ -403,7 +426,11 @@ ContainerCommandResponseProto handleCreateContainer( try { if (containerSet.getContainer(containerID) == null) { newContainer.create(volumeSet, volumeChoosingPolicy, clusterId); - created = containerSet.addContainer(newContainer); + if (RECOVERING == newContainer.getContainerState()) { + created = containerSet.addContainerByOverwriteMissingContainer(newContainer); + } else { + created = containerSet.addContainer(newContainer); + } } else { // The create container request for an already existing container can // arrive in case the ContainerStateMachine reapplies the transaction @@ -608,6 +635,8 @@ ContainerCommandResponseProto handlePutBlock( endOfBlock = true; } + // Note: checksum held inside blockData. But no extra checksum validation here with handlePutBlock. + long bcsId = dispatcherContext == null ? 0 : dispatcherContext.getLogIndex(); blockData.setBlockCommitSequenceId(bcsId); @@ -723,15 +752,6 @@ ContainerCommandResponseProto handleGetContainerChecksumInfo( return getGetContainerMerkleTreeResponse(request, checksumTree); } - /** - * Checks if a replicaIndex needs to be checked based on the client version for a request. - * @param request ContainerCommandRequest object. - * @return true if the validation is required for the client version else false. - */ - private boolean replicaIndexCheckRequired(ContainerCommandRequestProto request) { - return request.hasVersion() && request.getVersion() >= EC_REPLICA_INDEX_REQUIRED_IN_BLOCK_REQUEST.toProtoValue(); - } - /** * Handle Get Block operation. Calls BlockManager to process the request. 
*/ @@ -750,9 +770,7 @@ ContainerCommandResponseProto handleGetBlock( try { BlockID blockID = BlockID.getFromProtobuf( request.getGetBlock().getBlockID()); - if (replicaIndexCheckRequired(request)) { - BlockUtils.verifyReplicaIdx(kvContainer, blockID); - } + BlockUtils.verifyReplicaIdx(kvContainer, blockID); responseData = blockManager.getBlock(kvContainer, blockID).getProtoBufMessage(); final long numBytes = responseData.getSerializedSize(); metrics.incContainerBytesStats(Type.GetBlock, numBytes); @@ -875,9 +893,7 @@ ContainerCommandResponseProto handleReadChunk( ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(request.getReadChunk() .getChunkData()); Preconditions.checkNotNull(chunkInfo); - if (replicaIndexCheckRequired(request)) { - BlockUtils.verifyReplicaIdx(kvContainer, blockID); - } + BlockUtils.verifyReplicaIdx(kvContainer, blockID); BlockUtils.verifyBCSId(kvContainer, blockID); if (dispatcherContext == null) { @@ -977,6 +993,7 @@ ContainerCommandResponseProto handleWriteChunk( if (isWrite) { data = ChunkBuffer.wrap(writeChunk.getData().asReadOnlyByteBufferList()); + // TODO: Can improve checksum validation here. Make this one-shot after protocol change. validateChunkChecksumData(data, chunkInfo); } chunkManager @@ -1225,7 +1242,7 @@ private void checkContainerOpen(KeyValueContainer kvContainer) * might already be in closing state here. */ if (containerState == State.OPEN || containerState == State.CLOSING - || containerState == State.RECOVERING) { + || containerState == RECOVERING) { return; } @@ -1706,4 +1723,22 @@ public static FaultInjector getInjector() { public static void setInjector(FaultInjector instance) { injector = instance; } + + /** + * Verify that the request was sent to the correct datanode by comparing the request's datanode UUID with this + * datanode's UUID. The check is enforced only for EC containers, i.e. when containerReplicaIdx is > 0. + * + * @param containerReplicaIdx replicaIndex of the container addressed by the command. + * @param requestDatanodeUUID datanode UUID carried in the request. + * @throws StorageContainerException if the request was sent to a different datanode. 
+ */ + private boolean validateRequestDatanodeId(Integer containerReplicaIdx, String requestDatanodeUUID) + throws StorageContainerException { + if (containerReplicaIdx != null && containerReplicaIdx > 0 && !requestDatanodeUUID.equals(this.getDatanodeId())) { + throw new StorageContainerException( + String.format("Request is trying to write to node with uuid : %s but the current nodeId is: %s .", + requestDatanodeUUID, this.getDatanodeId()), INVALID_ARGUMENT); + } + return true; + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java index 945efbcf6ea..8bbc2478004 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java @@ -247,7 +247,9 @@ public static void verifyBCSId(Container container, BlockID blockID) public static void verifyReplicaIdx(Container container, BlockID blockID) throws IOException { Integer containerReplicaIndex = container.getContainerData().getReplicaIndex(); - if (containerReplicaIndex > 0 && !containerReplicaIndex.equals(blockID.getReplicaIndex())) { + Integer blockReplicaIndex = blockID.getReplicaIndex(); + if (containerReplicaIndex > 0 && blockReplicaIndex != null && blockReplicaIndex != 0 && + !containerReplicaIndex.equals(blockReplicaIndex)) { throw new StorageContainerException( "Unable to find the Container with replicaIdx " + blockID.getReplicaIndex() + ". Container " + container.getContainerData().getContainerID() + " replicaIdx is " diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java index 88aeb3c174d..d9edd6d4cb0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java @@ -17,27 +17,22 @@ */ package org.apache.hadoop.ozone.container.metadata; -import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.utils.MetadataKeyFilters; import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; -import org.apache.hadoop.hdds.utils.db.BatchOperationHandler; import org.apache.hadoop.hdds.utils.db.DBProfile; import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList; import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; -import 
org.apache.hadoop.ozone.container.common.utils.db.DatanodeDBProfile; -import org.rocksdb.InfoLogLevel; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -45,14 +40,11 @@ import java.io.IOException; import java.util.NoSuchElementException; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE; -import static org.apache.hadoop.hdds.utils.db.DBStoreBuilder.HDDS_DEFAULT_DB_PROFILE; - /** * Implementation of the {@link DatanodeStore} interface that contains * functionality common to all more derived datanode store implementations. */ -public abstract class AbstractDatanodeStore implements DatanodeStore { +public class AbstractDatanodeStore extends AbstractRDBStore implements DatanodeStore { private Table metadataTable; @@ -68,12 +60,6 @@ public abstract class AbstractDatanodeStore implements DatanodeStore { public static final Logger LOG = LoggerFactory.getLogger(AbstractDatanodeStore.class); - private volatile DBStore store; - private final AbstractDatanodeDBDefinition dbDef; - private final ManagedColumnFamilyOptions cfOptions; - - private static DatanodeDBProfile dbProfile; - private final boolean openReadOnly; /** * Constructs the metadata store and starts the DB services. @@ -84,114 +70,64 @@ public abstract class AbstractDatanodeStore implements DatanodeStore { protected AbstractDatanodeStore(ConfigurationSource config, AbstractDatanodeDBDefinition dbDef, boolean openReadOnly) throws IOException { - - dbProfile = DatanodeDBProfile - .getProfile(config.getEnum(HDDS_DB_PROFILE, HDDS_DEFAULT_DB_PROFILE)); - - // The same config instance is used on each datanode, so we can share the - // corresponding column family options, providing a single shared cache - // for all containers on a datanode. - cfOptions = dbProfile.getColumnFamilyOptions(config); - - this.dbDef = dbDef; - this.openReadOnly = openReadOnly; - start(config); + super(dbDef, config, openReadOnly); } @Override - public void start(ConfigurationSource config) + protected DBStore initDBStore(DBStoreBuilder dbStoreBuilder, ManagedDBOptions options, ConfigurationSource config) throws IOException { - if (this.store == null) { - ManagedDBOptions options = dbProfile.getDBOptions(); - options.setCreateIfMissing(true); - options.setCreateMissingColumnFamilies(true); - - if (this.dbDef instanceof DatanodeSchemaOneDBDefinition || - this.dbDef instanceof DatanodeSchemaTwoDBDefinition) { - long maxWalSize = DBProfile.toLong(StorageUnit.MB.toBytes(2)); - options.setMaxTotalWalSize(maxWalSize); - } - - DatanodeConfiguration dc = - config.getObject(DatanodeConfiguration.class); - // Config user log files - InfoLogLevel level = InfoLogLevel.valueOf( - dc.getRocksdbLogLevel() + "_LEVEL"); - options.setInfoLogLevel(level); - options.setMaxLogFileSize(dc.getRocksdbLogMaxFileSize()); - options.setKeepLogFileNum(dc.getRocksdbLogMaxFileNum()); - - if (this.dbDef instanceof DatanodeSchemaThreeDBDefinition) { - options.setDeleteObsoleteFilesPeriodMicros( - dc.getRocksdbDeleteObsoleteFilesPeriod()); - - // For V3, all Rocksdb dir has the same "container.db" name. 
So use - // parentDirName(storage UUID)-dbDirName as db metrics name - this.store = DBStoreBuilder.newBuilder(config, dbDef) - .setDBOptions(options) - .setDefaultCFOptions(cfOptions) - .setOpenReadOnly(openReadOnly) - .setDBJmxBeanNameName(dbDef.getDBLocation(config).getName() + "-" + - dbDef.getName()) - .build(); - } else { - this.store = DBStoreBuilder.newBuilder(config, dbDef) - .setDBOptions(options) - .setDefaultCFOptions(cfOptions) - .setOpenReadOnly(openReadOnly) - .build(); - } + AbstractDatanodeDBDefinition dbDefinition = this.getDbDef(); + if (dbDefinition instanceof DatanodeSchemaOneDBDefinition || + dbDefinition instanceof DatanodeSchemaTwoDBDefinition) { + long maxWalSize = DBProfile.toLong(StorageUnit.MB.toBytes(2)); + options.setMaxTotalWalSize(maxWalSize); + } + DatanodeConfiguration dc = + config.getObject(DatanodeConfiguration.class); - // Use the DatanodeTable wrapper to disable the table iterator on - // existing Table implementations retrieved from the DBDefinition. - // See the DatanodeTable's Javadoc for an explanation of why this is - // necessary. - metadataTable = new DatanodeTable<>( - dbDef.getMetadataColumnFamily().getTable(this.store)); - checkTableStatus(metadataTable, metadataTable.getName()); - - // The block iterator this class returns will need to use the table - // iterator internally, so construct a block data table instance - // that does not have the iterator disabled by DatanodeTable. - blockDataTableWithIterator = - dbDef.getBlockDataColumnFamily().getTable(this.store); - - blockDataTable = new DatanodeTable<>(blockDataTableWithIterator); - checkTableStatus(blockDataTable, blockDataTable.getName()); - - if (dbDef.getFinalizeBlocksColumnFamily() != null) { - finalizeBlocksTableWithIterator = - dbDef.getFinalizeBlocksColumnFamily().getTable(this.store); - - finalizeBlocksTable = new DatanodeTable<>( - finalizeBlocksTableWithIterator); - checkTableStatus(finalizeBlocksTable, finalizeBlocksTable.getName()); - } + if (dbDefinition instanceof DatanodeSchemaThreeDBDefinition) { + options.setDeleteObsoleteFilesPeriodMicros( + dc.getRocksdbDeleteObsoleteFilesPeriod()); - if (dbDef.getLastChunkInfoColumnFamily() != null) { - lastChunkInfoTable = new DatanodeTable<>( - dbDef.getLastChunkInfoColumnFamily().getTable(this.store)); - checkTableStatus(lastChunkInfoTable, lastChunkInfoTable.getName()); - } + // For V3, all Rocksdb dir has the same "container.db" name. So use + // parentDirName(storage UUID)-dbDirName as db metrics name + dbStoreBuilder.setDBJmxBeanNameName(dbDefinition.getDBLocation(config).getName() + "-" + + dbDefinition.getName()); } - } - - @Override - public synchronized void stop() throws Exception { - if (store != null) { - store.close(); - store = null; + DBStore dbStore = dbStoreBuilder.setDBOptions(options).build(); + + // Use the DatanodeTable wrapper to disable the table iterator on + // existing Table implementations retrieved from the DBDefinition. + // See the DatanodeTable's Javadoc for an explanation of why this is + // necessary. + metadataTable = new DatanodeTable<>( + dbDefinition.getMetadataColumnFamily().getTable(dbStore)); + checkTableStatus(metadataTable, metadataTable.getName()); + + // The block iterator this class returns will need to use the table + // iterator internally, so construct a block data table instance + // that does not have the iterator disabled by DatanodeTable. 
+ blockDataTableWithIterator = + dbDefinition.getBlockDataColumnFamily().getTable(dbStore); + + blockDataTable = new DatanodeTable<>(blockDataTableWithIterator); + checkTableStatus(blockDataTable, blockDataTable.getName()); + + if (dbDefinition.getFinalizeBlocksColumnFamily() != null) { + finalizeBlocksTableWithIterator = + dbDefinition.getFinalizeBlocksColumnFamily().getTable(dbStore); + + finalizeBlocksTable = new DatanodeTable<>( + finalizeBlocksTableWithIterator); + checkTableStatus(finalizeBlocksTable, finalizeBlocksTable.getName()); } - } - @Override - public DBStore getStore() { - return this.store; - } - - @Override - public BatchOperationHandler getBatchHandler() { - return this.store; + if (dbDefinition.getLastChunkInfoColumnFamily() != null) { + lastChunkInfoTable = new DatanodeTable<>( + dbDefinition.getLastChunkInfoColumnFamily().getTable(dbStore)); + checkTableStatus(lastChunkInfoTable, lastChunkInfoTable.getName()); + } + return dbStore; } @Override @@ -240,44 +176,6 @@ public BlockIterator getFinalizeBlockIterator(long containerID, finalizeBlocksTableWithIterator.iterator(), filter); } - @Override - public synchronized boolean isClosed() { - if (this.store == null) { - return true; - } - return this.store.isClosed(); - } - - @Override - public void close() throws IOException { - this.store.close(); - this.cfOptions.close(); - } - - @Override - public void flushDB() throws IOException { - store.flushDB(); - } - - @Override - public void flushLog(boolean sync) throws IOException { - store.flushLog(sync); - } - - @Override - public void compactDB() throws IOException { - store.compactDB(); - } - - @VisibleForTesting - public DatanodeDBProfile getDbProfile() { - return dbProfile; - } - - protected AbstractDatanodeDBDefinition getDbDef() { - return this.dbDef; - } - protected Table getBlockDataTableWithIterator() { return this.blockDataTableWithIterator; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java new file mode 100644 index 00000000000..5ce1a85b388 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java @@ -0,0 +1,135 @@ +package org.apache.hadoop.ozone.container.metadata; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.utils.db.BatchOperationHandler; +import org.apache.hadoop.hdds.utils.db.DBDefinition; +import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; +import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; +import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; +import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; +import org.apache.hadoop.ozone.container.common.utils.db.DatanodeDBProfile; +import org.rocksdb.InfoLogLevel; + +import java.io.IOException; + +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE; +import static org.apache.hadoop.hdds.utils.db.DBStoreBuilder.HDDS_DEFAULT_DB_PROFILE; + +/** + * Abstract class defining the way to interact with any RocksDB store in the datanode. + * @param <DEF> Generic parameter defining the schema for the DB. + */ +public abstract class AbstractRDBStore<DEF extends DBDefinition> implements DBStoreManager { + private final DEF dbDef; + private final ManagedColumnFamilyOptions cfOptions; + private static DatanodeDBProfile dbProfile; + private final boolean openReadOnly; + private volatile DBStore store; + + protected AbstractRDBStore(DEF dbDef, ConfigurationSource config, boolean openReadOnly) throws IOException { + dbProfile = DatanodeDBProfile.getProfile(config.getEnum(HDDS_DB_PROFILE, HDDS_DEFAULT_DB_PROFILE)); + + // The same config instance is used on each datanode, so we can share the + // corresponding column family options, providing a single shared cache + // for all containers on a datanode. + cfOptions = dbProfile.getColumnFamilyOptions(config); + this.dbDef = dbDef; + this.openReadOnly = openReadOnly; + start(config); + } + + public void start(ConfigurationSource config) + throws IOException { + if (this.store == null) { + ManagedDBOptions options = dbProfile.getDBOptions(); + options.setCreateIfMissing(true); + options.setCreateMissingColumnFamilies(true); + + DatanodeConfiguration dc = + config.getObject(DatanodeConfiguration.class); + // Config user log files + InfoLogLevel level = InfoLogLevel.valueOf( + dc.getRocksdbLogLevel() + "_LEVEL"); + options.setInfoLogLevel(level); + options.setMaxLogFileSize(dc.getRocksdbLogMaxFileSize()); + options.setKeepLogFileNum(dc.getRocksdbLogMaxFileNum()); + this.store = initDBStore(DBStoreBuilder.newBuilder(config, dbDef) + .setDBOptions(options) + .setDefaultCFOptions(cfOptions) + .setOpenReadOnly(openReadOnly), options, config); + } + } + + protected abstract DBStore initDBStore(DBStoreBuilder dbStoreBuilder, ManagedDBOptions options, + ConfigurationSource config) throws IOException; + + public synchronized void stop() throws Exception { + if (store != null) { + store.close(); + store = null; + } + } + + public DBStore getStore() { + return this.store; + } + + public synchronized boolean isClosed() { + if (this.store == null) { + return true; + } + return this.store.isClosed(); + } + + public BatchOperationHandler getBatchHandler() { + return this.store; + } + + public void close() throws IOException { + this.store.close(); + this.cfOptions.close(); + } + + public void flushDB() throws IOException { + store.flushDB(); + } + + public void flushLog(boolean sync) throws IOException { + store.flushLog(sync); + } + + public void compactDB() throws IOException { + store.compactDB(); + } + + @VisibleForTesting + public DatanodeDBProfile getDbProfile() { + return 
dbProfile; + } + + protected DEF getDbDef() { + return this.dbDef; + } + +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DBStoreManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DBStoreManager.java new file mode 100644 index 00000000000..ec9849950a0 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DBStoreManager.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.  See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.  The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.  You may obtain a copy of the License at + * + *      http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.container.metadata; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.utils.db.BatchOperationHandler; +import org.apache.hadoop.hdds.utils.db.DBStore; + +import java.io.Closeable; +import java.io.IOException; + +/** + * Interface for interacting with datanode databases. + */ +public interface DBStoreManager extends Closeable { + + /** + * Start datanode manager. + * + * @param configuration - Configuration + * @throws IOException - Unable to start datanode store. + */ + void start(ConfigurationSource configuration) throws IOException; + + /** + * Stop datanode manager. + */ + void stop() throws Exception; + + /** + * Get datanode store. + * + * @return datanode store. + */ + DBStore getStore(); + + /** + * Helper to create and write batch transactions. + */ + BatchOperationHandler getBatchHandler(); + + void flushLog(boolean sync) throws IOException; + + void flushDB() throws IOException; + + void compactDB() throws IOException; + + /** + * Returns if the underlying DB is closed. This call is thread safe. + * @return true if the DB is closed. 
+ */ + boolean isClosed(); + + default void compactionIfNeeded() throws Exception { + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java index 4f54e85da2b..bd1c0fb368a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java @@ -51,27 +51,21 @@ public class DatanodeSchemaOneDBDefinition BLOCK_DATA = new DBColumnFamilyDefinition<>( StringUtils.bytes2String(DEFAULT_COLUMN_FAMILY), - String.class, SchemaOneKeyCodec.get(), - BlockData.class, BlockData.getCodec()); public static final DBColumnFamilyDefinition METADATA = new DBColumnFamilyDefinition<>( StringUtils.bytes2String(DEFAULT_COLUMN_FAMILY), - String.class, SchemaOneKeyCodec.get(), - Long.class, LongCodec.get()); public static final DBColumnFamilyDefinition DELETED_BLOCKS = new DBColumnFamilyDefinition<>( StringUtils.bytes2String(DEFAULT_COLUMN_FAMILY), - String.class, SchemaOneKeyCodec.get(), - ChunkInfoList.class, SchemaOneChunkInfoListCodec.get()); private static final Map>> diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java index d47446d49b0..10537ca6f2d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java @@ -59,45 +59,35 @@ public class DatanodeSchemaThreeDBDefinition BLOCK_DATA = new DBColumnFamilyDefinition<>( "block_data", - String.class, FixedLengthStringCodec.get(), - BlockData.class, BlockData.getCodec()); public static final DBColumnFamilyDefinition METADATA = new DBColumnFamilyDefinition<>( "metadata", - String.class, FixedLengthStringCodec.get(), - Long.class, LongCodec.get()); public static final DBColumnFamilyDefinition DELETE_TRANSACTION = new DBColumnFamilyDefinition<>( "delete_txns", - String.class, FixedLengthStringCodec.get(), - DeletedBlocksTransaction.class, Proto2Codec.get(DeletedBlocksTransaction.getDefaultInstance())); public static final DBColumnFamilyDefinition FINALIZE_BLOCKS = new DBColumnFamilyDefinition<>( "finalize_blocks", - String.class, FixedLengthStringCodec.get(), - Long.class, LongCodec.get()); public static final DBColumnFamilyDefinition LAST_CHUNK_INFO = new DBColumnFamilyDefinition<>( "last_chunk_info", - String.class, FixedLengthStringCodec.get(), - BlockData.class, BlockData.getCodec()); private static String separator = ""; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java index b9e7ec7bd5b..bf6b1d0a29c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java +++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.container.metadata; import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; import org.apache.hadoop.hdds.utils.db.DBDefinition; import org.apache.hadoop.hdds.utils.db.FixedLengthStringCodec; @@ -44,45 +43,35 @@ public class DatanodeSchemaTwoDBDefinition BLOCK_DATA = new DBColumnFamilyDefinition<>( "block_data", - String.class, StringCodec.get(), - BlockData.class, BlockData.getCodec()); public static final DBColumnFamilyDefinition METADATA = new DBColumnFamilyDefinition<>( "metadata", - String.class, StringCodec.get(), - Long.class, LongCodec.get()); public static final DBColumnFamilyDefinition DELETE_TRANSACTION = new DBColumnFamilyDefinition<>( "delete_txns", - Long.class, LongCodec.get(), - StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction.class, Proto2Codec.get(DeletedBlocksTransaction.getDefaultInstance())); public static final DBColumnFamilyDefinition FINALIZE_BLOCKS = new DBColumnFamilyDefinition<>( "finalize_blocks", - String.class, FixedLengthStringCodec.get(), - Long.class, LongCodec.get()); public static final DBColumnFamilyDefinition LAST_CHUNK_INFO = new DBColumnFamilyDefinition<>( "last_chunk_info", - String.class, FixedLengthStringCodec.get(), - BlockData.class, BlockData.getCodec()); public DatanodeSchemaTwoDBDefinition(String dbPath, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java index d791d9bbeab..3ebdc3f6295 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java @@ -17,22 +17,16 @@ */ package org.apache.hadoop.ozone.container.metadata; -import com.google.common.annotations.VisibleForTesting; - import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.hdds.utils.db.BatchOperationHandler; -import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList; import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import java.io.Closeable; import java.io.IOException; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_BLOCK; @@ -40,31 +34,10 @@ /** * Interface for interacting with datanode databases. */ -public interface DatanodeStore extends Closeable { +public interface DatanodeStore extends DBStoreManager { String NO_SUCH_BLOCK_ERR_MSG = "Unable to find the block."; - /** - * Start datanode manager. - * - * @param configuration - Configuration - * @throws IOException - Unable to start datanode store. 
- */ - void start(ConfigurationSource configuration) throws IOException; - - /** - * Stop datanode manager. - */ - void stop() throws Exception; - - /** - * Get datanode store. - * - * @return datanode store. - */ - @VisibleForTesting - DBStore getStore(); - /** * A Table that keeps the block data. * @@ -100,17 +73,6 @@ public interface DatanodeStore extends Closeable { */ Table getLastChunkInfoTable(); - /** - * Helper to create and write batch transactions. - */ - BatchOperationHandler getBatchHandler(); - - void flushLog(boolean sync) throws IOException; - - void flushDB() throws IOException; - - void compactDB() throws IOException; - BlockIterator getBlockIterator(long containerID) throws IOException; @@ -120,15 +82,6 @@ BlockIterator getBlockIterator(long containerID, BlockIterator getFinalizeBlockIterator(long containerID, KeyPrefixFilter filter) throws IOException; - /** - * Returns if the underlying DB is closed. This call is thread safe. - * @return true if the DB is closed. - */ - boolean isClosed(); - - default void compactionIfNeeded() throws Exception { - } - default BlockData getBlockByID(BlockID blockID, String blockKey) throws IOException { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java index 1be5a3819c8..25a49eaabe4 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java @@ -57,6 +57,11 @@ private SchemaOneChunkInfoListCodec() { // singleton } + @Override + public Class getTypeClass() { + return ChunkInfoList.class; + } + @Override public byte[] toPersistedFormat(ChunkInfoList chunkList) { return chunkList.getProtoBufMessage().toByteArray(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneKeyCodec.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneKeyCodec.java index 2f1660f4d2e..add24874a31 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneKeyCodec.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneKeyCodec.java @@ -48,6 +48,11 @@ private SchemaOneKeyCodec() { // singleton } + @Override + public Class getTypeClass() { + return String.class; + } + @Override public byte[] toPersistedFormat(String stringObject) throws IOException { try { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerDBDefinition.java new file mode 100644 index 00000000000..a15ab27a69d --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerDBDefinition.java @@ -0,0 +1,71 @@ +package org.apache.hadoop.ozone.container.metadata; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; +import org.apache.hadoop.hdds.utils.db.DBDefinition; +import org.apache.hadoop.hdds.utils.db.LongCodec; +import org.apache.hadoop.hdds.utils.db.StringCodec; +import org.apache.hadoop.ozone.OzoneConsts; + +import java.util.Map; + +/** + * Class for defining the schema for master volume in a datanode. + */ +public final class WitnessedContainerDBDefinition extends DBDefinition.WithMap { + + private static final String CONTAINER_IDS_TABLE_NAME = "containerIds"; + + public static final DBColumnFamilyDefinition + CONTAINER_IDS_TABLE = new DBColumnFamilyDefinition<>( + CONTAINER_IDS_TABLE_NAME, + LongCodec.get(), + StringCodec.get()); + + private static final Map> + COLUMN_FAMILIES = DBColumnFamilyDefinition.newUnmodifiableMap( + CONTAINER_IDS_TABLE); + + private static final WitnessedContainerDBDefinition INSTANCE = new WitnessedContainerDBDefinition(); + + public static WitnessedContainerDBDefinition get() { + return INSTANCE; + } + + private WitnessedContainerDBDefinition() { + super(COLUMN_FAMILIES); + } + + @Override + public String getName() { + return OzoneConsts.WITNESSED_CONTAINER_DB_NAME; + } + + @Override + public String getLocationConfigKey() { + return ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR; + } + + public DBColumnFamilyDefinition getContainerIdsTable() { + return CONTAINER_IDS_TABLE; + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStore.java new file mode 100644 index 00000000000..b16c7b981ce --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStore.java @@ -0,0 +1,34 @@ +package org.apache.hadoop.ozone.container.metadata; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import org.apache.hadoop.hdds.utils.db.Table; + +/** + * Interface for interacting with database in the master volume of a datanode. 
+ */ +public interface WitnessedContainerMetadataStore extends DBStoreManager { + /** + * A Table that keeps the containerIds in a datanode. + * + * @return Table + */ + Table getContainerIdsTable(); +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java new file mode 100644 index 00000000000..270daf815b2 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java @@ -0,0 +1,78 @@ +package org.apache.hadoop.ozone.container.metadata; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +/** + * Class for interacting with database in the master volume of a datanode. 
+ */ +public final class WitnessedContainerMetadataStoreImpl extends AbstractRDBStore + implements WitnessedContainerMetadataStore { + + private Table containerIdsTable; + private static final ConcurrentMap INSTANCES = + new ConcurrentHashMap<>(); + + public static WitnessedContainerMetadataStore get(ConfigurationSource conf) + throws IOException { + String dbDirPath = DBStoreBuilder.getDBDirPath(WitnessedContainerDBDefinition.get(), conf).getAbsolutePath(); + try { + return INSTANCES.compute(dbDirPath, (k, v) -> { + if (v == null || v.isClosed()) { + try { + return new WitnessedContainerMetadataStoreImpl(conf, false); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + return v; + }); + } catch (UncheckedIOException e) { + throw e.getCause(); + } + } + + private WitnessedContainerMetadataStoreImpl(ConfigurationSource config, boolean openReadOnly) throws IOException { + super(WitnessedContainerDBDefinition.get(), config, openReadOnly); + } + + @Override + protected DBStore initDBStore(DBStoreBuilder dbStoreBuilder, ManagedDBOptions options, ConfigurationSource config) + throws IOException { + DBStore dbStore = dbStoreBuilder.build(); + this.containerIdsTable = this.getDbDef().getContainerIdsTable().getTable(dbStore); + return dbStore; + } + + @Override + public Table getContainerIdsTable() { + return containerIdsTable; + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/BackgroundContainerDataScanner.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/BackgroundContainerDataScanner.java index 1a4f0bf6460..af810c62842 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/BackgroundContainerDataScanner.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/BackgroundContainerDataScanner.java @@ -62,6 +62,7 @@ public BackgroundContainerDataScanner(ContainerScannerConfiguration conf, throttler = new HddsDataTransferThrottler(conf.getBandwidthPerVolume()); canceler = new Canceler(); this.metrics = ContainerDataScannerMetrics.create(volume.toString()); + this.metrics.setStorageDirectory(volume.toString()); this.minScanGap = conf.getContainerScanMinGap(); this.checksumManager = checksumManager; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java index 84ddba759fe..94841c9d2ea 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java @@ -249,6 +249,16 @@ public Iterator> getContainers(HddsVolume volume) { return containerSet.getContainerIterator(volume); } + /** + * Get the number of containers based on the given volume. + * + * @param volume hdds volume. + * @return number of containers. 
+ */ + public long getContainerCount(HddsVolume volume) { + return containerSet.containerCount(volume); + } + void updateDataScanTimestamp(long containerId, Instant timestamp) throws IOException { Container container = containerSet.getContainer(containerId); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScannerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScannerMetrics.java index a3f71d34ba1..76e71312aed 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScannerMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScannerMetrics.java @@ -37,6 +37,8 @@ public final class ContainerDataScannerMetrics @Metric("disk bandwidth used by the container data scanner per volume") private MutableRate numBytesScanned; + private String storageDirectory; + public double getNumBytesScannedMean() { return numBytesScanned.lastStat().mean(); } @@ -66,4 +68,13 @@ public static ContainerDataScannerMetrics create(final String volumeName) { return ms.register(name, null, new ContainerDataScannerMetrics(name, ms)); } + + @Metric("Returns the Directory name for the volume") + public String getStorageDirectory() { + return storageDirectory; + } + + public void setStorageDirectory(final String volumeName) { + this.storageDirectory = volumeName; + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java index 1685d1c5fe2..027fbff89c8 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java @@ -320,8 +320,7 @@ private void resolveDuplicate(KeyValueContainer existing, private void swapAndRemoveContainer(KeyValueContainer existing, KeyValueContainer toAdd) throws IOException { - containerSet.removeContainer( - existing.getContainerData().getContainerID()); + containerSet.removeContainerOnlyFromMemory(existing.getContainerData().getContainerID()); containerSet.addContainer(toAdd); KeyValueContainerUtil.removeContainer(existing.getContainerData(), hddsVolume.getConf()); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OnDemandContainerDataScanner.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OnDemandContainerDataScanner.java index eb0f3eedb03..df5050266bd 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OnDemandContainerDataScanner.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OnDemandContainerDataScanner.java @@ -80,6 +80,9 @@ public static synchronized void init( } private static boolean shouldScan(Container container) { + if (container == null) { + return false; + } long containerID = container.getContainerData().getContainerID(); if (instance == null) { LOG.debug("Skipping on demand scan for container {} since scanner was " + diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index 8ae838a7e53..8316d687b8e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -29,11 +29,15 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.symmetric.SecretKeyVerifierClient; import org.apache.hadoop.hdds.security.token.TokenVerifier; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.utils.HddsServerUtil; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.container.checksum.ContainerChecksumTreeManager; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; @@ -58,6 +62,8 @@ import org.apache.hadoop.ozone.container.common.volume.StorageVolume.VolumeType; import org.apache.hadoop.ozone.container.common.volume.StorageVolumeChecker; import org.apache.hadoop.ozone.container.keyvalue.statemachine.background.StaleRecoveringContainerScrubbingService; +import org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStore; +import org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStoreImpl; import org.apache.hadoop.ozone.container.replication.ContainerImporter; import org.apache.hadoop.ozone.container.replication.ReplicationServer; import org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig; @@ -71,6 +77,7 @@ import java.io.IOException; import java.time.Duration; import java.util.ArrayList; +import java.util.HashMap; import java.util.Iterator; import java.util.LinkedList; import java.util.List; @@ -131,6 +138,7 @@ public class OzoneContainer { private ScheduledExecutorService dbCompactionExecutorService; private final ContainerMetrics metrics; + private WitnessedContainerMetadataStore witnessedContainerMetadataStore; enum InitializingStatus { UNINITIALIZED, INITIALIZING, INITIALIZED @@ -181,12 +189,11 @@ public OzoneContainer(HddsDatanodeService hddsDatanodeService, TimeUnit.MINUTES); } } - long recoveringContainerTimeout = config.getTimeDuration( OZONE_RECOVERING_CONTAINER_TIMEOUT, OZONE_RECOVERING_CONTAINER_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); - - containerSet = new ContainerSet(recoveringContainerTimeout); + this.witnessedContainerMetadataStore = WitnessedContainerMetadataStoreImpl.get(conf); + containerSet = new ContainerSet(witnessedContainerMetadataStore.getContainerIdsTable(), recoveringContainerTimeout); metadataScanner = null; metrics = ContainerMetrics.create(conf); @@ -309,7 +316,7 @@ public GrpcTlsConfig getTlsClientConfig() { * Build's container map after volume format. 
*/ @VisibleForTesting - public void buildContainerSet() { + public void buildContainerSet() throws IOException { Iterator volumeSetIterator = volumeSet.getVolumesList() .iterator(); ArrayList volumeThreads = new ArrayList<>(); @@ -337,6 +344,14 @@ public void buildContainerSet() { for (int i = 0; i < volumeThreads.size(); i++) { volumeThreads.get(i).join(); } + try (TableIterator> itr = + containerSet.getContainerIdsTable().iterator()) { + Map containerIds = new HashMap<>(); + while (itr.hasNext()) { + containerIds.put(itr.next().getKey(), 0L); + } + containerSet.buildMissingContainerSetAndValidate(containerIds); + } } catch (InterruptedException ex) { LOG.error("Volume Threads Interrupted exception", ex); Thread.currentThread().interrupt(); @@ -392,6 +407,18 @@ private void initContainerScanner(ContainerScannerConfiguration c) { } } + /** + * We need to inject the containerController into the hddsVolume. + * because we need to obtain the container count + * for each disk based on the container controller. + */ + private void initHddsVolumeContainer() { + for (StorageVolume v : volumeSet.getVolumesList()) { + HddsVolume hddsVolume = (HddsVolume) v; + hddsVolume.setController(controller); + } + } + private void initMetadataScanner(ContainerScannerConfiguration c) { if (this.metadataScanner == null) { this.metadataScanner = @@ -490,6 +517,8 @@ public void start(String clusterId) throws IOException { blockDeletingService.start(); recoveringContainerScrubbingService.start(); + initHddsVolumeContainer(); + // mark OzoneContainer as INITIALIZED. initializingStatus.set(InitializingStatus.INITIALIZED); } @@ -517,11 +546,21 @@ public void stop() { } blockDeletingService.shutdown(); recoveringContainerScrubbingService.shutdown(); + IOUtils.closeQuietly(metrics); ContainerMetrics.remove(); checksumTreeManager.stop(); + if (this.witnessedContainerMetadataStore != null) { + try { + this.witnessedContainerMetadataStore.stop(); + } catch (Exception e) { + LOG.error("Error while stopping witnessedContainerMetadataStore. 
Status of store: {}", + witnessedContainerMetadataStore.isClosed(), e); + } + this.witnessedContainerMetadataStore = null; + } } - public void handleVolumeFailures() { + public void handleVolumeFailures() throws StorageContainerException { if (containerSet != null) { containerSet.handleVolumeFailures(context); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java index 9e5b5dbdabd..db86882bfb8 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java @@ -128,7 +128,7 @@ public void importContainer(long containerID, Path tarFilePath, try (FileInputStream input = new FileInputStream(tarFilePath.toFile())) { Container container = controller.importContainer( containerData, input, packer); - containerSet.addContainer(container); + containerSet.addContainerByOverwriteMissingContainer(container); } } finally { importContainerProgress.remove(containerID); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java index 6bc237207b3..26cd0d82a99 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.container.replication; import java.io.IOException; -import java.io.InputStream; import java.io.OutputStream; import java.util.HashSet; import java.util.Set; @@ -59,37 +58,24 @@ public class GrpcReplicationService extends private final ContainerReplicationSource source; private final ContainerImporter importer; - private final boolean zeroCopyEnabled; - private final ZeroCopyMessageMarshaller sendContainerZeroCopyMessageMarshaller; private final ZeroCopyMessageMarshaller copyContainerZeroCopyMessageMarshaller; - public GrpcReplicationService(ContainerReplicationSource source, - ContainerImporter importer, boolean zeroCopyEnabled) { + public GrpcReplicationService(ContainerReplicationSource source, ContainerImporter importer) { this.source = source; this.importer = importer; - this.zeroCopyEnabled = zeroCopyEnabled; - - if (zeroCopyEnabled) { - sendContainerZeroCopyMessageMarshaller = new ZeroCopyMessageMarshaller<>( - SendContainerRequest.getDefaultInstance()); - copyContainerZeroCopyMessageMarshaller = new ZeroCopyMessageMarshaller<>( - CopyContainerRequestProto.getDefaultInstance()); - } else { - sendContainerZeroCopyMessageMarshaller = null; - copyContainerZeroCopyMessageMarshaller = null; - } + + sendContainerZeroCopyMessageMarshaller = new ZeroCopyMessageMarshaller<>( + SendContainerRequest.getDefaultInstance()); + copyContainerZeroCopyMessageMarshaller = new ZeroCopyMessageMarshaller<>( + CopyContainerRequestProto.getDefaultInstance()); } public ServerServiceDefinition bindServiceWithZeroCopy() { ServerServiceDefinition orig = super.bindService(); - if (!zeroCopyEnabled) { - LOG.info("Zerocopy is not enabled."); - return orig; - } Set methodNames = new HashSet<>(); ServerServiceDefinition.Builder 
builder = @@ -155,14 +141,7 @@ public void download(CopyContainerRequestProto request, } finally { // output may have already been closed, ignore such errors IOUtils.cleanupWithLogger(LOG, outputStream); - - if (copyContainerZeroCopyMessageMarshaller != null) { - InputStream popStream = - copyContainerZeroCopyMessageMarshaller.popStream(request); - if (popStream != null) { - IOUtils.cleanupWithLogger(LOG, popStream); - } - } + copyContainerZeroCopyMessageMarshaller.release(request); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java index b4e92a4a60a..6ca474bdd8a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java @@ -99,13 +99,12 @@ public ReplicationServer(ContainerController controller, new LinkedBlockingQueue<>(replicationQueueLimit), threadFactory); - init(replicationConfig.isZeroCopyEnable()); + init(); } - public void init(boolean enableZeroCopy) { + public void init() { GrpcReplicationService grpcReplicationService = new GrpcReplicationService( - new OnDemandContainerReplicationSource(controller), importer, - enableZeroCopy); + new OnDemandContainerReplicationSource(controller), importer); NettyServerBuilder nettyServerBuilder = NettyServerBuilder.forPort(port) .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE) .addService(ServerInterceptors.intercept( @@ -203,11 +202,6 @@ public static final class ReplicationConfig { static final String REPLICATION_OUTOFSERVICE_FACTOR_KEY = PREFIX + "." + OUTOFSERVICE_FACTOR_KEY; - public static final String ZEROCOPY_ENABLE_KEY = "zerocopy.enabled"; - private static final boolean ZEROCOPY_ENABLE_DEFAULT = true; - private static final String ZEROCOPY_ENABLE_DEFAULT_VALUE = - "true"; - /** * The maximum number of replication commands a single datanode can execute * simultaneously. @@ -249,15 +243,6 @@ public static final class ReplicationConfig { ) private double outOfServiceFactor = OUTOFSERVICE_FACTOR_DEFAULT; - @Config(key = ZEROCOPY_ENABLE_KEY, - type = ConfigType.BOOLEAN, - defaultValue = ZEROCOPY_ENABLE_DEFAULT_VALUE, - tags = {DATANODE, SCM}, - description = "Specify if zero-copy should be enabled for " + - "replication protocol." 
- ) - private boolean zeroCopyEnable = ZEROCOPY_ENABLE_DEFAULT; - public double getOutOfServiceFactor() { return outOfServiceFactor; } @@ -291,14 +276,6 @@ public void setReplicationQueueLimit(int limit) { this.replicationQueueLimit = limit; } - public boolean isZeroCopyEnable() { - return zeroCopyEnable; - } - - public void setZeroCopyEnable(boolean zeroCopyEnable) { - this.zeroCopyEnable = zeroCopyEnable; - } - @PostConstruct public void validate() { if (replicationMaxStreams < 1) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java index 92ff4b6d8d6..9513cac84ef 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java @@ -43,6 +43,8 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig; @@ -50,6 +52,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -77,6 +80,10 @@ public final class ReplicationSupervisor { private final Map failureCounter = new ConcurrentHashMap<>(); private final Map timeoutCounter = new ConcurrentHashMap<>(); private final Map skippedCounter = new ConcurrentHashMap<>(); + private final Map queuedCounter = new ConcurrentHashMap<>(); + + private final MetricsRegistry registry; + private final Map opsLatencyMs = new ConcurrentHashMap<>(); private static final Map METRICS_MAP; @@ -218,6 +225,7 @@ private ReplicationSupervisor(StateContext context, ExecutorService executor, nodeStateUpdated(dn.getPersistedOpState()); } } + registry = new MetricsRegistry(ReplicationSupervisor.class.getSimpleName()); } /** @@ -240,6 +248,9 @@ public void addTask(AbstractReplicationTask task) { failureCounter.put(task.getMetricName(), new AtomicLong(0)); timeoutCounter.put(task.getMetricName(), new AtomicLong(0)); skippedCounter.put(task.getMetricName(), new AtomicLong(0)); + queuedCounter.put(task.getMetricName(), new AtomicLong(0)); + opsLatencyMs.put(task.getMetricName(), registry.newRate( + task.getClass().getSimpleName() + "Ms")); METRICS_MAP.put(task.getMetricName(), task.getMetricDescriptionSegment()); } } @@ -253,6 +264,7 @@ public void addTask(AbstractReplicationTask task) { taskCounter.computeIfAbsent(task.getClass(), k -> new AtomicInteger()).incrementAndGet(); } + queuedCounter.get(task.getMetricName()).incrementAndGet(); executor.execute(new TaskRunner(task)); } } @@ -353,6 +365,7 @@ public TaskRunner(AbstractReplicationTask task) { @Override public void run() { + final long startTime = Time.monotonicNow(); try { requestCounter.get(task.getMetricName()).incrementAndGet(); @@ -401,6 +414,8 @@ public void 
run() { LOG.warn("Failed {}", this, e); failureCounter.get(task.getMetricName()).incrementAndGet(); } finally { + queuedCounter.get(task.getMetricName()).decrementAndGet(); + opsLatencyMs.get(task.getMetricName()).add(Time.monotonicNow() - startTime); inFlight.remove(task); decrementTaskCounter(task); } @@ -511,4 +526,22 @@ public long getReplicationSkippedCount(String metricsName) { return counter != null ? counter.get() : 0; } + public long getReplicationQueuedCount() { + return getCount(queuedCounter); + } + + public long getReplicationQueuedCount(String metricsName) { + AtomicLong counter = queuedCounter.get(metricsName); + return counter != null ? counter.get() : 0; + } + + public long getReplicationRequestAvgTime(String metricsName) { + MutableRate rate = opsLatencyMs.get(metricsName); + return rate != null ? (long) rate.lastStat().mean() : 0; + } + + public long getReplicationRequestTotalTime(String metricsName) { + MutableRate rate = opsLatencyMs.get(metricsName); + return rate != null ? (long) rate.lastStat().total() : 0; + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorMetrics.java index a1763976af9..cd1103a0c46 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorMetrics.java @@ -67,7 +67,7 @@ public void getMetrics(MetricsCollector collector, boolean all) { supervisor.getTotalInFlightReplications()) .addGauge(Interns.info("numQueuedReplications", "Number of replications in queue"), - supervisor.getQueueSize()) + supervisor.getReplicationQueuedCount()) .addGauge(Interns.info("numRequestedReplications", "Number of requested replications"), supervisor.getReplicationRequestCount()) @@ -107,7 +107,10 @@ public void getMetrics(MetricsCollector collector, boolean all) { .addGauge(Interns.info("numSkipped" + metricsName, "Number of " + descriptionSegment + " skipped as the container is " + "already present"), - supervisor.getReplicationSkippedCount(metricsName)); + supervisor.getReplicationSkippedCount(metricsName)) + .addGauge(Interns.info("numQueued" + metricsName, + "Number of " + descriptionSegment + " in queue"), + supervisor.getReplicationQueuedCount(metricsName)); } }); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SendContainerRequestHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SendContainerRequestHandler.java index 506a96fe051..40b4dec3493 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SendContainerRequestHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SendContainerRequestHandler.java @@ -30,7 +30,6 @@ import org.slf4j.LoggerFactory; import java.io.IOException; -import java.io.InputStream; import java.io.OutputStream; import java.nio.file.Files; import java.nio.file.Path; @@ -105,10 +104,7 @@ public void onNext(SendContainerRequest req) { onError(t); } finally { if (marshaller != null) { - InputStream popStream = marshaller.popStream(req); - if (popStream != null) { - IOUtils.cleanupWithLogger(LOG, popStream); - } + marshaller.release(req); } } 
} diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-overview.html b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-overview.html index fd3d7407d23..4f51b423e8a 100644 --- a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-overview.html +++ b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-overview.html @@ -22,8 +22,32 @@ +

    HeartBeat Information

+    <table class="table">
+      <thead>
+        <tr>
+          <th>Address</th>
+          <th>Last Successful HeartBeat</th>
+          <th>Missed Count</th>
+          <th>State</th>
+          <th>Type</th>
+          <th>Version Number</th>
+        </tr>
+      </thead>
+      <tbody ng-repeat="scms in heartbeatmetrics">
+        <tr ng-repeat="scm in scms.SCMServers">
+          <td>{{scm.addressString}}</td>
+          <td>{{scm.lastSuccessfulHeartbeat}}</td>
+          <td>{{scm.missedCount}}</td>
+          <td>{{scm.state}}</td>
+          <td>{{scm.type}}</td>
+          <td>{{scm.versionNumber}}</td>
+        </tr>
+      </tbody>
+    </table>

    Volume Information

    - +
    @@ -33,6 +57,7 @@

    Volume Information

    + @@ -45,6 +70,7 @@

    Volume Information

    + diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-scanner.html b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-scanner.html new file mode 100644 index 00000000000..5c54a2aa0a7 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-scanner.html @@ -0,0 +1,47 @@ + + + + + DataNode Scanner Status + + +

    DataNode Scanner Information

    +
    Directory Available Space Reserved Total CapacityContainers State
    {{volumeInfo.Available}} {{volumeInfo.Reserved}} {{volumeInfo.TotalCapacity}}{{volumeInfo.Containers}} {{volumeInfo["tag.VolumeState"]}}
+    <table class="table">
+      <thead>
+        <tr>
+          <th>Directory</th>
+          <th>NumBytesScannedNumOps</th>
+          <th>NumBytesScannedAvgTime</th>
+          <th>NumContainersScanned</th>
+          <th>NumScanIterations</th>
+          <th>NumUnHealthyContainers</th>
+        </tr>
+      </thead>
+      <tbody>
+        <tr ng-repeat="scanner in scannerStatusCtrl.dnscanner">
+          <td>{{scanner["tag.StorageDirectory"]}}</td>
+          <td>{{scanner.NumBytesScannedNumOps}}</td>
+          <td>{{scanner.NumBytesScannedAvgTime | millisecondsToMinutes}}</td>
+          <td>{{scanner.NumContainersScanned}}</td>
+          <td>{{scanner.NumScanIterations}}</td>
+          <td>{{scanner.NumUnHealthyContainers}}</td>
+        </tr>
+      </tbody>
+    </table>
    + + \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn.js b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn.js index adc507acce9..547e566ef8a 100644 --- a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn.js +++ b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn.js @@ -36,20 +36,104 @@ volume.TotalCapacity = transform(volume.TotalCapacity); }) }); + + $http.get("jmx?qry=Hadoop:service=HddsDatanode,name=SCMConnectionManager") + .then(function (result) { + ctrl.heartbeatmetrics = result.data.beans; + ctrl.heartbeatmetrics.forEach(scm => { + var scmServers = scm.SCMServers; + scmServers.forEach(scmServer => { + scmServer.lastSuccessfulHeartbeat = convertTimestampToDate(scmServer.lastSuccessfulHeartbeat) + }) + }) + }); } }); - function transform(v) { - var UNITS = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'ZB']; - var prev = 0, i = 0; - while (Math.floor(v) > 0 && i < UNITS.length) { + + // Register ioStatus Controller + angular.module('ozone').config(function ($routeProvider) { + $routeProvider.when('/iostatus', { + templateUrl: 'iostatus.html', + controller: 'IOStatusController as ioStatusCtrl', + }); + }); + + angular.module('ozone') + .controller('IOStatusController', function ($http) { + var ctrl = this; + $http.get("jmx?qry=Hadoop:service=HddsDatanode,name=VolumeIOStats*") + .then(function (result) { + ctrl.dniostatus = result.data.beans; + }); + }); + + // Register Scanner Controller + angular.module('ozone').config(function ($routeProvider) { + $routeProvider.when('/dn-scanner', { + templateUrl: 'dn-scanner.html', + controller: 'DNScannerController as scannerStatusCtrl', + }); + }); + + angular.module('ozone') + .controller('DNScannerController', function ($http) { + var ctrl = this; + $http.get("jmx?qry=Hadoop:service=HddsDatanode,name=ContainerDataScannerMetrics*") + .then(function (result) { + ctrl.dnscanner = result.data.beans; + }); + }); + + angular.module('ozone') + .filter('millisecondsToMinutes', function() { + return function(milliseconds) { + if (isNaN(milliseconds)) { + return 'Invalid input'; + } + var minutes = Math.floor(milliseconds / 60000); // 1 minute = 60000 milliseconds + var seconds = Math.floor((milliseconds % 60000) / 1000); + return minutes + ' mins ' + seconds + ' secs'; + }; + }); + + angular.module('ozone') + .filter('twoDecimalPlaces', function() { + return function(input) { + if (isNaN(input)) { + return 'Invalid input'; + } + return parseFloat(input).toFixed(2); + }; + }); + + function transform(v) { + var UNITS = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'ZB']; + var prev = 0, i = 0; + while (Math.floor(v) > 0 && i < UNITS.length) { prev = v; v /= 1024; i += 1; - } - if (i > 0 && i < UNITS.length) { + } + if (i > 0 && i < UNITS.length) { v = prev; i -= 1; - } - return Math.round(v * 100) / 100 + ' ' + UNITS[i]; } + return Math.round(v * 100) / 100 + ' ' + UNITS[i]; + } + + function convertTimestampToDate(timestamp) { + if (!timestamp) return ''; + var milliseconds = timestamp * 1000; + + var date = new Date(milliseconds); + + var year = date.getFullYear(); + var month = date.getMonth() + 1; + var day = date.getDate(); + var hours = date.getHours(); + var minutes = date.getMinutes(); + var seconds = date.getSeconds(); + + return `${year}-${month.toString().padStart(2, '0')}-${day.toString().padStart(2, '0')} ${hours.toString().padStart(2, '0')}:${minutes.toString().padStart(2, '0')}:${seconds.toString().padStart(2, '0')}`; + } 
})(); diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/index.html b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/index.html index 1c32fe64e0e..0e1cbf21a00 100644 --- a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/index.html +++ b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/index.html @@ -49,11 +49,10 @@ HDDS Datanode Service - - - - + diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/iostatus.html b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/iostatus.html new file mode 100644 index 00000000000..94916821bd8 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/iostatus.html @@ -0,0 +1,76 @@ + + + + + DataNode IO Status + + + +

    Read Performance

+    <table class="table">
+      <thead>
+        <tr>
+          <th>Directory</th>
+          <th>ReadBytes</th>
+          <th>ReadOpCount</th>
+          <th>ReadAvgTime</th>
+          <th>ReadLatency60s(P90)</th>
+          <th>ReadLatency60s(P95)</th>
+          <th>ReadLatency60s(P99)</th>
+        </tr>
+      </thead>
+      <tbody>
+        <tr ng-repeat="volumeInfo in ioStatusCtrl.dniostatus">
+          <td>{{volumeInfo["tag.StorageDirectory"]}}</td>
+          <td>{{volumeInfo.ReadBytes}}</td>
+          <td>{{volumeInfo.ReadOpCount}}</td>
+          <td>{{volumeInfo.ReadTimeAvgTime | twoDecimalPlaces}} ms</td>
+          <td>{{volumeInfo.ReadLatency60s90thPercentileLatency | twoDecimalPlaces}} ms</td>
+          <td>{{volumeInfo.ReadLatency60s95thPercentileLatency | twoDecimalPlaces}} ms</td>
+          <td>{{volumeInfo.ReadLatency60s99thPercentileLatency | twoDecimalPlaces}} ms</td>
+        </tr>
+      </tbody>
+    </table>
    + +

    Write Performance

+    <table class="table">
+      <thead>
+        <tr>
+          <th>Directory</th>
+          <th>WriteBytes</th>
+          <th>WriteOpCount</th>
+          <th>WriteAvgTime</th>
+          <th>WriteLatency60s(P90)</th>
+          <th>WriteLatency60s(P95)</th>
+          <th>WriteLatency60s(P99)</th>
+        </tr>
+      </thead>
+      <tbody>
+        <tr ng-repeat="volumeInfo in ioStatusCtrl.dniostatus">
+          <td>{{volumeInfo["tag.StorageDirectory"]}}</td>
+          <td>{{volumeInfo.WriteBytes}}</td>
+          <td>{{volumeInfo.WriteOpCount}}</td>
+          <td>{{volumeInfo.WriteTimeAvgTime | twoDecimalPlaces}} ms</td>
+          <td>{{volumeInfo.WriteLatency60s90thPercentileLatency | twoDecimalPlaces}} ms</td>
+          <td>{{volumeInfo.WriteLatency60s95thPercentileLatency | twoDecimalPlaces}} ms</td>
+          <td>{{volumeInfo.WriteLatency60s99thPercentileLatency | twoDecimalPlaces}} ms</td>
+        </tr>
+      </tbody>
+    </table>
    + + \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java index 3b9c4a93ec5..e52328bafd0 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java @@ -163,11 +163,11 @@ public static DatanodeDetails createDatanodeDetails() { .nextInt(256) + "." + random.nextInt(256); DatanodeDetails.Port containerPort = - DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, 0); + DatanodeDetails.newStandalonePort(0); DatanodeDetails.Port ratisPort = - DatanodeDetails.newPort(DatanodeDetails.Port.Name.RATIS, 0); + DatanodeDetails.newRatisPort(0); DatanodeDetails.Port restPort = - DatanodeDetails.newPort(DatanodeDetails.Port.Name.REST, 0); + DatanodeDetails.newRestPort(0); DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); builder.setUuid(UUID.randomUUID()) .setHostName("localhost") @@ -414,7 +414,7 @@ public static ContainerController getEmptyContainerController() { public static XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, - dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); + dn.getRatisPort().getValue()); return XceiverServerRatis.newXceiverServerRatis(null, dn, conf, getNoopContainerDispatcher(), getEmptyContainerController(), diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java index e1e1ee9172a..41be7acbb14 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java @@ -198,8 +198,7 @@ public void testDatanodeStateContext() throws IOException, OzoneConsts.OZONE_SCM_DATANODE_ID_FILE_DEFAULT); idPath.delete(); DatanodeDetails datanodeDetails = getNewDatanodeDetails(); - DatanodeDetails.Port port = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, + DatanodeDetails.Port port = DatanodeDetails.newStandalonePort( OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); datanodeDetails.setPort(port); ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath, conf); @@ -325,8 +324,7 @@ public void testDatanodeStateMachineWithIdWriteFail() throws Exception { OzoneConsts.OZONE_SCM_DATANODE_ID_FILE_DEFAULT); idPath.delete(); DatanodeDetails datanodeDetails = getNewDatanodeDetails(); - DatanodeDetails.Port port = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, + DatanodeDetails.Port port = DatanodeDetails.newStandalonePort( OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); datanodeDetails.setPort(port); @@ -408,12 +406,9 @@ public void testDatanodeStateMachineWithInvalidConfiguration() } private DatanodeDetails getNewDatanodeDetails() { - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = 
DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); + DatanodeDetails.Port containerPort = DatanodeDetails.newStandalonePort(0); + DatanodeDetails.Port ratisPort = DatanodeDetails.newRatisPort(0); + DatanodeDetails.Port restPort = DatanodeDetails.newRestPort(0); DatanodeDetails.Port streamPort = DatanodeDetails.newPort( DatanodeDetails.Port.Name.RATIS_DATASTREAM, 0); return DatanodeDetails.newBuilder() diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestContainerUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestContainerUtils.java index 4f33e833a3c..f825be46882 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestContainerUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestContainerUtils.java @@ -38,7 +38,6 @@ import java.nio.charset.StandardCharsets; import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type.ReadChunk; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getReadChunkResponse; @@ -92,7 +91,7 @@ public void testTarName() throws IOException { public void testDatanodeIDPersistent(@TempDir File tempDir) throws Exception { // Generate IDs for testing DatanodeDetails id1 = randomDatanodeDetails(); - id1.setPort(DatanodeDetails.newPort(Port.Name.STANDALONE, 1)); + id1.setPort(DatanodeDetails.newStandalonePort(1)); assertWriteRead(tempDir, id1); // Add certificate serial id. diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeIdYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeIdYaml.java index 8a272868146..1a0401de7e8 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeIdYaml.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeIdYaml.java @@ -73,7 +73,7 @@ void testWriteReadBeforeRatisDatastreamPortLayoutVersion(@TempDir File dir) // if no separate admin/server/datastream port, return single Ratis one for // compat assertEquals(read.getPort(DatanodeDetails.Port.Name.RATIS_DATASTREAM), - read.getPort(DatanodeDetails.Port.Name.RATIS)); + read.getRatisPort()); } @Test diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java index 05bebdd1b90..fc107414d40 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java @@ -510,12 +510,9 @@ static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, // This method has to be removed once we move scm/TestUtils.java // from server-scm project to container-service or to common project. 
private static DatanodeDetails randomDatanodeDetails() { - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); + DatanodeDetails.Port containerPort = DatanodeDetails.newStandalonePort(0); + DatanodeDetails.Port ratisPort = DatanodeDetails.newRatisPort(0); + DatanodeDetails.Port restPort = DatanodeDetails.newRestPort(0); DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); builder.setUuid(UUID.randomUUID()) .setHostName("localhost") diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java index a3b60aa36da..2e1e0eafd01 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java @@ -273,12 +273,9 @@ private CloseContainerCommand forceCloseWithoutPipeline() { */ private static DatanodeDetails randomDatanodeDetails() { String ipAddress = "127.0.0.1"; - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); + DatanodeDetails.Port containerPort = DatanodeDetails.newStandalonePort(0); + DatanodeDetails.Port ratisPort = DatanodeDetails.newRatisPort(0); + DatanodeDetails.Port restPort = DatanodeDetails.newRestPort(0); DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); builder.setUuid(UUID.randomUUID()) .setHostName("localhost") diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReconstructECContainersCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReconstructECContainersCommandHandler.java new file mode 100644 index 00000000000..7e6c7608180 --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReconstructECContainersCommandHandler.java @@ -0,0 +1,139 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; + +import com.google.protobuf.ByteString; +import com.google.protobuf.Proto2Utils; +import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; +import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; +import org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics; +import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; +import org.apache.hadoop.ozone.container.common.statemachine.StateContext; +import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCoordinator; +import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; +import org.apache.hadoop.ozone.container.replication.ReplicationSupervisor; +import org.apache.hadoop.ozone.protocol.commands.ReconstructECContainersCommand; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.when; + +/** + * Test cases to verify {@link ReconstructECContainersCommandHandler}. 
+ */ +public class TestReconstructECContainersCommandHandler { + private OzoneConfiguration conf; + private ReplicationSupervisor supervisor; + private ECReconstructionCoordinator coordinator; + private OzoneContainer ozoneContainer; + private StateContext stateContext; + private SCMConnectionManager connectionManager; + + @BeforeEach + public void setUp() { + supervisor = mock(ReplicationSupervisor.class); + coordinator = mock(ECReconstructionCoordinator.class); + conf = new OzoneConfiguration(); + ozoneContainer = mock(OzoneContainer.class); + connectionManager = mock(SCMConnectionManager.class); + stateContext = mock(StateContext.class); + } + + @Test + public void testMetrics() { + ReconstructECContainersCommandHandler commandHandler = + new ReconstructECContainersCommandHandler(conf, supervisor, coordinator); + doNothing().when(supervisor).addTask(any()); + Map handlerMap = new HashMap<>(); + handlerMap.put(commandHandler.getCommandType(), commandHandler); + CommandHandlerMetrics metrics = CommandHandlerMetrics.create(handlerMap); + try { + byte[] missingIndexes = {1, 2}; + ByteString missingContainerIndexes = Proto2Utils.unsafeByteString(missingIndexes); + ECReplicationConfig ecReplicationConfig = new ECReplicationConfig(3, 2); + List dnDetails = getDNDetails(5); + List sources = + dnDetails.stream().map(a -> new ReconstructECContainersCommand + .DatanodeDetailsAndReplicaIndex(a, dnDetails.indexOf(a))) + .collect(Collectors.toList()); + List targets = getDNDetails(2); + ReconstructECContainersCommand reconstructECContainersCommand = + new ReconstructECContainersCommand(1L, sources, targets, + missingContainerIndexes, ecReplicationConfig); + + commandHandler.handle(reconstructECContainersCommand, ozoneContainer, + stateContext, connectionManager); + String metricsName = "ECReconstructions"; + assertEquals(commandHandler.getMetricsName(), metricsName); + when(supervisor.getReplicationRequestCount(metricsName)).thenReturn(1L); + assertEquals(commandHandler.getInvocationCount(), 1); + + commandHandler.handle(new ReconstructECContainersCommand(2L, sources, + targets, missingContainerIndexes, ecReplicationConfig), ozoneContainer, + stateContext, connectionManager); + commandHandler.handle(new ReconstructECContainersCommand(3L, sources, + targets, missingContainerIndexes, ecReplicationConfig), ozoneContainer, + stateContext, connectionManager); + commandHandler.handle(new ReconstructECContainersCommand(4L, sources, + targets, missingContainerIndexes, ecReplicationConfig), ozoneContainer, + stateContext, connectionManager); + commandHandler.handle(new ReconstructECContainersCommand(5L, sources, + targets, missingContainerIndexes, ecReplicationConfig), ozoneContainer, + stateContext, connectionManager); + commandHandler.handle(new ReconstructECContainersCommand(6L, sources, + targets, missingContainerIndexes, ecReplicationConfig), ozoneContainer, + stateContext, connectionManager); + + when(supervisor.getReplicationRequestCount(metricsName)).thenReturn(5L); + when(supervisor.getReplicationRequestTotalTime(metricsName)).thenReturn(10L); + when(supervisor.getReplicationRequestAvgTime(metricsName)).thenReturn(2L); + when(supervisor.getReplicationQueuedCount(metricsName)).thenReturn(1L); + assertEquals(commandHandler.getInvocationCount(), 5); + assertEquals(commandHandler.getQueuedCount(), 1); + assertEquals(commandHandler.getTotalRunTime(), 10); + assertEquals(commandHandler.getAverageRunTime(), 2); + + MetricsCollectorImpl metricsCollector = new MetricsCollectorImpl(); + 
metrics.getMetrics(metricsCollector, true); + assertEquals(1, metricsCollector.getRecords().size()); + } finally { + metrics.unRegister(); + } + } + + private List getDNDetails(int numDns) { + List dns = new ArrayList<>(); + for (int i = 0; i < numDns; i++) { + dns.add(MockDatanodeDetails.randomDatanodeDetails()); + } + return dns; + } +} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerCommandHandler.java new file mode 100644 index 00000000000..9de00877e5b --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerCommandHandler.java @@ -0,0 +1,118 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; +import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; +import org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics; +import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; +import org.apache.hadoop.ozone.container.common.statemachine.StateContext; +import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; +import org.apache.hadoop.ozone.container.replication.ContainerReplicator; +import org.apache.hadoop.ozone.container.replication.ReplicationSupervisor; +import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.doNothing; + +/** + * Test cases to verify {@link ReplicateContainerCommandHandler}. 
+ */ +public class TestReplicateContainerCommandHandler { + private OzoneConfiguration conf; + private ReplicationSupervisor supervisor; + private ContainerReplicator downloadReplicator; + private ContainerReplicator pushReplicator; + private OzoneContainer ozoneContainer; + private StateContext stateContext; + private SCMConnectionManager connectionManager; + + @BeforeEach + public void setUp() { + conf = new OzoneConfiguration(); + supervisor = mock(ReplicationSupervisor.class); + downloadReplicator = mock(ContainerReplicator.class); + pushReplicator = mock(ContainerReplicator.class); + ozoneContainer = mock(OzoneContainer.class); + connectionManager = mock(SCMConnectionManager.class); + stateContext = mock(StateContext.class); + } + + @Test + public void testMetrics() { + ReplicateContainerCommandHandler commandHandler = + new ReplicateContainerCommandHandler(conf, supervisor, + downloadReplicator, pushReplicator); + Map handlerMap = new HashMap<>(); + handlerMap.put(commandHandler.getCommandType(), commandHandler); + CommandHandlerMetrics metrics = CommandHandlerMetrics.create(handlerMap); + try { + doNothing().when(supervisor).addTask(any()); + DatanodeDetails source = MockDatanodeDetails.randomDatanodeDetails(); + DatanodeDetails target = MockDatanodeDetails.randomDatanodeDetails(); + List sourceList = new ArrayList<>(); + sourceList.add(source); + + ReplicateContainerCommand command = ReplicateContainerCommand.fromSources( + 1, sourceList); + commandHandler.handle(command, ozoneContainer, stateContext, connectionManager); + String metricsName = "ContainerReplications"; + assertEquals(commandHandler.getMetricsName(), metricsName); + when(supervisor.getReplicationRequestCount(metricsName)).thenReturn(1L); + assertEquals(commandHandler.getInvocationCount(), 1); + + commandHandler.handle(ReplicateContainerCommand.fromSources(2, sourceList), + ozoneContainer, stateContext, connectionManager); + commandHandler.handle(ReplicateContainerCommand.fromSources(3, sourceList), + ozoneContainer, stateContext, connectionManager); + commandHandler.handle(ReplicateContainerCommand.toTarget(4, target), + ozoneContainer, stateContext, connectionManager); + commandHandler.handle(ReplicateContainerCommand.toTarget(5, target), + ozoneContainer, stateContext, connectionManager); + commandHandler.handle(ReplicateContainerCommand.fromSources(6, sourceList), + ozoneContainer, stateContext, connectionManager); + + when(supervisor.getReplicationRequestCount(metricsName)).thenReturn(5L); + when(supervisor.getReplicationRequestTotalTime(metricsName)).thenReturn(10L); + when(supervisor.getReplicationRequestAvgTime(metricsName)).thenReturn(3L); + when(supervisor.getReplicationQueuedCount(metricsName)).thenReturn(1L); + assertEquals(commandHandler.getInvocationCount(), 5); + assertEquals(commandHandler.getQueuedCount(), 1); + assertEquals(commandHandler.getTotalRunTime(), 10); + assertEquals(commandHandler.getAverageRunTime(), 3); + + MetricsCollectorImpl metricsCollector = new MetricsCollectorImpl(); + metrics.getMetrics(metricsCollector, true); + assertEquals(1, metricsCollector.getRecords().size()); + } finally { + metrics.unRegister(); + } + } +} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java index 5e2dd0c75c9..5e0a31944f7 100644 --- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java @@ -20,6 +20,7 @@ import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.ConfigurationException; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory; import org.apache.hadoop.hdds.scm.ScmConfigKeys; @@ -37,6 +38,7 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED_PERCENT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertNotEquals; /** @@ -166,6 +168,16 @@ public void testInvalidConfig() throws Exception { assertEquals(getExpectedDefaultReserved(hddsVolume2), reservedFromVolume2); } + @Test + public void testInvalidConfigThrowsException() { + OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED, "15GB"); + + assertThrows(ConfigurationException.class, + () -> volumeBuilder.conf(conf).build(), + "Reserved space should be configured in a pair"); + } + @Test public void testPathsCanonicalized() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeIOStatsWithPrometheusSink.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeIOStatsWithPrometheusSink.java index c8934bab416..1df886098ab 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeIOStatsWithPrometheusSink.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeIOStatsWithPrometheusSink.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.container.common.volume; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.server.http.PrometheusMetricsSink; import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; @@ -30,6 +31,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_DATANODE_IO_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY; /** * Test PrometheusMetricSink regarding VolumeIOStats. 
@@ -54,11 +56,14 @@ public void tearDown() { @Test public void testMultipleVolumeIOMetricsExist() throws IOException { + OzoneConfiguration conf = new OzoneConfiguration(); + int[] intervals = conf.getInts(OZONE_DATANODE_IO_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY); + //GIVEN VolumeIOStats volumeIOStats1 = new VolumeIOStats("VolumeIOStat1", - "vol1/dir"); + "vol1/dir", intervals); VolumeIOStats volumeIOStat2 = new VolumeIOStats("VolumeIOStat2", - "vol2/dir"); + "vol2/dir", intervals); //WHEN String writtenMetrics = publishMetricsAndGetOutput(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java index 55df5f43b6b..0b24161aadb 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java @@ -340,6 +340,7 @@ public void testVolumeFailure() throws IOException { conSet.handleVolumeFailures(stateContext); // ContainerID1 should be removed belonging to failed volume assertNull(conSet.getContainer(containerID1)); + assertTrue(conSet.getMissingContainerSet().contains(containerID1)); // ContainerID should exist belonging to normal volume assertNotNull(conSet.getContainer(containerID)); expectedReportCount.put( diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java index e1a3de30ddf..584db675d93 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java @@ -689,7 +689,7 @@ public void testContainerRocksDB(ContainerTestVersionInfo versionInfo) try (DBHandle db = BlockUtils.getDB(keyValueContainerData, CONF)) { RDBStore store = (RDBStore) db.getStore().getStore(); - long defaultCacheSize = 64 * OzoneConsts.MB; + long defaultCacheSize = OzoneConsts.GB; long cacheSize = Long.parseLong(store .getProperty("rocksdb.block-cache-capacity")); assertEquals(defaultCacheSize, cacheSize); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java index 30a8a9bcbce..0ff2aaa22b5 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java @@ -83,6 +83,7 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; +import org.mockito.Mockito; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.reset; @@ -147,7 +148,13 @@ public void testHandlerCommandHandling() throws Exception { .build(); KeyValueContainer container = mock(KeyValueContainer.class); - + KeyValueContainerData containerData = mock(KeyValueContainerData.class); + Mockito.when(container.getContainerData()).thenReturn(containerData); + 
Mockito.when(containerData.getReplicaIndex()).thenReturn(1); + ContainerProtos.ContainerCommandResponseProto responseProto = KeyValueHandler.dispatchRequest(handler, + createContainerRequest, container, null); + assertEquals(ContainerProtos.Result.INVALID_ARGUMENT, responseProto.getResult()); + Mockito.when(handler.getDatanodeId()).thenReturn(DATANODE_UUID); KeyValueHandler .dispatchRequest(handler, createContainerRequest, container, null); verify(handler, times(0)).handleListBlock( diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java index af0c430c86d..5f1a93ef2fb 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java @@ -133,8 +133,7 @@ public void testGetBlockWithReplicaIndexMismatch(ClientVersion clientVersion, in handler.handleGetBlock( getDummyCommandRequestProto(clientVersion, ContainerProtos.Type.GetBlock, rid), container); - assertEquals((replicaIndex > 0 && rid != replicaIndex && clientVersion.toProtoValue() >= - ClientVersion.EC_REPLICA_INDEX_REQUIRED_IN_BLOCK_REQUEST.toProtoValue()) ? + assertEquals((replicaIndex > 0 && rid != 0 && rid != replicaIndex) ? ContainerProtos.Result.CONTAINER_NOT_FOUND : UNKNOWN_BCSID, response.getResult()); } @@ -176,8 +175,7 @@ public void testReadChunkWithReplicaIndexMismatch(ClientVersion clientVersion, i ContainerProtos.ContainerCommandResponseProto response = handler.handleReadChunk(getDummyCommandRequestProto(clientVersion, ContainerProtos.Type.ReadChunk, rid), container, null); - assertEquals((replicaIndex > 0 && rid != replicaIndex && - clientVersion.toProtoValue() >= ClientVersion.EC_REPLICA_INDEX_REQUIRED_IN_BLOCK_REQUEST.toProtoValue()) ? + assertEquals((replicaIndex > 0 && rid != 0 && rid != replicaIndex) ? 
ContainerProtos.Result.CONTAINER_NOT_FOUND : UNKNOWN_BCSID, response.getResult()); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 07804c2a20b..2f2cbc81e90 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -20,6 +20,7 @@ import com.google.common.base.Preconditions; +import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.BlockID; @@ -51,7 +52,9 @@ import java.io.File; import java.nio.file.Files; import java.nio.file.Path; +import java.util.HashSet; import java.util.Random; +import java.util.Set; import java.util.UUID; import java.util.HashMap; import java.util.List; @@ -122,7 +125,7 @@ public void testBuildContainerMap(ContainerTestVersionInfo versionInfo) volume.format(clusterId); commitSpaceMap.put(getVolumeKey(volume), Long.valueOf(0)); } - + List containerDatas = new ArrayList<>(); // Add containers to disk int numTestContainers = 10; for (int i = 0; i < numTestContainers; i++) { @@ -136,6 +139,7 @@ public void testBuildContainerMap(ContainerTestVersionInfo versionInfo) layout, maxCap, UUID.randomUUID().toString(), datanodeDetails.getUuidString()); + containerDatas.add(keyValueContainerData); keyValueContainer = new KeyValueContainer( keyValueContainerData, conf); keyValueContainer.create(volumeSet, volumeChoosingPolicy, clusterId); @@ -156,8 +160,22 @@ public void testBuildContainerMap(ContainerTestVersionInfo versionInfo) ozoneContainer.buildContainerSet(); ContainerSet containerset = ozoneContainer.getContainerSet(); assertEquals(numTestContainers, containerset.containerCount()); - verifyCommittedSpace(ozoneContainer); + Set missingContainers = new HashSet<>(); + for (int i = 0; i < numTestContainers; i++) { + if (i % 2 == 0) { + missingContainers.add(containerDatas.get(i).getContainerID()); + FileUtils.deleteDirectory(new File(containerDatas.get(i).getContainerPath())); + } + } + ozoneContainer.stop(); + ozoneContainer = ContainerTestUtils.getOzoneContainer(datanodeDetails, conf); + ozoneContainer.buildContainerSet(); + containerset = ozoneContainer.getContainerSet(); + assertEquals(numTestContainers / 2, containerset.containerCount()); + assertEquals(numTestContainers / 2 + numTestContainers % 2, containerset.getMissingContainerSet().size()); + assertEquals(missingContainers, containerset.getMissingContainerSet()); + ozoneContainer.stop(); } @ContainerTestVersionInfo.ContainerTest @@ -300,12 +318,9 @@ private DatanodeDetails createDatanodeDetails() { random.nextInt(256) + "." + random.nextInt(256) + "." + random .nextInt(256) + "." 
+ random.nextInt(256); - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); + DatanodeDetails.Port containerPort = DatanodeDetails.newStandalonePort(0); + DatanodeDetails.Port ratisPort = DatanodeDetails.newRatisPort(0); + DatanodeDetails.Port restPort = DatanodeDetails.newRestPort(0); DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); builder.setUuid(UUID.randomUUID()) .setHostName("localhost") diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java index b8c43460ba3..c1cf59f0fdd 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java @@ -82,17 +82,15 @@ class TestGrpcReplicationService { @BeforeEach public void setUp() throws Exception { - init(false); + init(); } - public void init(boolean isZeroCopy) throws Exception { + public void init() throws Exception { conf = new OzoneConfiguration(); ReplicationServer.ReplicationConfig replicationConfig = conf.getObject(ReplicationServer.ReplicationConfig.class); - replicationConfig.setZeroCopyEnable(isZeroCopy); - SecurityConfig secConf = new SecurityConfig(conf); ContainerSet containerSet = new ContainerSet(1000); @@ -103,11 +101,9 @@ public void init(boolean isZeroCopy) throws Exception { .setPersistedOpState(HddsProtos.NodeOperationalState.IN_SERVICE) .setPersistedOpStateExpiry(0); DatanodeDetails.Port containerPort = - DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, - OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); + DatanodeDetails.newStandalonePort(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); DatanodeDetails.Port ratisPort = - DatanodeDetails.newPort(DatanodeDetails.Port.Name.RATIS, - OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT_DEFAULT); + DatanodeDetails.newRatisPort(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT_DEFAULT); DatanodeDetails.Port replicationPort = DatanodeDetails.newPort(DatanodeDetails.Port.Name.REPLICATION, replicationConfig.getPort()); @@ -226,7 +222,7 @@ public void copyData(long containerId, OutputStream destination, }; ContainerImporter importer = mock(ContainerImporter.class); GrpcReplicationService subject = - new GrpcReplicationService(source, importer, false); + new GrpcReplicationService(source, importer); CopyContainerRequestProto request = CopyContainerRequestProto.newBuilder() .setContainerID(1) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java index ef37c226653..315e0c0253b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java @@ -87,6 +87,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static 
org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority.LOW; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority.NORMAL; import static org.mockito.Mockito.any; @@ -488,6 +489,15 @@ public void testMultipleReplication(ContainerLayoutVersion layout, assertEquals(0, ecReconstructionSupervisor.getReplicationRequestCount( task1.getMetricName())); + assertTrue(replicationSupervisor.getReplicationRequestTotalTime( + task1.getMetricName()) > 0); + assertTrue(ecReconstructionSupervisor.getReplicationRequestTotalTime( + task2.getMetricName()) > 0); + assertTrue(replicationSupervisor.getReplicationRequestAvgTime( + task1.getMetricName()) > 0); + assertTrue(ecReconstructionSupervisor.getReplicationRequestAvgTime( + task2.getMetricName()) > 0); + MetricsCollectorImpl replicationMetricsCollector = new MetricsCollectorImpl(); replicationMetrics.getMetrics(replicationMetricsCollector, true); assertEquals(1, replicationMetricsCollector.getRecords().size()); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java index e9fef6ecfd6..55bddf2e99a 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java @@ -72,12 +72,9 @@ public void testStartupSlvLessThanMlv() throws Exception { } private DatanodeDetails getNewDatanodeDetails() { - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); + DatanodeDetails.Port containerPort = DatanodeDetails.newStandalonePort(0); + DatanodeDetails.Port ratisPort = DatanodeDetails.newRatisPort(0); + DatanodeDetails.Port restPort = DatanodeDetails.newRestPort(0); return DatanodeDetails.newBuilder() .setUuid(UUID.randomUUID()) .setHostName("localhost") diff --git a/hadoop-hdds/crypto-api/pom.xml b/hadoop-hdds/crypto-api/pom.xml index db19cc4f341..ca54b3de9f2 100644 --- a/hadoop-hdds/crypto-api/pom.xml +++ b/hadoop-hdds/crypto-api/pom.xml @@ -19,11 +19,11 @@ org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-crypto-api - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store cryptographic functions Apache Ozone HDDS Crypto diff --git a/hadoop-hdds/crypto-default/pom.xml b/hadoop-hdds/crypto-default/pom.xml index c586f91712b..6024c3e2ddf 100644 --- a/hadoop-hdds/crypto-default/pom.xml +++ b/hadoop-hdds/crypto-default/pom.xml @@ -19,11 +19,11 @@ org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-crypto-default - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Default implementation of Apache Ozone Distributed Data Store's cryptographic functions Apache Ozone HDDS Crypto - Default diff --git a/hadoop-hdds/docs/content/feature/Quota.md b/hadoop-hdds/docs/content/feature/Quota.md index 90e413357b5..53c196307fa 100644 
--- a/hadoop-hdds/docs/content/feature/Quota.md +++ b/hadoop-hdds/docs/content/feature/Quota.md @@ -1,6 +1,6 @@ --- title: "Quota in Ozone" -date: "2020-October-22" +date: "2020-10-22" weight: 4 summary: Quota in Ozone icon: user diff --git a/hadoop-hdds/docs/content/feature/Quota.zh.md b/hadoop-hdds/docs/content/feature/Quota.zh.md index 16e5db26cde..d690947ef06 100644 --- a/hadoop-hdds/docs/content/feature/Quota.zh.md +++ b/hadoop-hdds/docs/content/feature/Quota.zh.md @@ -1,6 +1,6 @@ --- title: "Ozone 中的配额" -date: "2020-October-22" +date: "2020-10-22" weight: 4 summary: Ozone中的配额 icon: user diff --git a/hadoop-hdds/docs/content/feature/SCM-HA.md b/hadoop-hdds/docs/content/feature/SCM-HA.md index cc42500e0c3..333c908275d 100644 --- a/hadoop-hdds/docs/content/feature/SCM-HA.md +++ b/hadoop-hdds/docs/content/feature/SCM-HA.md @@ -96,7 +96,7 @@ Second and third nodes should be *bootstrapped* instead of init. These clusters ozone scm --bootstrap ``` -Note: both commands perform one-time initialization. SCM still needs to be started by running `ozone scm --daemon start`. +Note: both commands perform one-time initialization. SCM still needs to be started by running `ozone --daemon start scm`. ## Auto-bootstrap @@ -121,7 +121,7 @@ Note: SCM still needs to be started after the init/bootstrap process. ``` ozone scm --init ozone scm --bootstrap -ozone scm --daemon start +ozone --daemon start scm ``` For Docker/Kubernetes, use `ozone scm` to start it in the foreground. diff --git a/hadoop-hdds/docs/content/feature/dn-merge-rocksdb.md b/hadoop-hdds/docs/content/feature/dn-merge-rocksdb.md index 92ae64f8a1d..002aba4cc2d 100644 --- a/hadoop-hdds/docs/content/feature/dn-merge-rocksdb.md +++ b/hadoop-hdds/docs/content/feature/dn-merge-rocksdb.md @@ -25,15 +25,15 @@ summary: Introduction to Ozone Datanode Container Schema V3 In Ozone, user data are separated into blocks and stored in HDDS Containers. Containers are the fundamental replication unit of Ozone/HDDS. Each Container has its metadata and data. Data are saved as files on disk. Metadata is saved in RocksDB. -Currently there will be one RocksDB for each Container on datanode. With user data continously grow, there will be hundreds of thousands of RocksDB instances on one datanode. It's a big challenge to manage this amount of RocksDB instances in one JVM. +Earlier, there was one RocksDB for each Container on datanode. With user data continously growing, there will be hundreds of thousands of RocksDB instances on one datanode. It's a big challenge to manage this amount of RocksDB instances in one JVM. -Unlike the current approach, this "Merge Container RocksDB in DN" feature will use only one RocksDB for each data volume, holding all metadata of Containers in this RocksDB. +Unlike the previous approach, this "Merge Container RocksDB in DN" feature will use only one RocksDB for each data volume, holding all metadata of Containers in this RocksDB. ## Configuration -This is mainly a DN feature, which doesn't require much configuration. +This is mainly a DN feature, which doesn't require much configuration. By default, it is enabled. -Here is a configuration which disable this feature if the current one RocksDB for each container mode is more preferred. Please be noted that once the feature is enabled, it's strongly suggested not to disable it in later. +Here is a configuration which disables this feature if the "one RocksDB for each container" mode is more preferred. 
Please be noted that once the feature is enabled, it's strongly suggested not to disable it in later. ```XML diff --git a/hadoop-hdds/docs/content/feature/dn-merge-rocksdb.zh.md b/hadoop-hdds/docs/content/feature/dn-merge-rocksdb.zh.md index cd3eb5fbdc5..65085a99451 100644 --- a/hadoop-hdds/docs/content/feature/dn-merge-rocksdb.zh.md +++ b/hadoop-hdds/docs/content/feature/dn-merge-rocksdb.zh.md @@ -25,13 +25,13 @@ summary: Ozone DataNode Container模式简介V3 在 Ozone 中,用户数据被分割成blocks并存储在 HDDS Container中。Container是 Ozone/HDDS 的基本复制单元。每个Container都有自己的元数据和数据, 数据以文件形式保存在磁盘上,元数据保存在RocksDB中。 -目前,数据节点上的每个Container都有一个RocksDB。随着用户数据的不断增长,一个DataNode上将会有成百上千个RocksDB实例。在一个JVM中管理如此多的RocksDB实例是一个巨大的挑战。 +之前,数据节点上每个Container都有一个RocksDB。随着用户数据的不断增长,一个DataNode上将会有成百上千个RocksDB实例。在一个JVM中管理如此多的RocksDB实例是一个巨大的挑战。 -与当前使用方法不同,"Merge Container RocksDB in DN"功能将为每个Volume只使用一个RocksDB,并在此RocksDB中保存所有Container的元数据。 +与以前的用法不同,"Merge Container RocksDB in DN"功能将为每个Volume只使用一个RocksDB,并在此RocksDB中保存所有Container的元数据。 ## 配置 -这主要是DataNode的功能,不需要太多配置。 +这主要是DataNode的功能,不需要太多配置。默认情况下,它是启用的。 如果更倾向于为每个Container使用一个RocksDB的模式,那么这下面的配置可以禁用上面所介绍的功能。请注意,一旦启用该功能,强烈建议以后不要再禁用。 diff --git a/hadoop-hdds/docs/content/interface/CSI.md b/hadoop-hdds/docs/content/interface/CSI.md index 59b24c94d19..84bd89c049e 100644 --- a/hadoop-hdds/docs/content/interface/CSI.md +++ b/hadoop-hdds/docs/content/interface/CSI.md @@ -57,7 +57,7 @@ Now, create the CSI related resources by execute the follow command. kubectl create -f /ozone/kubernetes/examples/ozone/csi ``` -## Crete pv-test and visit the result. +## Create pv-test and visit the result. Create pv-test related resources by execute the follow command. diff --git a/hadoop-hdds/docs/content/interface/S3.md b/hadoop-hdds/docs/content/interface/S3.md index 1d0c5dcb4cc..c2f0f3af9b9 100644 --- a/hadoop-hdds/docs/content/interface/S3.md +++ b/hadoop-hdds/docs/content/interface/S3.md @@ -163,10 +163,3 @@ Or aws s3 ls --endpoint http://localhost:9878 s3://buckettest ``` -### S3 Fuse driver (goofys) - -[Goofys](https://github.com/kahing/goofys) is a S3 FUSE driver. As Ozone S3 gateway is AWS S3 compatible, it can be used to mount any Ozone buckets as an OS level mounted filesystem. 
- -```bash -goofys --endpoint http://localhost:9878 bucket1 /mount/bucket1 -``` diff --git a/hadoop-hdds/docs/content/interface/S3.zh.md b/hadoop-hdds/docs/content/interface/S3.zh.md index e3b133a0e16..370098e091e 100644 --- a/hadoop-hdds/docs/content/interface/S3.zh.md +++ b/hadoop-hdds/docs/content/interface/S3.zh.md @@ -142,10 +142,3 @@ aws s3api --endpoint http://localhost:9878 create-bucket --bucket buckettest aws s3 ls --endpoint http://localhost:9878 s3://buckettest ``` -### S3 Fuse 驱动(goofys) - -Goofys 是一个 S3 FUSE 驱动,可以将 Ozone 的桶挂载到 POSIX 文件系统。 - -```bash -goofys --endpoint http://localhost:9878 bucket1 /mount/bucket1 -``` diff --git a/hadoop-hdds/docs/content/security/GDPR.md b/hadoop-hdds/docs/content/security/GDPR.md index 25b2f2c4416..409a3ae7be0 100644 --- a/hadoop-hdds/docs/content/security/GDPR.md +++ b/hadoop-hdds/docs/content/security/GDPR.md @@ -1,6 +1,6 @@ --- title: "GDPR in Ozone" -date: "2019-September-17" +date: "2019-09-17" weight: 3 icon: user menu: diff --git a/hadoop-hdds/docs/content/security/GDPR.zh.md b/hadoop-hdds/docs/content/security/GDPR.zh.md index a7db4030871..8fd3514138f 100644 --- a/hadoop-hdds/docs/content/security/GDPR.zh.md +++ b/hadoop-hdds/docs/content/security/GDPR.zh.md @@ -1,6 +1,6 @@ --- title: "Ozone 中的 GDPR" -date: "2019-September-17" +date: "2019-09-17" weight: 3 summary: Ozone 中的 GDPR menu: diff --git a/hadoop-hdds/docs/content/security/SecureOzone.md b/hadoop-hdds/docs/content/security/SecureOzone.md index 76fd7470109..bbeef79b613 100644 --- a/hadoop-hdds/docs/content/security/SecureOzone.md +++ b/hadoop-hdds/docs/content/security/SecureOzone.md @@ -1,6 +1,6 @@ --- title: "Securing Ozone" -date: "2019-April-03" +date: "2019-04-03" summary: Overview of Ozone security concepts and steps to secure Ozone Manager and SCM. 
weight: 1 menu: diff --git a/hadoop-hdds/docs/content/security/SecureOzone.zh.md b/hadoop-hdds/docs/content/security/SecureOzone.zh.md index a7660233f4d..e74b5d8dfab 100644 --- a/hadoop-hdds/docs/content/security/SecureOzone.zh.md +++ b/hadoop-hdds/docs/content/security/SecureOzone.zh.md @@ -1,6 +1,6 @@ --- title: "安全化 Ozone" -date: "2019-April-03" +date: "2019-04-03" summary: 简要介绍 Ozone 中的安全概念以及安全化 OM 和 SCM 的步骤。 weight: 1 menu: diff --git a/hadoop-hdds/docs/content/security/SecuringDatanodes.md b/hadoop-hdds/docs/content/security/SecuringDatanodes.md index 717e746cfb9..2254155e1f4 100644 --- a/hadoop-hdds/docs/content/security/SecuringDatanodes.md +++ b/hadoop-hdds/docs/content/security/SecuringDatanodes.md @@ -1,6 +1,6 @@ --- title: "Securing Datanodes" -date: "2019-April-03" +date: "2019-04-03" weight: 3 menu: main: diff --git a/hadoop-hdds/docs/content/security/SecuringDatanodes.zh.md b/hadoop-hdds/docs/content/security/SecuringDatanodes.zh.md index 608be16e8a3..8b37fd2f6ee 100644 --- a/hadoop-hdds/docs/content/security/SecuringDatanodes.zh.md +++ b/hadoop-hdds/docs/content/security/SecuringDatanodes.zh.md @@ -1,6 +1,6 @@ --- title: "安全化 Datanode" -date: "2019-April-03" +date: "2019-04-03" weight: 3 menu: main: diff --git a/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.md b/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.md index 47c04eb94d9..a8601d7a5e1 100644 --- a/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.md +++ b/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.md @@ -1,6 +1,6 @@ --- title: "Securing HTTP" -date: "2020-June-17" +date: "2020-06-17" summary: Secure HTTP web-consoles for Ozone services weight: 4 menu: diff --git a/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.zh.md b/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.zh.md index 07b3f6164f6..5907a7caf9a 100644 --- a/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.zh.md +++ b/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.zh.md @@ -1,6 +1,6 @@ --- title: "安全化 HTTP" -date: "2020-June-17" +date: "2020-06-17" summary: 安全化 Ozone 服务的 HTTP 网络控制台 weight: 4 menu: diff --git a/hadoop-hdds/docs/content/security/SecuringS3.md b/hadoop-hdds/docs/content/security/SecuringS3.md index e6218b95e91..04ef6921af6 100644 --- a/hadoop-hdds/docs/content/security/SecuringS3.md +++ b/hadoop-hdds/docs/content/security/SecuringS3.md @@ -1,6 +1,6 @@ --- title: "Securing S3" -date: "2019-April-03" +date: "2019-04-03" summary: Ozone supports S3 protocol, and uses AWS Signature Version 4 protocol which allows a seamless S3 experience. weight: 5 menu: diff --git a/hadoop-hdds/docs/content/security/SecuringS3.zh.md b/hadoop-hdds/docs/content/security/SecuringS3.zh.md index 218786fd366..395b9303354 100644 --- a/hadoop-hdds/docs/content/security/SecuringS3.zh.md +++ b/hadoop-hdds/docs/content/security/SecuringS3.zh.md @@ -1,6 +1,6 @@ --- title: "安全化 S3" -date: "2019-April-03" +date: "2019-04-03" summary: Ozone 支持 S3 协议,并使用 AWS Signature Version 4 protocol which allows a seamless S3 experience. weight: 5 diff --git a/hadoop-hdds/docs/content/security/SecuringTDE.md b/hadoop-hdds/docs/content/security/SecuringTDE.md index 3b75bee1bfd..0d04a28aec7 100644 --- a/hadoop-hdds/docs/content/security/SecuringTDE.md +++ b/hadoop-hdds/docs/content/security/SecuringTDE.md @@ -1,6 +1,6 @@ --- title: "Transparent Data Encryption" -date: "2019-April-03" +date: "2019-04-03" summary: TDE allows data on the disks to be encrypted-at-rest and automatically decrypted during access. 
weight: 2 menu: diff --git a/hadoop-hdds/docs/content/security/SecuringTDE.zh.md b/hadoop-hdds/docs/content/security/SecuringTDE.zh.md index ed42519e0b2..d7fa4941e44 100644 --- a/hadoop-hdds/docs/content/security/SecuringTDE.zh.md +++ b/hadoop-hdds/docs/content/security/SecuringTDE.zh.md @@ -1,6 +1,6 @@ --- title: "透明数据加密" -date: "2019-April-03" +date: "2019-04-03" summary: 透明数据加密(Transparent Data Encryption,TDE)以密文形式在磁盘上保存数据,但可以在用户访问的时候自动进行解密。 weight: 2 menu: diff --git a/hadoop-hdds/docs/content/security/SecurityAcls.md b/hadoop-hdds/docs/content/security/SecurityAcls.md index 9976cbbc4fb..ee48999ed25 100644 --- a/hadoop-hdds/docs/content/security/SecurityAcls.md +++ b/hadoop-hdds/docs/content/security/SecurityAcls.md @@ -1,6 +1,6 @@ --- title: "Ozone ACLs" -date: "2019-April-03" +date: "2019-04-03" weight: 6 menu: main: diff --git a/hadoop-hdds/docs/content/security/SecurityAcls.zh.md b/hadoop-hdds/docs/content/security/SecurityAcls.zh.md index 3d95fcf0877..99751cd62da 100644 --- a/hadoop-hdds/docs/content/security/SecurityAcls.zh.md +++ b/hadoop-hdds/docs/content/security/SecurityAcls.zh.md @@ -1,6 +1,6 @@ --- title: "Ozone 访问控制列表" -date: "2019-April-03" +date: "2019-04-03" weight: 6 menu: main: diff --git a/hadoop-hdds/docs/content/security/SecurityWithRanger.md b/hadoop-hdds/docs/content/security/SecurityWithRanger.md index bbbd8c19f32..7dc1895ad3d 100644 --- a/hadoop-hdds/docs/content/security/SecurityWithRanger.md +++ b/hadoop-hdds/docs/content/security/SecurityWithRanger.md @@ -1,6 +1,6 @@ --- title: "Apache Ranger" -date: "2019-April-03" +date: "2019-04-03" weight: 7 menu: main: diff --git a/hadoop-hdds/docs/content/security/SecurityWithRanger.zh.md b/hadoop-hdds/docs/content/security/SecurityWithRanger.zh.md index b7c7b8721bb..8917c0b84bc 100644 --- a/hadoop-hdds/docs/content/security/SecurityWithRanger.zh.md +++ b/hadoop-hdds/docs/content/security/SecurityWithRanger.zh.md @@ -1,6 +1,6 @@ --- title: "Apache Ranger" -date: "2019-April-03" +date: "2019-04-03" weight: 7 menu: main: diff --git a/hadoop-hdds/docs/content/tools/Debug.md b/hadoop-hdds/docs/content/tools/Debug.md new file mode 100644 index 00000000000..79c11f777ef --- /dev/null +++ b/hadoop-hdds/docs/content/tools/Debug.md @@ -0,0 +1,473 @@ +--- +title: "Ozone Debug" +date: 2024-10-14 +summary: Ozone Debug command can be used for all the debugging related tasks. +--- + + +Ozone Debug command (`ozone debug`) is a collection of developer tools intended to help in debugging and get more information of various components of ozone. + +```bash +Usage: ozone debug [-hV] [--verbose] [-conf=] + [-D=]... [COMMAND] +Developer tools for Ozone Debug operations + -conf= + path to the ozone configuration file + -D, --set= + Map of (configuration_key,configuration_value) for any + configuration overrides + -h, --help Show this help message and exit. + -V, --version Print version information and exit. + --verbose More verbose output. Show the stack trace of the errors. +Commands: + chunkinfo returns chunk location information about an + existing key + print-log-dag, pld Create an image of the current compaction log DAG + in OM. + find-missing-padding, fmp List all keys with any missing padding, optionally + limited to a volume/bucket/key URI. + recover recover the lease of a specified file. Make sure + to specify file system scheme if ofs:// is not + the default. + prefix Parse prefix contents + ldb Parse rocksdb file content + read-replicas Reads every replica for all the blocks associated + with a given key. 
+ container Container replica specific operations to be + executed on datanodes only + ratislogparser Shell of printing Ratis Log in understandable text +``` +For more detailed usage see the output of `--help` for each of the subcommands. + + +## ozone debug ldb + +Ozone heavily uses RocksDB for storing metadata. This tool helps parse the contents of RocksDB belonging to Ozone Roles. +Supported DB's : Ozone Manager (om.db) , StorageContainerManager (scm.db), Datanode/Container (container.db) +Below is the usage: + +```bash +Usage: ozone debug ldb --db= [COMMAND] +Parse rocksdb file content + --db= Database File Path +Commands: + scan Parse specified metadataTable + list_column_families, ls list all column families in db. + value-schema Schema of value in metadataTable +``` + +### list_column_families command + +`list_column_families` command lists all the column families in the db provided. + +```bash +$ ozone debug ldb --db=/path/to/scm.db ls +default +sequenceId +revokedCertsV2 +pipelines +crls +crlSequenceId +meta +containers +validCerts +validSCMCerts +scmTransactionInfos +deletedBlocks +statefulServiceConfig +revokedCerts +move +``` + +### scan command + +`scan` command parses a particular column family of a rocksdb provided and prints the records. + +```bash +Usage: ozone debug ldb scan [--compact] [--count] [--with-keys] + [--batch-size=] --cf= + [--cid=] [-d=] + [-e=] [--fields=] + [--filter=] [-l=] [-o=] + [-s=] [--thread-count=] +Parse specified metadataTable + --batch-size= + Batch size for processing DB data. + --cf, --column_family, --column-family= + Table name + --cid, --container-id= + Container ID. Applicable if datanode DB Schema is V3 + --compact disable the pretty print the output + --count, --show-count + Get estimated key count for the given DB column family + Default: false + -d, --dnSchema, --dn-schema= + Datanode DB Schema Version: V1/V2/V3 + -e, --ek, --endkey= + Key at which iteration of the DB ends + --fields= + Comma-separated list of fields needed for each value. + eg.) "name,acls.type" for showing name and type + under acls. + --filter= Comma-separated list of "::" + where is any valid field of the record, + is [EQUALS,LESSER, GREATER or REGEX]. + (EQUALS compares the exact string, REGEX compares + with a valid regular expression passed, and + LESSER/GREATER works with numeric values), + is the value of the field. + eg.) "dataSize:equals:1000" for showing records + having the value 1000 for dataSize, + "keyName:regex:^key.*$" for showing records + having keyName that matches the given regex. + -l, --limit, --length= + Maximum number of items to list. + -o, --out= File to dump table scan data + -s, --sk, --startkey= + Key from which to iterate the DB + --thread-count= + Thread count for concurrent processing. + --with-keys Print a JSON object of key->value pairs (default) + instead of a JSON array of only values. +``` +By default, the contents are printed on the console, but it can be redirected to a file using the `--out` option.
    +`--length` can be used to limit the number of records being printed.
+`--count` does not print the records; it only shows an estimate of the number of records, which may not be exact.
    +`ozone debug ldb scan` command provides many filtering options to make debugging easier, elaborated below:
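+
+For example, these options can be combined in a single scan invocation; the snippet below is only an illustrative sketch (the DB path and column family are placeholders):
+
+```bash
+# limit the scan to 100 records and dump them to a file instead of the console
+$ ozone debug ldb --db=/path/to/om.db scan --cf=keyTable -l=100 --out=/tmp/keyTable.json
+
+# only show the estimated number of records, without printing them
+$ ozone debug ldb --db=/path/to/om.db scan --cf=keyTable --count
+```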
    + +

+
+#### --startkey and --endkey
+As the names suggest, these options specify the keys at which iteration of the DB starts and stops.
    +`--startkey` specifies which key to start iterating from, it is inclusive. `--endkey` specifies which key to stop iterating at, it is exclusive. + +```bash +$ ozone debug ldb --db=/path/to/om.db scan --cf=volumeTable --startkey=vol3 --endkey=vol5 +``` +```json +{ "/vol3": { + "metadata" : { }, + "objectID" : -9999, + "updateID" : 4000, + "adminName" : "om", + "ownerName" : "om", + "volume" : "vol3", + "creationTime" : 1707192335309, + "modificationTime" : 1714057412205, + "quotaInBytes" : 22854448694951936, + "quotaInNamespace" : 100000000, + "usedNamespace" : 1, + "acls" : [ { + "type" : "USER", + "name" : "om", + "aclScope" : "ACCESS" + } ], + "refCount" : 0 +} +, "/vol4": { + "metadata" : { }, + "objectID" : -888, + "updateID" : 5000, + "adminName" : "om", + "ownerName" : "om", + "volume" : "vol4", + "creationTime" : 1696280979907, + "modificationTime" : 1696280979907, + "quotaInBytes" : 2251799813685250, + "quotaInNamespace" : 100000000, + "usedNamespace" : 2, + "acls" : [ { + "type" : "USER", + "name" : "om", + "aclScope" : "ACCESS" + } ], + "refCount" : 0 +} + } +``` + +#### --fields +There are multiple fields in each record. `--fields` option allows us to choose the specific fields to display. + +```bash +$ ozone debug ldb --db=/path/to/om.db scan --cf=keyTable -l=1 --fields="volumeName,bucketName,keyName,keyLocationVersions.version,acls.name" +``` +```json +{ "/vol1/ozone-legacy-bucket/10T-1-terasort-input/": { + "keyLocationVersions" : [ { + "version" : 0 + } ], + "keyName" : "10T-1-terasort-input/", + "bucketName" : "ozone-legacy-bucket", + "acls" : [ { + "name" : "om" + }, { + "name" : "scm" + }, { + "name" : "testuser" + } ], + "volumeName" : "vol1" +} +} +``` + +#### --filter +`--filter` can be used to select records whose value matches a given condition. The filter is given in this format: `::`, +where `` is any valid field from the value of the record, `` is one of the 4 supported operations `[equals, regex, lesser, greater]`, `` is the value used for the comparison.
+'Equals' and 'regex' work with string, boolean and numerical fields, while 'lesser' and 'greater' work only with numerical values.
+Multiple filters can also be given in one command; they need to be separated by commas.
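+
+Filters can also be combined with `--fields` and `--limit` in the same command. The following is only a sketch with made-up values:
+
+```bash
+# list at most 5 keys whose dataSize exceeds 1000 bytes, showing selected fields only
+$ ozone debug ldb --db=/path/to/om.db scan --cf=keyTable -l=5 \
+    --fields="volumeName,bucketName,keyName,dataSize" \
+    --filter="dataSize:greater:1000"
+```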
    +Using `equals` operator: +```bash +$ ozone debug ldb --db=/path/to/om.db scan --cf=volumeTable --filter="usedNamespace:equals:2" +``` +```json +{ + "/vol4": { + "metadata": {}, + "objectID": -888, + "updateID": 5000, + "adminName": "om", + "ownerName": "om", + "volume": "vol4", + "creationTime": 1696280979907, + "modificationTime": 1696280979907, + "quotaInBytes": 2251799813685250, + "quotaInNamespace": 100000000, + "usedNamespace": 2, + "acls": [ + { + "type": "USER", + "name": "om", + "aclScope": "ACCESS" + } + ], + "refCount": 0 + } +, "/vol5": { + "metadata" : { }, + "objectID" : -956599, + "updateID" : 45600, + "adminName" : "om", + "ownerName" : "om", + "volume" : "vol5", + "creationTime" : 1807192332309, + "modificationTime" : 1914057410005, + "quotaInBytes" : 7785494951936, + "quotaInNamespace" : 100000000, + "usedNamespace" : 2, + "acls" : [ { + "type" : "USER", + "name" : "om", + "aclScope" : "ACCESS" + } ], + "refCount" : 0 +} + } +``` +Using `lesser` operator (`greater` operator can also be used in the same way): +```bash +$ ozone debug ldb --db=/path/to/om.db scan --cf=volumeTable --filter="usedNamespace:lesser:2" +``` +```json +{ + "/vol2": { + "metadata": {}, + "objectID": -73548, + "updateID": 2384, + "adminName": "om", + "ownerName": "om", + "volume": "vol2", + "creationTime": 11980979907, + "modificationTime": 1296280979900, + "quotaInBytes": 417913685250, + "quotaInNamespace": 100000000, + "usedNamespace": 1, + "acls": [ + { + "type": "USER", + "name": "om", + "aclScope": "ACCESS" + } + ], + "refCount": 0 + } + } +``` +Using `regex` operator: +```bash +$ ozone debug ldb --db=/path/to/om.db scan --cf=volumeTable --filter="volume:regex:^v.*2$" +``` +```json +{ + "/vol2": { + "metadata": {}, + "objectID": -73548, + "updateID": 2384, + "adminName": "om", + "ownerName": "om", + "volume": "vol2", + "creationTime": 11980979907, + "modificationTime": 1296280979900, + "quotaInBytes": 417913685250, + "quotaInNamespace": 100000000, + "usedNamespace": 1, + "acls": [ + { + "type": "USER", + "name": "om", + "aclScope": "ACCESS" + } + ], + "refCount": 0 + } + } +``` + +Using multiple filters: +```bash +$ ozone debug ldb --db=/path/to/om.db scan --cf=volumeTable --filter="usedNamespace:equals:2,volume:regex:^.*4$" +``` +```json +{ + "/vol4": { + "metadata": {}, + "objectID": -888, + "updateID": 5000, + "adminName": "om", + "ownerName": "om", + "volume": "vol4", + "creationTime": 1696280979907, + "modificationTime": 1696280979907, + "quotaInBytes": 2251799813685250, + "quotaInNamespace": 100000000, + "usedNamespace": 2, + "acls": [ + { + "type": "USER", + "name": "om", + "aclScope": "ACCESS" + } + ], + "refCount": 0 + } + } +``` + +### value-schema command + +`value-schema` command shows the schema of the value stored in a column-family of a rocksdb, i.e., it shows the fields stored in the value and it's datatype. +`--depth` can be used optionally to limit the level until which the fields are fetched. 
+ +```bash +$ ozone debug ldb --db=/data/metadata/om.db value-schema --cf=keyTable --depth=1 +``` +```json +{ + "OmKeyInfo" : { + "bucketName" : "String", + "metadata" : "struct", + "fileName" : "String", + "creationTime" : "long", + "isFile" : "boolean", + "acls" : "struct", + "keyName" : "String", + "replicationConfig" : "struct", + "encInfo" : "struct", + "dataSize" : "long", + "tags" : "struct", + "keyLocationVersions" : "struct", + "updateID" : "long", + "ownerName" : "String", + "modificationTime" : "long", + "parentObjectID" : "long", + "volumeName" : "String", + "fileChecksum" : "struct", + "objectID" : "long" +} + } +``` +```bash +$ ozone debug ldb --db=/data/metadata/om.db value-schema --cf=keyTable +``` +```json +{ + "OmKeyInfo" : { + "bucketName" : "String", + "metadata" : { }, + "fileName" : "String", + "creationTime" : "long", + "isFile" : "boolean", + "acls" : { + "toStringMethod" : { }, + "hashCodeMethod" : { }, + "name" : "String", + "type" : { + "name" : "String", + "value" : "String", + "ordinal" : "int" + }, + "aclScope" : { + "name" : "String", + "ordinal" : "int" + }, + "aclBits" : "int" + }, + "keyName" : "String", + "replicationConfig" : { }, + "encInfo" : { + "ezKeyVersionName" : "String", + "keyName" : "String", + "edek" : { }, + "cipherSuite" : { + "unknownValue" : { + "value" : "int" + }, + "name" : "String", + "algoBlockSize" : "int", + "ordinal" : "int" + }, + "version" : { + "unknownValue" : { + "value" : "int" + }, + "name" : "String", + "description" : "String", + "version" : "int", + "ordinal" : "int" + }, + "iv" : { } + }, + "dataSize" : "long", + "tags" : { }, + "keyLocationVersions" : { + "isMultipartKey" : "boolean", + "locationVersionMap" : { }, + "version" : "long" + }, + "updateID" : "long", + "ownerName" : "String", + "modificationTime" : "long", + "parentObjectID" : "long", + "volumeName" : "String", + "fileChecksum" : { }, + "objectID" : "long" + } +} +``` \ No newline at end of file diff --git a/hadoop-hdds/docs/content/tools/Debug.zh.md b/hadoop-hdds/docs/content/tools/Debug.zh.md new file mode 100644 index 00000000000..3f3238dd84b --- /dev/null +++ b/hadoop-hdds/docs/content/tools/Debug.zh.md @@ -0,0 +1,466 @@ +--- +title: "Ozone Debug" +date: 2024-10-14 +summary: Ozone Debug 命令可用于所有与调试相关的任务。 +--- + + +Ozone Debug 命令 (`ozone debug`) 是开发人员工具的集合,旨在帮助调试并获取 Ozone 各个组件的更多信息。 + +```bash +Usage: ozone debug [-hV] [--verbose] [-conf=] + [-D=]... [COMMAND] +Developer tools for Ozone Debug operations + -conf= + path to the ozone configuration file + -D, --set= + a map of (configuration_key,configuration_value) for any overrides + -h, --help Show this help message and exit. + -V, --version Print version information and exit. + --verbose More verbose output. Show the stack trace of the errors. 
+``` +子命令: + chunkinfo 返回指定文件/对象的块位置信息。 + print-log-dag, pld 在 OM 中创建当前压缩日志 DAG 的镜像。 + find-missing-padding, fmp 列出所有缺少填充的文件/对象,可以选择指定卷/存储桶/键 URI。 + recover 恢复指定文件的租约。如果默认值不是 ofs:// ,请确保指定文件系统schema。 + prefix 解析前缀内容。 + ldb 解析 rocksdb 文件内容。 + read-replicas 读取给定路径文件/对象所有块的每个副本。 + container 容器副本特定操作,仅在数据节点上执行。 + ratislogparser 解析Ratis Log 成用户可理解的文字形式。 + +有关更详细的用法,请参阅每个子命令的“--help”输出。 + + +## ozone debug ldb + +Ozone 大量使用 RocksDB 来存储元数据。该工具帮助解析各个Ozone Roles 的 RocksDB 数据内容。 +支持的数据库:Ozone Manager (om.db)、StorageContainerManager (scm.db)、Datanode/Container (container.db) +下面是用法: + +```bash +Usage: ozone debug ldb --db= [COMMAND] +Parse rocksdb file content + --db= Database File Path +Commands: + scan Parse specified metadataTable + list_column_families, ls list all column families in db. + value-schema Schema of value in metadataTable +``` + +### list_column_families command + +`list_column_families` 命令列出指定数据库中的所有列族。 + +```bash +$ ozone debug ldb --db=/path/to/scm.db ls +default +sequenceId +revokedCertsV2 +pipelines +crls +crlSequenceId +meta +containers +validCerts +validSCMCerts +scmTransactionInfos +deletedBlocks +statefulServiceConfig +revokedCerts +move +``` + +### scan command + +`scan` 命令解析提供的 rocksdb 的特定列族并打印记录。 + +```bash +Usage: ozone debug ldb scan [--compact] [--count] [--with-keys] + [--batch-size=] --cf= + [--cid=] [-d=] + [-e=] [--fields=] + [--filter=] [-l=] [-o=] + [-s=] [--thread-count=] +Parse specified metadataTable + --batch-size= + Batch size for processing DB data. + --cf, --column_family, --column-family= + Table name + --cid, --container-id= + Container ID. Applicable if datanode DB Schema is V3 + --compact disable the pretty print the output + --count, --show-count + Get estimated key count for the given DB column family + Default: false + -d, --dnSchema, --dn-schema= + Datanode DB Schema Version: V1/V2/V3 + -e, --ek, --endkey= + Key at which iteration of the DB ends + --fields= + Comma-separated list of fields needed for each value. + eg.) "name,acls.type" for showing name and type + under acls. + --filter= Comma-separated list of "::" + where is any valid field of the record, + is [EQUALS,LESSER, GREATER or REGEX]. + (EQUALS compares the exact string, REGEX compares + with a valid regular expression passed, and + LESSER/GREATER works with numeric values), + is the value of the field. + eg.) "dataSize:equals:1000" for showing records + having the value 1000 for dataSize, + "keyName:regex:^key.*$" for showing records + having keyName that matches the given regex. + -l, --limit, --length= + Maximum number of items to list. + -o, --out= File to dump table scan data + -s, --sk, --startkey= + Key from which to iterate the DB + --thread-count= + Thread count for concurrent processing. + --with-keys Print a JSON object of key->value pairs (default) + instead of a JSON array of only values. +``` +默认情况下,内容打印在控制台上,但可以使用 `--out` 选项将其重定向到文件。
    +`--length` 可用于限制打印的记录数。
+`--count` 不打印记录,只显示记录数的估计值,并不完全精确。
    +`ozone debug ldb scan` 命令提供了许多过滤选项以使调试更容易,详细说明如下:
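+
+例如,这些选项可以在一次 scan 调用中组合使用;下面只是一个示意(数据库路径和列族仅为占位符):
+
+```bash
+# 将扫描限制为 100 条记录,并将结果输出到文件而不是控制台
+$ ozone debug ldb --db=/path/to/om.db scan --cf=keyTable -l=100 --out=/tmp/keyTable.json
+
+# 只显示估计的记录数,不打印记录内容
+$ ozone debug ldb --db=/path/to/om.db scan --cf=keyTable --count
+```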
    + + + +#### --startkey and --endkey +顾名思义,这些选项指定迭代需要发生的键。
    +`--startkey` 指定从哪个键开始迭代,包含该键。 `--endkey` 指定停止迭代的键,不包含该键。 + +```bash +$ ozone debug ldb --db=/path/to/om.db scan --cf=volumeTable --startkey=vol3 --endkey=vol5 +``` +```json +{ "/vol3": { + "metadata" : { }, + "objectID" : -9999, + "updateID" : 4000, + "adminName" : "om", + "ownerName" : "om", + "volume" : "vol3", + "creationTime" : 1707192335309, + "modificationTime" : 1714057412205, + "quotaInBytes" : 22854448694951936, + "quotaInNamespace" : 100000000, + "usedNamespace" : 1, + "acls" : [ { + "type" : "USER", + "name" : "om", + "aclScope" : "ACCESS" + } ], + "refCount" : 0 +} +, "/vol4": { + "metadata" : { }, + "objectID" : -888, + "updateID" : 5000, + "adminName" : "om", + "ownerName" : "om", + "volume" : "vol4", + "creationTime" : 1696280979907, + "modificationTime" : 1696280979907, + "quotaInBytes" : 2251799813685250, + "quotaInNamespace" : 100000000, + "usedNamespace" : 2, + "acls" : [ { + "type" : "USER", + "name" : "om", + "aclScope" : "ACCESS" + } ], + "refCount" : 0 +} + } +``` + +#### --fields +每条记录中有多个字段。 `--fields` 选项允许我们选择要显示的特定字段。 + +```bash +$ ozone debug ldb --db=/path/to/om.db scan --cf=keyTable -l=1 --fields="volumeName,bucketName,keyName,keyLocationVersions.version,acls.name" +``` +```json +{ "/vol1/ozone-legacy-bucket/10T-1-terasort-input/": { + "keyLocationVersions" : [ { + "version" : 0 + } ], + "keyName" : "10T-1-terasort-input/", + "bucketName" : "ozone-legacy-bucket", + "acls" : [ { + "name" : "om" + }, { + "name" : "scm" + }, { + "name" : "testuser" + } ], + "volumeName" : "vol1" +} +} +``` + +#### --filter +`--filter` 可用于选择值与给定条件匹配的记录。过滤器按以下格式给出:`::`, +其中“”是记录值中的任何有效字段,“”是 4 个支持的操作 `[equals, regex, lesser, greater]` 之一,“”是使用的值用于比较。
    +`Equals` 和 `regex` 适用于字符串、布尔值和数字字段,`lesser` 和 `greater` 仅适用于数字值。
    +也可以在一个命令中给出多个过滤器,它们需要用逗号分隔。
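+
+过滤器也可以与 `--fields` 和 `--limit` 在同一条命令中组合使用。下面只是一个使用示意值的示例:
+
+```bash
+# 最多列出 5 个 dataSize 大于 1000 字节的键,只显示选定的字段
+$ ozone debug ldb --db=/path/to/om.db scan --cf=keyTable -l=5 \
+    --fields="volumeName,bucketName,keyName,dataSize" \
+    --filter="dataSize:greater:1000"
+```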
    +使用 `equals` (等于) 运算符: +```bash +$ ozone debug ldb --db=/path/to/om.db scan --cf=volumeTable --filter="usedNamespace:equals:2" +``` +```json +{ + "/vol4": { + "metadata": {}, + "objectID": -888, + "updateID": 5000, + "adminName": "om", + "ownerName": "om", + "volume": "vol4", + "creationTime": 1696280979907, + "modificationTime": 1696280979907, + "quotaInBytes": 2251799813685250, + "quotaInNamespace": 100000000, + "usedNamespace": 2, + "acls": [ + { + "type": "USER", + "name": "om", + "aclScope": "ACCESS" + } + ], + "refCount": 0 + } +, "/vol5": { + "metadata" : { }, + "objectID" : -956599, + "updateID" : 45600, + "adminName" : "om", + "ownerName" : "om", + "volume" : "vol5", + "creationTime" : 1807192332309, + "modificationTime" : 1914057410005, + "quotaInBytes" : 7785494951936, + "quotaInNamespace" : 100000000, + "usedNamespace" : 2, + "acls" : [ { + "type" : "USER", + "name" : "om", + "aclScope" : "ACCESS" + } ], + "refCount" : 0 +} + } +``` +使用 `lesser` (较小) 运算符(`greater`(较大) 运算符也可以以相同的方式使用): +```bash +$ ozone debug ldb --db=/path/to/om.db scan --cf=volumeTable --filter="usedNamespace:lesser:2" +``` +```json +{ + "/vol2": { + "metadata": {}, + "objectID": -73548, + "updateID": 2384, + "adminName": "om", + "ownerName": "om", + "volume": "vol2", + "creationTime": 11980979907, + "modificationTime": 1296280979900, + "quotaInBytes": 417913685250, + "quotaInNamespace": 100000000, + "usedNamespace": 1, + "acls": [ + { + "type": "USER", + "name": "om", + "aclScope": "ACCESS" + } + ], + "refCount": 0 + } + } +``` +使用 `regex` 运算符: +```bash +$ ozone debug ldb --db=/path/to/om.db scan --cf=volumeTable --filter="volume:regex:^v.*2$" +``` +```json +{ + "/vol2": { + "metadata": {}, + "objectID": -73548, + "updateID": 2384, + "adminName": "om", + "ownerName": "om", + "volume": "vol2", + "creationTime": 11980979907, + "modificationTime": 1296280979900, + "quotaInBytes": 417913685250, + "quotaInNamespace": 100000000, + "usedNamespace": 1, + "acls": [ + { + "type": "USER", + "name": "om", + "aclScope": "ACCESS" + } + ], + "refCount": 0 + } + } +``` + +使用多个过滤器: +```bash +$ ozone debug ldb --db=/path/to/om.db scan --cf=volumeTable --filter="usedNamespace:equals:2,volume:regex:^.*4$" +``` +```json +{ + "/vol4": { + "metadata": {}, + "objectID": -888, + "updateID": 5000, + "adminName": "om", + "ownerName": "om", + "volume": "vol4", + "creationTime": 1696280979907, + "modificationTime": 1696280979907, + "quotaInBytes": 2251799813685250, + "quotaInNamespace": 100000000, + "usedNamespace": 2, + "acls": [ + { + "type": "USER", + "name": "om", + "aclScope": "ACCESS" + } + ], + "refCount": 0 + } + } +``` + +### value-schema command + +“value-schema”命令显示存储在rocksdb的列族中的值的模式,即,它显示存储在值中的字段及其数据类型。 +可以选择使用`--depth`来限制获取字段的级别。 + +```bash +$ ozone debug ldb --db=/data/metadata/om.db value-schema --cf=keyTable --depth=1 +``` +```json +{ + "OmKeyInfo" : { + "bucketName" : "String", + "metadata" : "struct", + "fileName" : "String", + "creationTime" : "long", + "isFile" : "boolean", + "acls" : "struct", + "keyName" : "String", + "replicationConfig" : "struct", + "encInfo" : "struct", + "dataSize" : "long", + "tags" : "struct", + "keyLocationVersions" : "struct", + "updateID" : "long", + "ownerName" : "String", + "modificationTime" : "long", + "parentObjectID" : "long", + "volumeName" : "String", + "fileChecksum" : "struct", + "objectID" : "long" +} + } +``` +```bash +$ ozone debug ldb --db=/data/metadata/om.db value-schema --cf=keyTable +``` +```json +{ + "OmKeyInfo" : { + "bucketName" : "String", + "metadata" : { }, + 
"fileName" : "String", + "creationTime" : "long", + "isFile" : "boolean", + "acls" : { + "toStringMethod" : { }, + "hashCodeMethod" : { }, + "name" : "String", + "type" : { + "name" : "String", + "value" : "String", + "ordinal" : "int" + }, + "aclScope" : { + "name" : "String", + "ordinal" : "int" + }, + "aclBits" : "int" + }, + "keyName" : "String", + "replicationConfig" : { }, + "encInfo" : { + "ezKeyVersionName" : "String", + "keyName" : "String", + "edek" : { }, + "cipherSuite" : { + "unknownValue" : { + "value" : "int" + }, + "name" : "String", + "algoBlockSize" : "int", + "ordinal" : "int" + }, + "version" : { + "unknownValue" : { + "value" : "int" + }, + "name" : "String", + "description" : "String", + "version" : "int", + "ordinal" : "int" + }, + "iv" : { } + }, + "dataSize" : "long", + "tags" : { }, + "keyLocationVersions" : { + "isMultipartKey" : "boolean", + "locationVersionMap" : { }, + "version" : "long" + }, + "updateID" : "long", + "ownerName" : "String", + "modificationTime" : "long", + "parentObjectID" : "long", + "volumeName" : "String", + "fileChecksum" : { }, + "objectID" : "long" + } +} +``` \ No newline at end of file diff --git a/hadoop-hdds/docs/pom.xml b/hadoop-hdds/docs/pom.xml index d14ae28c10d..7f4ffbb8a70 100644 --- a/hadoop-hdds/docs/pom.xml +++ b/hadoop-hdds/docs/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-docs - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone/HDDS Documentation Apache Ozone/HDDS Documentation jar diff --git a/hadoop-hdds/erasurecode/pom.xml b/hadoop-hdds/erasurecode/pom.xml index 201336d5ed3..b540d1c68ea 100644 --- a/hadoop-hdds/erasurecode/pom.xml +++ b/hadoop-hdds/erasurecode/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-erasurecode - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Earsurecode utils Apache Ozone HDDS Erasurecode diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/CodecRegistry.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/CodecRegistry.java index 83650c132b0..2069a51be17 100644 --- a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/CodecRegistry.java +++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/CodecRegistry.java @@ -19,9 +19,9 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.annotation.InterfaceAudience; -import org.apache.ozone.erasurecode.rawcoder.RawErasureCoderFactory; import org.apache.ozone.erasurecode.rawcoder.NativeRSRawErasureCoderFactory; import org.apache.ozone.erasurecode.rawcoder.NativeXORRawErasureCoderFactory; +import org.apache.ozone.erasurecode.rawcoder.RawErasureCoderFactory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -31,7 +31,6 @@ import java.util.Map; import java.util.ServiceLoader; import java.util.Set; -import java.util.stream.Collectors; /** * This class registers all coder implementations. @@ -108,8 +107,8 @@ void updateCoders(Iterable coderFactories) { String codecName = entry.getKey(); List coders = entry.getValue(); coderNameMap.put(codecName, coders.stream(). - map(RawErasureCoderFactory::getCoderName). 
- collect(Collectors.toList()).toArray(new String[0])); + map(RawErasureCoderFactory::getCoderName) + .toArray(String[]::new)); } } diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml index 70cce849aec..37d41cde390 100644 --- a/hadoop-hdds/framework/pom.xml +++ b/hadoop-hdds/framework/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-server-framework - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Server Framework Apache Ozone HDDS Server Framework diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/MoveDataNodePair.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/MoveDataNodePair.java index 42e8f8202cb..4690054a87d 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/MoveDataNodePair.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/MoveDataNodePair.java @@ -36,6 +36,7 @@ public class MoveDataNodePair { Proto2Codec.get(MoveDataNodePairProto.getDefaultInstance()), MoveDataNodePair::getFromProtobuf, pair -> pair.getProtobufMessage(ClientVersion.CURRENT_VERSION), + MoveDataNodePair.class, DelegatedCodec.CopyType.SHALLOW); public static Codec getCodec() { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index 11231d2d01b..5e293eae67b 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -109,6 +109,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfoRequestProto; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; @@ -129,6 +130,7 @@ import java.io.Closeable; import java.io.IOException; +import java.util.Arrays; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -158,6 +160,12 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB private final StorageContainerLocationProtocolPB rpcProxy; private final SCMContainerLocationFailoverProxyProvider fpp; + /** + * This is used to check if 'leader' or 'follower' exists, + * in order to confirm whether we have enabled Ratis. + */ + private final List scmRatisRolesToCheck = Arrays.asList("leader", "follower"); + /** * Creates a new StorageContainerLocationProtocolClientSideTranslatorPB. 
* @@ -383,19 +391,19 @@ public List getExistContainerWithPipelinesInBatch( * {@inheritDoc} */ @Override - public List listContainer(long startContainerID, int count) + public ContainerListResult listContainer(long startContainerID, int count) throws IOException { return listContainer(startContainerID, count, null, null, null); } @Override - public List listContainer(long startContainerID, int count, + public ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException { return listContainer(startContainerID, count, state, null, null); } @Override - public List listContainer(long startContainerID, int count, + public ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig replicationConfig) @@ -437,12 +445,17 @@ public List listContainer(long startContainerID, int count, .getContainersList()) { containerList.add(ContainerInfo.fromProtobuf(containerInfoProto)); } - return containerList; + + if (response.hasContainerCount()) { + return new ContainerListResult(containerList, response.getContainerCount()); + } else { + return new ContainerListResult(containerList, -1); + } } @Deprecated @Override - public List listContainer(long startContainerID, int count, + public ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) throws IOException { throw new UnsupportedOperationException("Should no longer be called from " + @@ -761,8 +774,23 @@ public ScmInfo getScmInfo() throws IOException { .setScmId(resp.getScmId()) .setRatisPeerRoles(resp.getPeerRolesList()); - return builder.build(); + // By default, we assume that SCM Ratis is not enabled. + // If the response contains the `ScmRatisEnabled` field, + // we will set it directly; otherwise, + // we will determine if Ratis is enabled based on + // whether the `peerRolesList` contains the keywords 'leader' or 'follower'. + if (resp.hasScmRatisEnabled()) { + builder.setScmRatisEnabled(resp.getScmRatisEnabled()); + } else { + List peerRolesList = resp.getPeerRolesList(); + if (!peerRolesList.isEmpty()) { + boolean containsScmRoles = peerRolesList.stream().map(String::toLowerCase) + .anyMatch(scmRatisRolesToCheck::contains); + builder.setScmRatisEnabled(containsScmRoles); + } + } + return builder.build(); } @Override @@ -1188,7 +1216,7 @@ public void close() { public List getListOfContainers( long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException { - return listContainer(startContainerID, count, state); + return listContainer(startContainerID, count, state).getContainerInfoList(); } @Override diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/OzoneSecretManager.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/OzoneSecretManager.java index 601bdf0ea72..b25941ca676 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/OzoneSecretManager.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/OzoneSecretManager.java @@ -172,14 +172,13 @@ public int incrementDelegationTokenSeqNum() { */ private OzoneSecretKey updateCurrentKey(KeyPair keyPair, X509Certificate certificate) { - logger.info("Updating current master key for generating tokens. 
Cert id {}", - certificate.getSerialNumber().toString()); - int newCurrentId = incrementCurrentKeyId(); OzoneSecretKey newKey = new OzoneSecretKey(newCurrentId, certificate.getNotAfter().getTime(), keyPair, certificate.getSerialNumber().toString()); currentKey.set(newKey); + logger.info("Updated current master key for generating tokens. Cert id {}, Master key id {}", + certificate.getSerialNumber().toString(), newKey.getKeyId()); return newKey; } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/CertInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/CertInfo.java index b78604643e5..79f41fba865 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/CertInfo.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/CertInfo.java @@ -39,7 +39,8 @@ public final class CertInfo implements Comparable, Serializable { private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(CertInfoProto.getDefaultInstance()), CertInfo::fromProtobuf, - CertInfo::getProtobuf); + CertInfo::getProtobuf, + CertInfo.class); public static Codec getCodec() { return CODEC; @@ -133,7 +134,6 @@ public String toString() { /** * Builder class for CertInfo. */ - @SuppressWarnings("checkstyle:hiddenfield") public static class Builder { private X509Certificate x509Certificate; private long timestamp; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneAdmins.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneAdmins.java index 12b6b64f49a..1f8568866d8 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneAdmins.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneAdmins.java @@ -17,13 +17,16 @@ */ package org.apache.hadoop.hdds.server; +import java.io.IOException; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.LinkedHashSet; import java.util.Set; import com.google.common.collect.Sets; +import jakarta.annotation.Nullable; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; @@ -33,6 +36,8 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_READONLY_ADMINISTRATORS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_READONLY_ADMINISTRATORS_GROUPS; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_S3_ADMINISTRATORS; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_S3_ADMINISTRATORS_GROUPS; /** * This class contains ozone admin user information, username and group, @@ -186,4 +191,88 @@ public static Collection getOzoneReadOnlyAdminsGroupsFromConfig( return conf.getTrimmedStringCollection( OZONE_READONLY_ADMINISTRATORS_GROUPS); } + + /** + * Get the list of S3 administrators from Ozone config. + *

+ * Notes:
+ * <ul>
+ *   <li>If ozone.s3.administrators value is empty string or unset,
+ *   defaults to ozone.administrators value.</li>
+ *   <li>If current user is not part of the administrators group,
+ *   {@link UserGroupInformation#getCurrentUser()} will be added to the resulting list</li>
+ * </ul>
    + * @param conf An instance of {@link OzoneConfiguration} being used + * @return A {@link Collection} of the S3 administrator users + */ + public static Set getS3AdminsFromConfig(OzoneConfiguration conf) throws IOException { + Set ozoneAdmins = new HashSet<>(conf.getTrimmedStringCollection(OZONE_S3_ADMINISTRATORS)); + + if (ozoneAdmins.isEmpty()) { + ozoneAdmins = new HashSet<>(conf.getTrimmedStringCollection(OZONE_ADMINISTRATORS)); + } + + String omSPN = UserGroupInformation.getCurrentUser().getShortUserName(); + ozoneAdmins.add(omSPN); + + return ozoneAdmins; + } + + /** + * Get the list of the groups that are a part of S3 administrators from Ozone config. + *

    + * Note: If ozone.s3.administrators.groups value is empty or unset, + * defaults to the ozone.administrators.groups value + * + * @param conf An instance of {@link OzoneConfiguration} being used + * @return A {@link Collection} of the S3 administrator groups + */ + public static Set getS3AdminsGroupsFromConfig(OzoneConfiguration conf) { + Set s3AdminsGroup = new HashSet<>(conf.getTrimmedStringCollection(OZONE_S3_ADMINISTRATORS_GROUPS)); + + if (s3AdminsGroup.isEmpty() && conf.getTrimmedStringCollection(OZONE_S3_ADMINISTRATORS).isEmpty()) { + s3AdminsGroup = new HashSet<>(conf.getTrimmedStringCollection(OZONE_ADMINISTRATORS_GROUPS)); + } + + return s3AdminsGroup; + } + + /** + * Get the users and groups that are a part of S3 administrators. + * @param conf Stores an instance of {@link OzoneConfiguration} being used + * @return an instance of {@link OzoneAdmins} containing the S3 admin users and groups + */ + public static OzoneAdmins getS3Admins(OzoneConfiguration conf) { + Set s3Admins; + try { + s3Admins = getS3AdminsFromConfig(conf); + } catch (IOException ie) { + s3Admins = Collections.emptySet(); + } + Set s3AdminGroups = getS3AdminsGroupsFromConfig(conf); + + return new OzoneAdmins(s3Admins, s3AdminGroups); + } + + /** + * Check if the provided user is an S3 administrator. + * @param user An instance of {@link UserGroupInformation} with information about the user to verify + * @param s3Admins An instance of {@link OzoneAdmins} containing information + * of the S3 administrator users and groups in the system + * @return {@code true} if the provided user is an S3 administrator else {@code false} + */ + public static boolean isS3Admin(@Nullable UserGroupInformation user, OzoneAdmins s3Admins) { + return null != user && s3Admins.isAdmin(user); + } + + /** + * Check if the provided user is an S3 administrator. + * @param user An instance of {@link UserGroupInformation} with information about the user to verify + * @param conf An instance of {@link OzoneConfiguration} being used + * @return {@code true} if the provided user is an S3 administrator else {@code false} + */ + public static boolean isS3Admin(@Nullable UserGroupInformation user, OzoneConfiguration conf) { + OzoneAdmins s3Admins = getS3Admins(conf); + return isS3Admin(user, s3Admins); + } } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfo.java index bcd75f3f215..f966ef00932 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfo.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfo.java @@ -33,6 +33,15 @@ */ public interface ServiceRuntimeInfo { + /** + * Gets the namespace of Ozone. + * + * @return the namespace + */ + default String getNamespace() { + return ""; + }; + /** * Gets the version of Hadoop. * @@ -47,13 +56,6 @@ public interface ServiceRuntimeInfo { */ String getSoftwareVersion(); - /** - * Get the compilation information which contains date, user and branch. - * - * @return the compilation information, as a JSON string. - */ - String getCompileInfo(); - /** * Gets the NN start time in milliseconds. 
* diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfoImpl.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfoImpl.java index 987f4aee031..74ba3c5b629 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfoImpl.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfoImpl.java @@ -42,12 +42,6 @@ public String getSoftwareVersion() { return versionInfo.getVersion(); } - @Override - public String getCompileInfo() { - return versionInfo.getDate() + " by " + versionInfo.getUser() + " from " - + versionInfo.getBranch(); - } - @Override public long getStartedTimeInMillis() { return startedTimeInMillis; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointServlet.java index 2d718628e1e..cb1fdd3375a 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointServlet.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointServlet.java @@ -41,22 +41,20 @@ import org.apache.commons.fileupload.servlet.ServletFileUpload; import org.apache.commons.fileupload.util.Streams; import org.apache.commons.io.FileUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.server.OzoneAdmins; import org.apache.hadoop.hdds.utils.db.DBCheckpoint; import org.apache.hadoop.hdds.utils.db.DBStore; - -import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.ozone.lock.BootstrapStateHandler; +import org.apache.hadoop.security.UserGroupInformation; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.apache.hadoop.hdds.utils.HddsServerUtil.writeDBCheckpointToStream; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_FLUSH; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_TO_EXCLUDE_SST; import static org.apache.hadoop.ozone.OzoneConsts.ROCKSDB_SST_SUFFIX; -import org.apache.hadoop.ozone.lock.BootstrapStateHandler; -import org.apache.hadoop.security.UserGroupInformation; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * Provides the current checkpoint Snapshot of the OM/SCM DB. (tar) */ @@ -287,7 +285,7 @@ private static String[] parseFormDataParameters(HttpServletRequest request) { LOG.warn("Exception occured during form data parsing {}", e.getMessage()); } - return sstParam.size() == 0 ? null : sstParam.toArray(new String[0]); + return sstParam.isEmpty() ? 
null : sstParam.toArray(new String[0]); } /** diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java index 94e9dceb6a7..d80b6b3a272 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java @@ -742,9 +742,7 @@ public static String createStartupShutdownMessage(VersionInfo versionInfo, " version = " + versionInfo.getVersion(), " classpath = " + System.getProperty("java.class.path"), " build = " + versionInfo.getUrl() + "/" - + versionInfo.getRevision() - + " ; compiled by '" + versionInfo.getUser() - + "' on " + versionInfo.getDate(), + + versionInfo.getRevision(), " java = " + System.getProperty("java.version"), " conf = " + conf); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java index 29531f31518..8387934261c 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java @@ -45,6 +45,7 @@ public final class TransactionInfo implements Comparable { StringCodec.get(), TransactionInfo::valueOf, TransactionInfo::toString, + TransactionInfo.class, DelegatedCodec.CopyType.SHALLOW); public static Codec getCodec() { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteArrayCodec.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteArrayCodec.java index f62d3ac19cf..bb5eef70d25 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteArrayCodec.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteArrayCodec.java @@ -33,6 +33,11 @@ private ByteArrayCodec() { // singleton } + @Override + public Class getTypeClass() { + return byte[].class; + } + @Override public byte[] toPersistedFormat(byte[] bytes) { return bytes; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteStringCodec.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteStringCodec.java index 997bdf6cf2e..20e373317b1 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteStringCodec.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteStringCodec.java @@ -34,6 +34,11 @@ public static ByteStringCodec get() { private ByteStringCodec() { } + @Override + public Class getTypeClass() { + return ByteString.class; + } + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBColumnFamilyDefinition.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBColumnFamilyDefinition.java index 653182214b6..a5268e6031c 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBColumnFamilyDefinition.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBColumnFamilyDefinition.java @@ -54,32 +54,21 @@ public class DBColumnFamilyDefinition { private final String tableName; - private final Class keyType; - private final Codec keyCodec; - private final Class valueType; - private final Codec valueCodec; - private ManagedColumnFamilyOptions cfOptions; + 
private volatile ManagedColumnFamilyOptions cfOptions; - public DBColumnFamilyDefinition( - String tableName, - Class keyType, - Codec keyCodec, - Class valueType, - Codec valueCodec) { + public DBColumnFamilyDefinition(String tableName, Codec keyCodec, Codec valueCodec) { this.tableName = tableName; - this.keyType = keyType; this.keyCodec = keyCodec; - this.valueType = valueType; this.valueCodec = valueCodec; this.cfOptions = null; } public Table getTable(DBStore db) throws IOException { - return db.getTable(tableName, keyType, valueType); + return db.getTable(tableName, getKeyType(), getValueType()); } public String getName() { @@ -87,7 +76,7 @@ public String getName() { } public Class getKeyType() { - return keyType; + return keyCodec.getTypeClass(); } public Codec getKeyCodec() { @@ -95,7 +84,7 @@ public Codec getKeyCodec() { } public Class getValueType() { - return valueType; + return valueCodec.getTypeClass(); } public Codec getValueCodec() { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBDefinition.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBDefinition.java index 968d62f0dd5..461bd35f413 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBDefinition.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBDefinition.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.server.ServerUtils; +import org.apache.ratis.util.MemoizedSupplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -28,6 +29,9 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; /** * Simple interface to provide information to create a DBStore.. @@ -55,6 +59,16 @@ default File getDBLocation(ConfigurationSource conf) { getLocationConfigKey(), getName()); } + static List getColumnFamilyNames(Iterable> columnFamilies) { + return Collections.unmodifiableList(StreamSupport.stream(columnFamilies.spliterator(), false) + .map(DBColumnFamilyDefinition::getName) + .collect(Collectors.toList())); + } + + default List getColumnFamilyNames() { + return getColumnFamilyNames(getColumnFamilies()); + } + /** * @return The column families present in the DB. 
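With the constructor change above, a column family definition is built from its codecs alone and the key/value classes are recovered through Codec#getTypeClass(). A sketch under that assumption; the table name and types are illustrative, not part of this patch:

import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
import org.apache.hadoop.hdds.utils.db.LongCodec;
import org.apache.hadoop.hdds.utils.db.StringCodec;

public final class ColumnFamilySketch {
  private ColumnFamilySketch() { }

  // Before this patch the types were passed explicitly:
  //   new DBColumnFamilyDefinition<>("sampleTable", String.class, StringCodec.get(), Long.class, LongCodec.get());
  public static final DBColumnFamilyDefinition<String, Long> SAMPLE_TABLE =
      new DBColumnFamilyDefinition<>("sampleTable", StringCodec.get(), LongCodec.get());

  // getKeyType() and getValueType() now delegate to the codecs, so
  // SAMPLE_TABLE.getKeyType() is String.class and SAMPLE_TABLE.getValueType() is Long.class.
}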
*/ @@ -109,9 +123,17 @@ interface WithMapInterface extends DBDefinition { */ abstract class WithMap implements WithMapInterface { private final Map> map; + private final Supplier> columnFamilyNames; protected WithMap(Map> map) { this.map = map; + this.columnFamilyNames = MemoizedSupplier.valueOf( + () -> DBDefinition.getColumnFamilyNames(getColumnFamilies())); + } + + @Override + public final List getColumnFamilyNames() { + return columnFamilyNames.get(); } @Override diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java index ed8d145b666..1e42241ee43 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java @@ -163,7 +163,8 @@ private DBStoreBuilder(ConfigurationSource configuration, OZONE_OM_DELTA_UPDATE_DATA_SIZE_MAX_LIMIT_DEFAULT, StorageUnit.BYTES); } - private void applyDBDefinition(DBDefinition definition) { + public static File getDBDirPath(DBDefinition definition, + ConfigurationSource configuration) { // Set metadata dirs. File metadataDir = definition.getDBLocation(configuration); @@ -174,6 +175,12 @@ private void applyDBDefinition(DBDefinition definition) { HddsConfigKeys.OZONE_METADATA_DIRS); metadataDir = getOzoneMetaDirPath(configuration); } + return metadataDir; + } + + private void applyDBDefinition(DBDefinition definition) { + // Set metadata dirs. + File metadataDir = getDBDirPath(definition, configuration); setName(definition.getName()); setPath(Paths.get(metadataDir.getPath())); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java new file mode 100644 index 00000000000..9cc1695298c --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java @@ -0,0 +1,133 @@ +package org.apache.hadoop.hdds.utils.db; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import org.apache.hadoop.hdds.utils.MetadataKeyFilters; + +import java.io.File; +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** + * InMemory Table implementation for tests. 
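The InMemoryTestTable whose body follows is a plain map-backed Table for unit tests: puts, gets, existence checks and deletes work, while batch, range and iterator operations throw UnsupportedOperationException. A hypothetical usage sketch, assuming the generic Table<KEY, VALUE> contract:

import org.apache.hadoop.hdds.utils.db.InMemoryTestTable;
import org.apache.hadoop.hdds.utils.db.Table;

public final class InMemoryTableSketch {
  private InMemoryTableSketch() { }

  static void roundTrip() throws Exception {
    // Drop-in stand-in for a RocksDB-backed table inside a unit test.
    Table<String, Long> table = new InMemoryTestTable<>();
    table.put("container-1", 42L);
    assert table.isExist("container-1");
    assert table.get("container-1") == 42L;
    table.delete("container-1");
    assert table.isEmpty();
  }
}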
+ */ +public final class InMemoryTestTable implements Table { + private final Map map = new ConcurrentHashMap<>(); + + @Override + public void close() { + } + + @Override + public void put(KEY key, VALUE value) { + map.put(key, value); + } + + @Override + public void putWithBatch(BatchOperation batch, KEY key, VALUE value) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isEmpty() { + return map.isEmpty(); + } + + @Override + public boolean isExist(KEY key) { + return map.containsKey(key); + } + + @Override + public VALUE get(KEY key) { + return map.get(key); + } + + @Override + public VALUE getIfExist(KEY key) { + return map.get(key); + } + + @Override + public void delete(KEY key) { + map.remove(key); + } + + @Override + public void deleteWithBatch(BatchOperation batch, KEY key) { + throw new UnsupportedOperationException(); + } + + @Override + public void deleteRange(KEY beginKey, KEY endKey) { + throw new UnsupportedOperationException(); + } + + @Override + public TableIterator> iterator() { + throw new UnsupportedOperationException(); + } + + @Override + public TableIterator> iterator(KEY prefix) { + throw new UnsupportedOperationException(); + } + + @Override + public String getName() { + return ""; + } + + @Override + public long getEstimatedKeyCount() { + return map.size(); + } + + @Override + public List> getRangeKVs(KEY startKey, int count, KEY prefix, + MetadataKeyFilters.MetadataKeyFilter... filters) + throws IOException, IllegalArgumentException { + throw new UnsupportedOperationException(); + } + + @Override + public List> getSequentialRangeKVs(KEY startKey, int count, KEY prefix, + MetadataKeyFilters.MetadataKeyFilter... filters) + throws IOException, IllegalArgumentException { + throw new UnsupportedOperationException(); + } + + @Override + public void deleteBatchWithPrefix(BatchOperation batch, KEY prefix) { + throw new UnsupportedOperationException(); + } + + @Override + public void dumpToFileWithPrefix(File externalFile, KEY prefix) { + throw new UnsupportedOperationException(); + } + + @Override + public void loadFromFile(File externalFile) { + throw new UnsupportedOperationException(); + } +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java index c156b8e4d67..945138b8b8b 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java @@ -306,7 +306,7 @@ public void batchPut(ManagedWriteBatch writeBatch, ByteBuffer key, ByteBuffer value) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("batchPut buffer key {}", bytes2String(key.duplicate())); - LOG.debug("batchPut buffer value {}", bytes2String(value.duplicate())); + LOG.debug("batchPut buffer value size {}", value.remaining()); } try (UncheckedAutoCloseable ignored = acquire()) { diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css index 389d9d78f21..4988cc8eeb1 100644 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css @@ -94,4 +94,23 @@ body { .scm-roles-background { background-color: #dcfbcd!important; -} \ No newline at end of file +} +.toggle-btn { + background: transparent; /* No background color */ + color: #007bff; /* 
Button text color */ + border: none; /* No border */ + font-size: 12px; /* Font size for better readability */ + cursor: pointer; /* Pointer cursor on hover */ + padding: 5px 10px; /* Padding around the text */ + margin-bottom: 5px; /* Space below the button */ + transition: color 0.3s, transform 0.3s; /* Smooth transition for color and transform */ +} + +.toggle-btn:hover { + color: #0056b3; /* Darker color on hover */ + transform: scale(1.1); /* Slightly scale up the button on hover */ +} + +.toggle-btn:focus { + outline: none; /* Remove default focus outline */ +} diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js index a31078cfd7b..7bb93106284 100644 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js @@ -48,8 +48,14 @@ }); angular.module('ozone').component('jvmParameters', { templateUrl: 'static/templates/jvm.html', - controller: function($http) { + controller: function($http, $scope) { var ctrl = this; + + $scope.contentVisible = false; + $scope.toggleContent = function() { + $scope.contentVisible = !$scope.contentVisible; + }; + $http.get("jmx?qry=java.lang:type=Runtime") .then(function(result) { ctrl.jmx = result.data.beans[0]; @@ -245,7 +251,11 @@ angular.module('ozone').component('navmenu', { bindings: { - metrics: '<' + metrics: '<', + iostatus: '<', + ioLinkHref: '@', + scanner: '<', + scannerLinkHref: '@', }, templateUrl: 'static/templates/menu.html', controller: function($http) { diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html index 9706ebdf6b3..c562ae7d9a2 100644 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html @@ -20,7 +20,16 @@ {{$ctrl.jmx.SystemProperties.java_vm_name}} {{$ctrl.jmx.SystemProperties.java_vm_version}} - Input arguments: -

    {{$ctrl.jmx.InputArguments.join('\n')}}
    + + Input arguments: + + + +
    +
    {{$ctrl.jmx.InputArguments.join('\n')}}
    +
    + diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html index 95f1b4842f1..9a14f356d7a 100644 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html @@ -56,5 +56,7 @@ aria-hidden="true"> +
  • IO Status
  • +
  • Data Scanner
  • diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html index 30e2d26f56f..2811e8c36a5 100644 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html @@ -14,9 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. --> -

    Overview

    +

    Overview ({{$ctrl.jmx.Hostname}})

    + + + + diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestOzoneAdmins.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestOzoneAdmins.java new file mode 100644 index 00000000000..47a90d05df7 --- /dev/null +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestOzoneAdmins.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + * + */ + +package org.apache.hadoop.hdds.server; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.security.UserGroupInformation; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.ValueSource; + +import java.io.IOException; +import java.util.Arrays; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * This class is to test the utilities present in the OzoneAdmins class. + */ +class TestOzoneAdmins { + // The following set of tests are to validate the S3 based utilities present in OzoneAdmins + private OzoneConfiguration configuration; + + @BeforeEach + void setUp() { + configuration = new OzoneConfiguration(); + } + + @ParameterizedTest + @ValueSource(strings = {OzoneConfigKeys.OZONE_S3_ADMINISTRATORS, + OzoneConfigKeys.OZONE_ADMINISTRATORS}) + void testS3AdminExtraction(String configKey) throws IOException { + configuration.set(configKey, "alice,bob"); + + assertThat(OzoneAdmins.getS3AdminsFromConfig(configuration)) + .containsAll(Arrays.asList("alice", "bob")); + } + + @ParameterizedTest + @ValueSource(strings = {OzoneConfigKeys.OZONE_S3_ADMINISTRATORS_GROUPS, + OzoneConfigKeys.OZONE_ADMINISTRATORS_GROUPS}) + void testS3AdminGroupExtraction(String configKey) { + configuration.set(configKey, "test1, test2"); + + assertThat(OzoneAdmins.getS3AdminsGroupsFromConfig(configuration)) + .containsAll(Arrays.asList("test1", "test2")); + } + + @ParameterizedTest + @CsvSource({ + OzoneConfigKeys.OZONE_ADMINISTRATORS + ", " + OzoneConfigKeys.OZONE_ADMINISTRATORS_GROUPS, + OzoneConfigKeys.OZONE_S3_ADMINISTRATORS + ", " + OzoneConfigKeys.OZONE_S3_ADMINISTRATORS_GROUPS + }) + void testIsAdmin(String adminKey, String adminGroupKey) { + // When there is no S3 admin, but Ozone admins present + configuration.set(adminKey, "alice"); + configuration.set(adminGroupKey, "test_group"); + + OzoneAdmins admins = OzoneAdmins.getS3Admins(configuration); + UserGroupInformation ugi = UserGroupInformation.createUserForTesting( + "alice", new String[] {"test_group"}); + + assertThat(admins.isAdmin(ugi)).isEqualTo(true); + + // Test that when a user is present in an admin group but not an Ozone Admin + UserGroupInformation ugiGroupOnly = UserGroupInformation.createUserForTesting( + "bob", new String[] {"test_group"}); + assertThat(admins.isAdmin(ugiGroupOnly)).isEqualTo(true); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + void testIsAdminWithUgi(boolean isAdminSet) { + if (isAdminSet) { + configuration.set(OzoneConfigKeys.OZONE_ADMINISTRATORS, "alice"); + configuration.set(OzoneConfigKeys.OZONE_ADMINISTRATORS_GROUPS, "test_group"); + } + OzoneAdmins admins = OzoneAdmins.getS3Admins(configuration); + UserGroupInformation ugi = UserGroupInformation.createUserForTesting( + "alice", new String[] {"test_group"}); + // Test that when a user is present in an admin group but not an Ozone Admin + UserGroupInformation ugiGroupOnly = UserGroupInformation.createUserForTesting( + "bob", new String[] {"test_group"}); + + 
assertThat(admins.isAdmin(ugi)).isEqualTo(isAdminSet); + assertThat(admins.isAdmin(ugiGroupOnly)).isEqualTo(isAdminSet); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + void testIsS3AdminWithUgiAndConfiguration(boolean isAdminSet) { + if (isAdminSet) { + configuration.set(OzoneConfigKeys.OZONE_S3_ADMINISTRATORS, "alice"); + configuration.set(OzoneConfigKeys.OZONE_S3_ADMINISTRATORS_GROUPS, "test_group"); + UserGroupInformation ugi = UserGroupInformation.createUserForTesting( + "alice", new String[] {"test_group"}); + // Scenario when user is present in an admin group but not an Ozone Admin + UserGroupInformation ugiGroupOnly = UserGroupInformation.createUserForTesting( + "bob", new String[] {"test_group"}); + + assertThat(OzoneAdmins.isS3Admin(ugi, configuration)).isEqualTo(true); + assertThat(OzoneAdmins.isS3Admin(ugiGroupOnly, configuration)).isEqualTo(true); + } else { + assertThat(OzoneAdmins.isS3Admin(null, configuration)).isEqualTo(false); + } + + } +} diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java index aad3e9e12e6..7966afe5045 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java @@ -179,7 +179,7 @@ public void builderWithColumnFamilyOptions(@TempDir Path tempDir) String sampleTableName = "sampleTable"; final DBColumnFamilyDefinition sampleTable = new DBColumnFamilyDefinition<>(sampleTableName, - String.class, StringCodec.get(), Long.class, LongCodec.get()); + StringCodec.get(), LongCodec.get()); final DBDefinition sampleDB = new DBDefinition.WithMap( DBColumnFamilyDefinition.newUnmodifiableMap(sampleTable)) { { @@ -250,8 +250,8 @@ public void testIfAutoCompactionDisabled(boolean disableAutoCompaction, String sampleTableName = "sampleTable"; final DBColumnFamilyDefinition sampleTable = - new DBColumnFamilyDefinition<>(sampleTableName, String.class, - StringCodec.get(), Long.class, LongCodec.get()); + new DBColumnFamilyDefinition<>(sampleTableName, + StringCodec.get(), LongCodec.get()); final DBDefinition sampleDB = new DBDefinition.WithMap( DBColumnFamilyDefinition.newUnmodifiableMap(sampleTable)) { @Override diff --git a/hadoop-hdds/hadoop-dependency-client/pom.xml b/hadoop-hdds/hadoop-dependency-client/pom.xml index 03b677e3818..7676f1f45f1 100644 --- a/hadoop-hdds/hadoop-dependency-client/pom.xml +++ b/hadoop-hdds/hadoop-dependency-client/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-hadoop-dependency-client - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Hadoop client dependencies Apache Ozone HDDS Hadoop Client dependencies @@ -51,10 +51,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.xerial.snappy snappy-java - - org.apache.hadoop.thirdparty - hadoop-shaded-guava - org.apache.hadoop hadoop-annotations @@ -63,10 +59,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> com.google.guava guava - - commons-cli - commons-cli - + org.apache.commons commons-math3 @@ -210,10 +203,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - org.apache.hadoop.thirdparty - hadoop-shaded-guava - com.nimbusds nimbus-jose-jwt @@ -224,10 +213,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ${hadoop.version} compile - - org.apache.hadoop.thirdparty - 
hadoop-shaded-guava - com.google.guava guava diff --git a/hadoop-hdds/hadoop-dependency-server/pom.xml b/hadoop-hdds/hadoop-dependency-server/pom.xml index 69daeac4bd7..6be31002b09 100644 --- a/hadoop-hdds/hadoop-dependency-server/pom.xml +++ b/hadoop-hdds/hadoop-dependency-server/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-hadoop-dependency-server - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Hadoop server dependencies Apache Ozone HDDS Hadoop Server dependencies @@ -51,10 +51,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.xerial.snappy snappy-java - - org.apache.hadoop.thirdparty - hadoop-shaded-guava - org.apache.curator * @@ -148,10 +144,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - org.apache.hadoop.thirdparty - hadoop-shaded-guava - com.nimbusds nimbus-jose-jwt @@ -171,10 +163,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> com.sun.jersey * - - org.apache.hadoop.thirdparty - hadoop-shaded-guava - io.netty * diff --git a/hadoop-hdds/hadoop-dependency-test/pom.xml b/hadoop-hdds/hadoop-dependency-test/pom.xml index 80ec91cd6d9..f04e45a0340 100644 --- a/hadoop-hdds/hadoop-dependency-test/pom.xml +++ b/hadoop-hdds/hadoop-dependency-test/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-hadoop-dependency-test - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Hadoop test dependencies Apache Ozone HDDS Hadoop Test dependencies diff --git a/hadoop-hdds/interface-admin/pom.xml b/hadoop-hdds/interface-admin/pom.xml index 9230b02b524..f3197dc8965 100644 --- a/hadoop-hdds/interface-admin/pom.xml +++ b/hadoop-hdds/interface-admin/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-interface-admin - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Admin interface Apache Ozone HDDS Admin Interface @@ -31,6 +31,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> true + true @@ -80,14 +81,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/dev-support/findbugsExcludeFile.xml - - - diff --git a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto index dd4350ae499..ee187bfdc5d 100644 --- a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto +++ b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto @@ -299,6 +299,7 @@ message SCMListContainerRequestProto { message SCMListContainerResponseProto { repeated ContainerInfoProto containers = 1; + optional int64 containerCount = 2; } message SCMDeleteContainerRequestProto { @@ -619,30 +620,31 @@ message ContainerBalancerStatusInfoRequestProto { message ContainerBalancerStatusInfoResponseProto { optional bool isRunning = 1; - optional ContainerBalancerStatusInfo containerBalancerStatusInfo = 2; + optional ContainerBalancerStatusInfoProto containerBalancerStatusInfo = 2; } -message ContainerBalancerStatusInfo { +message ContainerBalancerStatusInfoProto { optional uint64 startedAt = 1; optional ContainerBalancerConfigurationProto configuration = 2; - repeated ContainerBalancerTaskIterationStatusInfo iterationsStatusInfo = 3; + repeated ContainerBalancerTaskIterationStatusInfoProto iterationsStatusInfo = 3; } -message 
ContainerBalancerTaskIterationStatusInfo { +message ContainerBalancerTaskIterationStatusInfoProto { optional int32 iterationNumber = 1; optional string iterationResult = 2; - optional int64 sizeScheduledForMoveGB = 3; - optional int64 dataSizeMovedGB = 4; + optional int64 sizeScheduledForMove = 3; + optional int64 dataSizeMoved = 4; optional int64 containerMovesScheduled = 5; optional int64 containerMovesCompleted = 6; optional int64 containerMovesFailed = 7; optional int64 containerMovesTimeout = 8; - repeated NodeTransferInfo sizeEnteringNodesGB = 9; - repeated NodeTransferInfo sizeLeavingNodesGB = 10; + repeated NodeTransferInfoProto sizeEnteringNodes = 9; + repeated NodeTransferInfoProto sizeLeavingNodes = 10; + optional int64 iterationDuration = 11; } -message NodeTransferInfo { +message NodeTransferInfoProto { optional string uuid = 1; - optional int64 dataVolumeGB = 2; + optional int64 dataVolume = 2; } message DecommissionScmRequestProto { diff --git a/hadoop-hdds/interface-client/pom.xml b/hadoop-hdds/interface-client/pom.xml index 98cfc53f5e8..1a61dfa930e 100644 --- a/hadoop-hdds/interface-client/pom.xml +++ b/hadoop-hdds/interface-client/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-interface-client - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Client interface Apache Ozone HDDS Client Interface @@ -31,6 +31,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> true + true @@ -40,7 +41,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop.thirdparty - hadoop-shaded-protobuf_3_7 + hadoop-shaded-protobuf_3_25 org.apache.ratis @@ -176,13 +177,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/dev-support/findbugsExcludeFile.xml - - diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto index e25d85e1957..cb4862cb6f3 100644 --- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto +++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto @@ -61,7 +61,7 @@ message ExtendedDatanodeDetailsProto { optional string version = 2; optional int64 setupTime = 3; optional string revision = 4; - optional string buildDate = 5; + optional string buildDate = 5; // unused, reserved for compatibility } message MoveDataNodePairProto { @@ -257,6 +257,7 @@ message GetScmInfoResponseProto { required string clusterId = 1; required string scmId = 2; repeated string peerRoles = 3; + optional bool scmRatisEnabled = 4; } message AddScmRequestProto { diff --git a/hadoop-hdds/interface-client/src/main/resources/proto.lock b/hadoop-hdds/interface-client/src/main/resources/proto.lock index 1f3f552a4d1..e59b77b93d4 100644 --- a/hadoop-hdds/interface-client/src/main/resources/proto.lock +++ b/hadoop-hdds/interface-client/src/main/resources/proto.lock @@ -2433,6 +2433,18 @@ "name": "containerCount", "type": "int64", "optional": true + }, + { + "id": 6, + "name": "committed", + "type": "int64", + "optional": true + }, + { + "id": 7, + "name": "freeSpaceToSpare", + "type": "int64", + "optional": true } ] }, diff --git a/hadoop-hdds/interface-server/pom.xml b/hadoop-hdds/interface-server/pom.xml index df65c1e2b2a..47bde5a0bc7 100644 --- a/hadoop-hdds/interface-server/pom.xml +++ b/hadoop-hdds/interface-server/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT 
hdds-interface-server - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Server interface Apache Ozone HDDS Server Interface @@ -31,6 +31,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> true + true @@ -142,13 +143,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/dev-support/findbugsExcludeFile.xml - - diff --git a/hadoop-hdds/interface-server/src/main/resources/proto.lock b/hadoop-hdds/interface-server/src/main/resources/proto.lock index 6966915f4a2..bb5748eab29 100644 --- a/hadoop-hdds/interface-server/src/main/resources/proto.lock +++ b/hadoop-hdds/interface-server/src/main/resources/proto.lock @@ -1427,6 +1427,30 @@ "value": "false" } ] + }, + { + "id": 8, + "name": "committed", + "type": "uint64", + "optional": true, + "options": [ + { + "name": "default", + "value": "0" + } + ] + }, + { + "id": 9, + "name": "freeSpaceToSpare", + "type": "uint64", + "optional": true, + "options": [ + { + "name": "default", + "value": "0" + } + ] } ] }, diff --git a/hadoop-hdds/managed-rocksdb/pom.xml b/hadoop-hdds/managed-rocksdb/pom.xml index 125783222e5..40ad920647a 100644 --- a/hadoop-hdds/managed-rocksdb/pom.xml +++ b/hadoop-hdds/managed-rocksdb/pom.xml @@ -19,10 +19,10 @@ org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-managed-rocksdb - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Managed RocksDB library Apache Ozone HDDS Managed RocksDB jar diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java index 5a5a577351b..ead43e9aaf8 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java @@ -18,6 +18,7 @@ */ package org.apache.hadoop.hdds.utils.db.managed; +import org.apache.commons.io.FilenameUtils; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.DBOptions; @@ -31,6 +32,8 @@ import java.io.IOException; import java.time.Duration; import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; /** * Managed {@link RocksDB}. 
@@ -102,4 +105,14 @@ public void deleteFile(LiveFileMetaData fileToBeDeleted) File file = new File(fileToBeDeleted.path(), fileToBeDeleted.fileName()); ManagedRocksObjectUtils.waitForFileDelete(file, Duration.ofSeconds(60)); } + + public static Map getLiveMetadataForSSTFiles(RocksDB db) { + return db.getLiveFilesMetaData().stream().collect( + Collectors.toMap(liveFileMetaData -> FilenameUtils.getBaseName(liveFileMetaData.fileName()), + liveFileMetaData -> liveFileMetaData)); + } + + public Map getLiveMetadataForSSTFiles() { + return getLiveMetadataForSSTFiles(this.get()); + } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java index 148abee7fc0..d58f70495fe 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java @@ -55,12 +55,7 @@ static UncheckedAutoCloseable track(AutoCloseable object) { static void reportLeak(Class clazz, String stackTrace) { ManagedRocksObjectMetrics.INSTANCE.increaseLeakObject(); - String warning = String.format("%s is not closed properly", clazz.getSimpleName()); - if (stackTrace != null && LOG.isDebugEnabled()) { - String debugMessage = String.format("%nStackTrace for unclosed instance: %s", stackTrace); - warning = warning.concat(debugMessage); - } - LOG.warn(warning); + HddsUtils.reportLeak(clazz, stackTrace, LOG); } private static @Nullable StackTraceElement[] getStackTrace() { diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml index 87d76158301..707e9852898 100644 --- a/hadoop-hdds/pom.xml +++ b/hadoop-hdds/pom.xml @@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone-main - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Project Apache Ozone HDDS pom @@ -54,18 +54,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> rocks-native - - - apache.snapshots.https - https://repository.apache.org/content/repositories/snapshots - - - - - apache.snapshots.https - https://repository.apache.org/content/repositories/snapshots - - @@ -322,36 +310,5 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - add-classpath-descriptor - - - src/main/java - - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - add-classpath-descriptor - prepare-package - - build-classpath - - - ${project.build.outputDirectory}/${project.artifactId}.classpath - $HDDS_LIB_JARS_DIR - true - runtime - - - - - - - diff --git a/hadoop-hdds/rocks-native/pom.xml b/hadoop-hdds/rocks-native/pom.xml index 5fc9949514b..4c751e0b10a 100644 --- a/hadoop-hdds/rocks-native/pom.xml +++ b/hadoop-hdds/rocks-native/pom.xml @@ -18,7 +18,7 @@ hdds org.apache.ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT 4.0.0 Apache Ozone HDDS RocksDB Tools @@ -385,7 +385,7 @@ maven-dependency-plugin - copy-jars + copy-dependencies process-sources copy-dependencies diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java index d93933dee36..08e397d0683 100644 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java +++ 
b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java @@ -20,6 +20,7 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.ozone.util.ShutdownHookManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -28,6 +29,7 @@ import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; +import java.nio.file.Path; import java.nio.file.StandardCopyOption; import java.util.ArrayList; import java.util.List; @@ -170,31 +172,38 @@ private Pair, List> copyResourceFromJarToTemp(final String getSystemProperty(NATIVE_LIB_TMP_DIR) : ""; final File dir = new File(nativeLibDir).getAbsoluteFile(); - // create a temporary file to copy the library to - final File temp = File.createTempFile(libraryName, getLibOsSuffix(), dir); - if (!temp.exists()) { + // create a temporary dir to copy the library to + final Path tempPath = Files.createTempDirectory(dir.toPath(), libraryName); + final File tempDir = tempPath.toFile(); + if (!tempDir.exists()) { return Pair.of(Optional.empty(), null); - } else { - temp.deleteOnExit(); } - Files.copy(is, temp.toPath(), StandardCopyOption.REPLACE_EXISTING); + Path libPath = tempPath.resolve(libraryFileName); + Files.copy(is, libPath, StandardCopyOption.REPLACE_EXISTING); + File libFile = libPath.toFile(); + if (libFile.exists()) { + libFile.deleteOnExit(); + } + List dependentFiles = new ArrayList<>(); for (String fileName : dependentFileNames) { if (is != null) { is.close(); } is = getResourceStream(fileName); - File file = new File(dir, fileName); - Files.copy(is, file.toPath(), StandardCopyOption.REPLACE_EXISTING); + Path path = tempPath.resolve(fileName); + Files.copy(is, path, StandardCopyOption.REPLACE_EXISTING); + File file = path.toFile(); if (file.exists()) { file.deleteOnExit(); } dependentFiles.add(file); } - ShutdownHookManager.get().addShutdownHook(temp::delete, + ShutdownHookManager.get().addShutdownHook( + () -> FileUtil.fullyDelete(tempDir), LIBRARY_SHUTDOWN_HOOK_PRIORITY); - return Pair.of(Optional.of(temp), dependentFiles); + return Pair.of(Optional.of(libFile), dependentFiles); } finally { if (is != null) { is.close(); diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java index f0074e0a1ac..6e1622ebd7c 100644 --- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java +++ b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.utils; +import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; import org.apache.ozone.test.tag.Native; import org.junit.jupiter.api.io.TempDir; @@ -28,15 +29,16 @@ import java.io.ByteArrayInputStream; import java.io.File; import java.nio.file.Path; -import java.util.Collections; +import java.util.Arrays; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.stream.Stream; import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; import static org.apache.hadoop.hdds.utils.NativeLibraryLoader.NATIVE_LIB_TMP_DIR; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; +import static 
org.apache.hadoop.hdds.utils.NativeLibraryLoader.getJniLibraryFileName; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.CALLS_REAL_METHODS; import static org.mockito.Mockito.anyString; @@ -68,21 +70,45 @@ public void testNativeLibraryLoader(String nativeLibraryDirectoryLocation) throw mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getInstance()).thenReturn(loader); ManagedRawSSTFileReader.loadLibrary(); assertTrue(NativeLibraryLoader.isLibraryLoaded(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)); + } + } + + @ParameterizedTest + @MethodSource("nativeLibraryDirectoryLocations") + public void testDummyLibrary(String nativeLibraryDirectoryLocation) { + Map libraryLoadedMap = new HashMap<>(); + NativeLibraryLoader loader = new NativeLibraryLoader(libraryLoadedMap); + try (MockedStatic mockedNativeLibraryLoader = mockStatic(NativeLibraryLoader.class, + CALLS_REAL_METHODS)) { + mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getSystemProperty(same(NATIVE_LIB_TMP_DIR))) + .thenReturn(nativeLibraryDirectoryLocation); + mockedNativeLibraryLoader.when(NativeLibraryLoader::getInstance).thenReturn(loader); // Mocking to force copy random bytes to create a lib file to // nativeLibraryDirectoryLocation. But load library will fail. mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getResourceStream(anyString())) .thenReturn(new ByteArrayInputStream(new byte[]{0, 1, 2, 3})); String dummyLibraryName = "dummy_lib"; - NativeLibraryLoader.getInstance().loadLibrary(dummyLibraryName, Collections.emptyList()); - NativeLibraryLoader.isLibraryLoaded(dummyLibraryName); + List dependencies = Arrays.asList("dep1", "dep2"); + File absDir = new File(nativeLibraryDirectoryLocation == null ? "" : nativeLibraryDirectoryLocation) + .getAbsoluteFile(); + + NativeLibraryLoader.getInstance().loadLibrary(dummyLibraryName, dependencies); + // Checking if the resource with random was copied to a temp file. - File[] libPath = new File(nativeLibraryDirectoryLocation == null ? 
"" : nativeLibraryDirectoryLocation) - .getAbsoluteFile().listFiles((dir, name) -> name.startsWith(dummyLibraryName) && - name.endsWith(NativeLibraryLoader.getLibOsSuffix())); - assertNotNull(libPath); - assertEquals(1, libPath.length); - assertTrue(libPath[0].delete()); + File[] libPath = absDir + .listFiles((dir, name) -> name.startsWith(dummyLibraryName)); + assertThat(libPath) + .isNotNull() + .isNotEmpty(); + assertThat(libPath[0]) + .isDirectory(); + try { + assertThat(new File(libPath[0], getJniLibraryFileName(dummyLibraryName))) + .isFile(); + dependencies.forEach(dep -> assertThat(new File(libPath[0], dep)).isFile()); + } finally { + FileUtil.fullyDelete(libPath[0]); + } } - } } diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml index 3e535c5f5f2..c4284a4e85d 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml +++ b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml @@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT rocksdb-checkpoint-differ - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT RocksDB Checkpoint Differ RocksDB Checkpoint Differ jar diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionFileInfo.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionFileInfo.java index fa0d1f5491d..2d67d5003ae 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionFileInfo.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionFileInfo.java @@ -20,7 +20,9 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.rocksdb.LiveFileMetaData; import java.util.Objects; @@ -128,6 +130,16 @@ public Builder setColumnFamily(String columnFamily) { return this; } + public Builder setValues(LiveFileMetaData fileMetaData) { + if (fileMetaData != null) { + String columnFamilyName = StringUtils.bytes2String(fileMetaData.columnFamilyName()); + String startRangeValue = StringUtils.bytes2String(fileMetaData.smallestKey()); + String endRangeValue = StringUtils.bytes2String(fileMetaData.largestKey()); + this.setColumnFamily(columnFamilyName).setStartRange(startRangeValue).setEndRange(endRangeValue); + } + return this; + } + public CompactionFileInfo build() { if ((startRange != null || endRange != null || columnFamily != null) && (startRange == null || endRange == null || columnFamily == null)) { diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionLogEntry.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionLogEntry.java index c27763b9788..04980821ba9 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionLogEntry.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionLogEntry.java @@ -38,7 +38,8 @@ public final class CompactionLogEntry implements private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(CompactionLogEntryProto.getDefaultInstance()), CompactionLogEntry::getFromProtobuf, - CompactionLogEntry::getProtobuf); + CompactionLogEntry::getProtobuf, + CompactionLogEntry.class); public static Codec getCodec() { return 
CODEC; diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/CompactionNode.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/CompactionNode.java index f8133e6b92f..45a21970966 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/CompactionNode.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/CompactionNode.java @@ -17,6 +17,8 @@ */ package org.apache.ozone.rocksdiff; +import org.apache.ozone.compaction.log.CompactionFileInfo; + /** * Node in the compaction DAG that represents an SST file. */ @@ -48,6 +50,11 @@ public CompactionNode(String file, long numKeys, long seqNum, this.columnFamily = columnFamily; } + public CompactionNode(CompactionFileInfo compactionFileInfo) { + this(compactionFileInfo.getFileName(), -1, -1, compactionFileInfo.getStartKey(), + compactionFileInfo.getEndKey(), compactionFileInfo.getColumnFamily()); + } + @Override public String toString() { return String.format("Node{%s}", fileName); diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java index 08a013fc7c7..930c2a269b5 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java @@ -26,14 +26,16 @@ import java.io.FileNotFoundException; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.Objects; +import java.util.Optional; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import com.google.protobuf.InvalidProtocolBufferException; import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.collections.MapUtils; +import org.apache.commons.io.FilenameUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -42,11 +44,9 @@ import org.apache.hadoop.hdds.utils.Scheduler; import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedReadOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReaderIterator; import org.apache.ozone.compaction.log.CompactionFileInfo; import org.apache.ozone.compaction.log.CompactionLogEntry; import org.apache.ozone.rocksdb.util.RdbUtil; @@ -74,7 +74,6 @@ import java.util.ArrayList; import java.util.Comparator; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; @@ -174,6 +173,7 @@ public class RocksDBCheckpointDiffer implements AutoCloseable, private ColumnFamilyHandle compactionLogTableCFHandle; private ManagedRocksDB activeRocksDB; + private ConcurrentMap inflightCompactions; /** * For snapshot diff calculation we only need to track following column @@ -245,6 +245,7 @@ public class RocksDBCheckpointDiffer implements AutoCloseable, } else { this.scheduler 
= null; } + this.inflightCompactions = new ConcurrentHashMap<>(); } private String createCompactionLogDir(String metadataDirName, @@ -463,7 +464,7 @@ public void onCompactionBegin(RocksDB db, return; } } - + inflightCompactions.putAll(toFileInfoList(compactionJobInfo.inputFiles(), db)); for (String file : compactionJobInfo.inputFiles()) { createLink(Paths.get(sstBackupDir, new File(file).getName()), Paths.get(file)); @@ -484,17 +485,21 @@ public void onCompactionCompleted(RocksDB db, } long trxId = db.getLatestSequenceNumber(); - + Map inputFileCompactions = toFileInfoList(compactionJobInfo.inputFiles(), db); CompactionLogEntry.Builder builder; - try (ManagedOptions options = new ManagedOptions(); - ManagedReadOptions readOptions = new ManagedReadOptions()) { - builder = new CompactionLogEntry.Builder(trxId, - System.currentTimeMillis(), - toFileInfoList(compactionJobInfo.inputFiles(), options, - readOptions), - toFileInfoList(compactionJobInfo.outputFiles(), options, - readOptions)); - } + builder = new CompactionLogEntry.Builder(trxId, + System.currentTimeMillis(), + inputFileCompactions.keySet().stream() + .map(inputFile -> { + if (!inflightCompactions.containsKey(inputFile)) { + LOG.warn("Input file not found in inflightCompactionsMap : {} which should have been added on " + + "compactionBeginListener.", + inputFile); + } + return inflightCompactions.getOrDefault(inputFile, inputFileCompactions.get(inputFile)); + }) + .collect(Collectors.toList()), + new ArrayList<>(toFileInfoList(compactionJobInfo.outputFiles(), db).values())); if (LOG.isDebugEnabled()) { builder = builder.setCompactionReason( @@ -502,7 +507,6 @@ public void onCompactionCompleted(RocksDB db, } CompactionLogEntry compactionLogEntry = builder.build(); - synchronized (this) { if (closed) { return; @@ -521,6 +525,9 @@ public void onCompactionCompleted(RocksDB db, populateCompactionDAG(compactionLogEntry.getInputFileInfoList(), compactionLogEntry.getOutputFileInfoList(), compactionLogEntry.getDbSequenceNumber()); + for (String inputFile : inputFileCompactions.keySet()) { + inflightCompactions.remove(inputFile); + } } } }; @@ -640,7 +647,7 @@ private String trimSSTFilename(String filename) { * @param rocksDB open rocksDB instance. * @return a list of SST files (without extension) in the DB. */ - public HashSet readRocksDBLiveFiles(ManagedRocksDB rocksDB) { + public Set readRocksDBLiveFiles(ManagedRocksDB rocksDB) { HashSet liveFiles = new HashSet<>(); final List cfs = Arrays.asList( @@ -789,7 +796,7 @@ private void preconditionChecksForLoadAllCompactionLogs() { * and appends the extension '.sst'. */ private String getSSTFullPath(String sstFilenameWithoutExtension, - String dbPath) { + String... dbPaths) { // Try to locate the SST in the backup dir first final Path sstPathInBackupDir = Paths.get(sstBackupDir, @@ -800,11 +807,13 @@ private String getSSTFullPath(String sstFilenameWithoutExtension, // SST file does not exist in the SST backup dir, this means the SST file // has not gone through any compactions yet and is only available in the - // src DB directory - final Path sstPathInDBDir = Paths.get(dbPath, - sstFilenameWithoutExtension + SST_FILE_EXTENSION); - if (Files.exists(sstPathInDBDir)) { - return sstPathInDBDir.toString(); + // src DB directory or destDB directory + for (String dbPath : dbPaths) { + final Path sstPathInDBDir = Paths.get(dbPath, + sstFilenameWithoutExtension + SST_FILE_EXTENSION); + if (Files.exists(sstPathInDBDir)) { + return sstPathInDBDir.toString(); + } } // TODO: More graceful error handling? 
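With the listener rework above, compaction bookkeeping is derived from RocksDB live-file metadata rather than by reopening each SST file. A sketch of that construction path using the new CompactionFileInfo.Builder#setValues(LiveFileMetaData) and the CompactionNode(CompactionFileInfo) constructor; the wrapper class is illustrative:

import org.apache.commons.io.FilenameUtils;
import org.apache.ozone.compaction.log.CompactionFileInfo;
import org.apache.ozone.rocksdiff.CompactionNode;
import org.rocksdb.LiveFileMetaData;

public final class CompactionNodeSketch {
  private CompactionNodeSketch() { }

  public static CompactionNode fromLiveFile(LiveFileMetaData meta) {
    // "/db/000123.sst" -> "000123"
    String baseName = FilenameUtils.getBaseName(meta.fileName());
    CompactionFileInfo info = new CompactionFileInfo.Builder(baseName)
        .setValues(meta)   // copies column family, smallest key and largest key
        .build();
    // numKeys and sequence number default to -1 in this constructor.
    return new CompactionNode(info);
  }
}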
@@ -825,25 +834,23 @@ private String getSSTFullPath(String sstFilenameWithoutExtension, * e.g. ["/path/to/sstBackupDir/000050.sst", * "/path/to/sstBackupDir/000060.sst"] */ - public synchronized List getSSTDiffListWithFullPath( - DifferSnapshotInfo src, - DifferSnapshotInfo dest, - String sstFilesDirForSnapDiffJob - ) throws IOException { + public synchronized Optional> getSSTDiffListWithFullPath(DifferSnapshotInfo src, + DifferSnapshotInfo dest, + String sstFilesDirForSnapDiffJob) { - List sstDiffList = getSSTDiffList(src, dest); + Optional> sstDiffList = getSSTDiffList(src, dest); - return sstDiffList.stream() + return sstDiffList.map(diffList -> diffList.stream() .map( sst -> { - String sstFullPath = getSSTFullPath(sst, src.getDbPath()); + String sstFullPath = getSSTFullPath(sst, src.getDbPath(), dest.getDbPath()); Path link = Paths.get(sstFilesDirForSnapDiffJob, sst + SST_FILE_EXTENSION); Path srcFile = Paths.get(sstFullPath); createLink(link, srcFile); return link.toString(); }) - .collect(Collectors.toList()); + .collect(Collectors.toList())); } /** @@ -857,10 +864,8 @@ public synchronized List getSSTDiffListWithFullPath( * @param dest destination snapshot * @return A list of SST files without extension. e.g. ["000050", "000060"] */ - public synchronized List getSSTDiffList( - DifferSnapshotInfo src, - DifferSnapshotInfo dest - ) throws IOException { + public synchronized Optional> getSSTDiffList(DifferSnapshotInfo src, + DifferSnapshotInfo dest) { // TODO: Reject or swap if dest is taken after src, once snapshot chain // integration is done. @@ -894,29 +899,18 @@ public synchronized List getSSTDiffList( LOG.debug("{}", logSB); } - if (src.getTablePrefixes() != null && !src.getTablePrefixes().isEmpty()) { - filterRelevantSstFilesFullPath(fwdDAGDifferentFiles, - src.getTablePrefixes()); + // Check if the DAG traversal was able to reach all the destination SST files. + for (String destSnapFile : destSnapFiles) { + if (!fwdDAGSameFiles.contains(destSnapFile) && !fwdDAGDifferentFiles.contains(destSnapFile)) { + return Optional.empty(); + } } - return new ArrayList<>(fwdDAGDifferentFiles); - } - - /** - * construct absolute sst file path first and - * filter the files. - */ - public void filterRelevantSstFilesFullPath(Set inputFiles, - Map tableToPrefixMap) throws IOException { - for (Iterator fileIterator = - inputFiles.iterator(); fileIterator.hasNext();) { - String filename = fileIterator.next(); - String filepath = getAbsoluteSstFilePath(filename); - if (!RocksDiffUtils.doesSstFileContainKeyRange(filepath, - tableToPrefixMap)) { - fileIterator.remove(); - } + if (src.getTablePrefixes() != null && !src.getTablePrefixes().isEmpty()) { + RocksDiffUtils.filterRelevantSstFiles(fwdDAGDifferentFiles, src.getTablePrefixes(), compactionNodeMap, + src.getRocksDB(), dest.getRocksDB()); } + return Optional.of(new ArrayList<>(fwdDAGDifferentFiles)); } /** @@ -939,10 +933,6 @@ synchronized void internalGetSSTDiffList( Preconditions.checkArgument(sameFiles.isEmpty(), "Set must be empty"); Preconditions.checkArgument(differentFiles.isEmpty(), "Set must be empty"); - // Use source snapshot's table prefix. At this point Source and target's - // table prefix should be same. 
- Map columnFamilyToPrefixMap = src.getTablePrefixes(); - for (String fileName : srcSnapFiles) { if (destSnapFiles.contains(fileName)) { LOG.debug("Source '{}' and destination '{}' share the same SST '{}'", @@ -1006,15 +996,6 @@ synchronized void internalGetSSTDiffList( } for (CompactionNode nextNode : successors) { - if (shouldSkipNode(nextNode, columnFamilyToPrefixMap)) { - LOG.debug("Skipping next node: '{}' with startKey: '{}' and " + - "endKey: '{}' because it doesn't have keys related to " + - "columnFamilyToPrefixMap: '{}'.", - nextNode.getFileName(), nextNode.getStartKey(), - nextNode.getEndKey(), columnFamilyToPrefixMap); - continue; - } - if (sameFiles.contains(nextNode.getFileName()) || differentFiles.contains(nextNode.getFileName())) { LOG.debug("Skipping known processed SST: {}", @@ -1485,86 +1466,22 @@ public void pngPrintMutableGraph(String filePath, GraphType graphType) graph.generateImage(filePath); } - private List toFileInfoList(List sstFiles, - ManagedOptions options, - ManagedReadOptions readOptions - ) { + private Map toFileInfoList(List sstFiles, RocksDB db) { if (CollectionUtils.isEmpty(sstFiles)) { - return Collections.emptyList(); + return Collections.emptyMap(); } - - List response = new ArrayList<>(); - + Map liveFileMetaDataMap = ManagedRocksDB.getLiveMetadataForSSTFiles(db); + Map response = new HashMap<>(); for (String sstFile : sstFiles) { - CompactionFileInfo fileInfo = toFileInfo(sstFile, options, readOptions); - response.add(fileInfo); + String fileName = FilenameUtils.getBaseName(sstFile); + CompactionFileInfo fileInfo = + new CompactionFileInfo.Builder(fileName).setValues(liveFileMetaDataMap.get(fileName)).build(); + response.put(sstFile, fileInfo); } return response; } - private CompactionFileInfo toFileInfo(String sstFile, - ManagedOptions options, - ManagedReadOptions readOptions) { - final int fileNameOffset = sstFile.lastIndexOf("/") + 1; - String fileName = sstFile.substring(fileNameOffset, - sstFile.length() - SST_FILE_EXTENSION_LENGTH); - CompactionFileInfo.Builder fileInfoBuilder = - new CompactionFileInfo.Builder(fileName); - - try (ManagedSstFileReader fileReader = new ManagedSstFileReader(options)) { - fileReader.open(sstFile); - String columnFamily = StringUtils.bytes2String(fileReader.getTableProperties().getColumnFamilyName()); - try (ManagedSstFileReaderIterator iterator = - ManagedSstFileReaderIterator.managed(fileReader.newIterator(readOptions))) { - iterator.get().seekToFirst(); - String startKey = StringUtils.bytes2String(iterator.get().key()); - iterator.get().seekToLast(); - String endKey = StringUtils.bytes2String(iterator.get().key()); - fileInfoBuilder.setStartRange(startKey) - .setEndRange(endKey) - .setColumnFamily(columnFamily); - } - } catch (RocksDBException rocksDBException) { - // Ideally it should not happen. If it does just log the exception. - // And let the compaction complete without the exception. - // Throwing exception in compaction listener could fail the RocksDB. - // In case of exception, compaction node will be missing start key, - // end key and column family. And during diff calculation it will - // continue the traversal as it was before HDDS-8940. - LOG.warn("Failed to read SST file: {}.", sstFile, rocksDBException); - } - return fileInfoBuilder.build(); - } - - @VisibleForTesting - boolean shouldSkipNode(CompactionNode node, - Map columnFamilyToPrefixMap) { - // This is for backward compatibility. 
Before the compaction log table - // migration, startKey, endKey and columnFamily information is not persisted - // in compaction log files. - // Also for the scenario when there is an exception in reading SST files - // for the file node. - if (node.getStartKey() == null || node.getEndKey() == null || - node.getColumnFamily() == null) { - LOG.debug("Compaction node with fileName: {} doesn't have startKey, " + - "endKey and columnFamily details.", node.getFileName()); - return false; - } - - if (MapUtils.isEmpty(columnFamilyToPrefixMap)) { - LOG.debug("Provided columnFamilyToPrefixMap is null or empty."); - return false; - } - - if (!columnFamilyToPrefixMap.containsKey(node.getColumnFamily())) { - LOG.debug("SstFile node: {} is for columnFamily: {} while filter map " + - "contains columnFamilies: {}.", node.getFileName(), - node.getColumnFamily(), columnFamilyToPrefixMap.keySet()); - return true; - } - - String keyPrefix = columnFamilyToPrefixMap.get(node.getColumnFamily()); - return !RocksDiffUtils.isKeyWithPrefixPresent(keyPrefix, node.getStartKey(), - node.getEndKey()); + ConcurrentMap getInflightCompactions() { + return inflightCompactions; } } diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java index e116868410f..6f044e165a0 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java @@ -17,22 +17,22 @@ */ package org.apache.ozone.rocksdiff; +import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.collections.MapUtils; +import org.apache.commons.io.FilenameUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedReadOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReaderIterator; -import org.rocksdb.TableProperties; -import org.rocksdb.RocksDBException; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; +import org.apache.ozone.compaction.log.CompactionFileInfo; +import org.rocksdb.LiveFileMetaData; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.Set; -import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; /** @@ -73,44 +73,68 @@ public static String constructBucketKey(String keyName) { } public static void filterRelevantSstFiles(Set inputFiles, - Map tableToPrefixMap) throws IOException { + Map tableToPrefixMap, + ManagedRocksDB... dbs) { + filterRelevantSstFiles(inputFiles, tableToPrefixMap, Collections.emptyMap(), dbs); + } + + /** + * Filter sst files based on prefixes. + */ + public static void filterRelevantSstFiles(Set inputFiles, + Map tableToPrefixMap, + Map preExistingCompactionNodes, + ManagedRocksDB... 
dbs) { + Map liveFileMetaDataMap = new HashMap<>(); + int dbIdx = 0; for (Iterator fileIterator = inputFiles.iterator(); fileIterator.hasNext();) { - String filepath = fileIterator.next(); - if (!RocksDiffUtils.doesSstFileContainKeyRange(filepath, - tableToPrefixMap)) { + String filename = FilenameUtils.getBaseName(fileIterator.next()); + while (!preExistingCompactionNodes.containsKey(filename) && !liveFileMetaDataMap.containsKey(filename) + && dbIdx < dbs.length) { + liveFileMetaDataMap.putAll(dbs[dbIdx].getLiveMetadataForSSTFiles()); + dbIdx += 1; + } + CompactionNode compactionNode = preExistingCompactionNodes.get(filename); + if (compactionNode == null) { + compactionNode = new CompactionNode(new CompactionFileInfo.Builder(filename) + .setValues(liveFileMetaDataMap.get(filename)).build()); + } + if (shouldSkipNode(compactionNode, tableToPrefixMap)) { fileIterator.remove(); } } } - public static boolean doesSstFileContainKeyRange(String filepath, - Map tableToPrefixMap) throws IOException { - - try ( - ManagedOptions options = new ManagedOptions(); - ManagedSstFileReader sstFileReader = new ManagedSstFileReader(options)) { - sstFileReader.open(filepath); - TableProperties properties = sstFileReader.getTableProperties(); - String tableName = new String(properties.getColumnFamilyName(), UTF_8); - if (tableToPrefixMap.containsKey(tableName)) { - String prefix = tableToPrefixMap.get(tableName); + @VisibleForTesting + static boolean shouldSkipNode(CompactionNode node, + Map columnFamilyToPrefixMap) { + // This is for backward compatibility. Before the compaction log table + // migration, startKey, endKey and columnFamily information is not persisted + // in compaction log files. + // Also for the scenario when there is an exception in reading SST files + // for the file node. 
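The range check that shouldSkipNode relies on can be pictured with a small standalone helper. This is a simplified approximation of the idea behind RocksDiffUtils.isKeyWithPrefixPresent, not the actual implementation; the class and method names are made up for the sketch:

final class PrefixRangeSketch {

  private PrefixRangeSketch() { }

  // True if some key starting with 'prefix' may lie in [smallestKey, largestKey].
  // The smallest such key is the prefix itself; the largest is approximated by
  // appending the maximum character.
  static boolean mayContainPrefix(String prefix, String smallestKey, String largestKey) {
    return largestKey.compareTo(prefix) >= 0
        && smallestKey.compareTo(prefix + Character.MAX_VALUE) <= 0;
  }
}

An SST whose whole key range belongs to /vol1/bucket2 fails this check for a /vol1/bucket1/ prefix and is dropped, which is the pruning the new filterRelevantSstFiles path is after.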
+ if (node.getStartKey() == null || node.getEndKey() == null || + node.getColumnFamily() == null) { + LOG.debug("Compaction node with fileName: {} doesn't have startKey, " + + "endKey and columnFamily details.", node.getFileName()); + return false; + } - try ( - ManagedReadOptions readOptions = new ManagedReadOptions(); - ManagedSstFileReaderIterator iterator = ManagedSstFileReaderIterator.managed( - sstFileReader.newIterator(readOptions))) { - iterator.get().seek(prefix.getBytes(UTF_8)); - String seekResultKey = new String(iterator.get().key(), UTF_8); - return seekResultKey.startsWith(prefix); - } - } + if (MapUtils.isEmpty(columnFamilyToPrefixMap)) { + LOG.debug("Provided columnFamilyToPrefixMap is null or empty."); return false; - } catch (RocksDBException e) { - LOG.error("Failed to read SST File ", e); - throw new IOException(e); } - } + if (!columnFamilyToPrefixMap.containsKey(node.getColumnFamily())) { + LOG.debug("SstFile node: {} is for columnFamily: {} while filter map " + + "contains columnFamilies: {}.", node.getFileName(), + node.getColumnFamily(), columnFamilyToPrefixMap.keySet()); + return true; + } + String keyPrefix = columnFamilyToPrefixMap.get(node.getColumnFamily()); + return !isKeyWithPrefixPresent(keyPrefix, node.getStartKey(), + node.getEndKey()); + } } diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java index 0164e3a23bd..4f04abb8b5b 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java @@ -21,6 +21,7 @@ import static java.util.Arrays.asList; import static java.util.concurrent.TimeUnit.MINUTES; +import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.graph.GraphBuilder; import java.io.File; @@ -38,6 +39,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; @@ -48,10 +50,12 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.function.Consumer; +import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; import com.google.common.graph.MutableGraph; +import org.apache.commons.io.FilenameUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -67,14 +71,18 @@ import org.apache.hadoop.ozone.lock.BootstrapStateHandler; import org.apache.ozone.compaction.log.CompactionFileInfo; import org.apache.ozone.compaction.log.CompactionLogEntry; +import org.apache.ozone.rocksdb.util.RdbUtil; import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.NodeComparator; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; +import org.mockito.MockedStatic; +import org.mockito.Mockito; import 
org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.LiveFileMetaData; @@ -101,6 +109,8 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -231,6 +241,29 @@ public void cleanUp() { } } + private static List getPrunedCompactionEntries(boolean prune, Map metadata) { + List entries = new ArrayList<>(); + if (!prune) { + entries.add(createCompactionEntry(1, + now(), + Arrays.asList("1", "2"), + Arrays.asList("4", "5"), metadata)); + } + entries.addAll(Arrays.asList(createCompactionEntry(2, + now(), + Arrays.asList("4", "5"), + Collections.singletonList("10"), metadata), + createCompactionEntry(3, + now(), + Arrays.asList("3", "13", "14"), + Arrays.asList("6", "7"), metadata), + createCompactionEntry(4, + now(), + Arrays.asList("6", "7"), + Collections.singletonList("11"), metadata))); + return entries; + } + /** * Test cases for testGetSSTDiffListWithoutDB. */ @@ -306,13 +339,19 @@ private static Stream casesGetSSTDiffListWithoutDB() { ); DifferSnapshotInfo snapshotInfo1 = new DifferSnapshotInfo( - "/path/to/dbcp1", UUID.randomUUID(), 3008L, null, null); + "/path/to/dbcp1", UUID.randomUUID(), 3008L, null, Mockito.mock(ManagedRocksDB.class)); DifferSnapshotInfo snapshotInfo2 = new DifferSnapshotInfo( - "/path/to/dbcp2", UUID.randomUUID(), 14980L, null, null); + "/path/to/dbcp2", UUID.randomUUID(), 14980L, null, Mockito.mock(ManagedRocksDB.class)); DifferSnapshotInfo snapshotInfo3 = new DifferSnapshotInfo( - "/path/to/dbcp3", UUID.randomUUID(), 17975L, null, null); + "/path/to/dbcp3", UUID.randomUUID(), 17975L, null, Mockito.mock(ManagedRocksDB.class)); DifferSnapshotInfo snapshotInfo4 = new DifferSnapshotInfo( - "/path/to/dbcp4", UUID.randomUUID(), 18000L, null, null); + "/path/to/dbcp4", UUID.randomUUID(), 18000L, null, Mockito.mock(ManagedRocksDB.class)); + + Map prefixMap = ImmutableMap.of("col1", "c", "col2", "d"); + DifferSnapshotInfo snapshotInfo5 = new DifferSnapshotInfo( + "/path/to/dbcp2", UUID.randomUUID(), 0L, prefixMap, Mockito.mock(ManagedRocksDB.class)); + DifferSnapshotInfo snapshotInfo6 = new DifferSnapshotInfo( + "/path/to/dbcp2", UUID.randomUUID(), 100L, prefixMap, Mockito.mock(ManagedRocksDB.class)); Set snapshotSstFiles1 = ImmutableSet.of("000059", "000053"); Set snapshotSstFiles2 = ImmutableSet.of("000088", "000059", @@ -342,7 +381,9 @@ private static Stream casesGetSSTDiffListWithoutDB() { ImmutableSet.of("000059", "000053"), ImmutableSet.of("000066", "000105", "000080", "000087", "000073", "000095"), - false), + ImmutableSet.of("000066", "000105", "000080", "000087", "000073", + "000095"), + false, Collections.emptyMap()), Arguments.of("Test 2: Compaction log file crafted input: " + "One source ('to' snapshot) SST file is never compacted " + "(newly flushed)", @@ -354,7 +395,8 @@ private static Stream casesGetSSTDiffListWithoutDB() { snapshotSstFiles3, ImmutableSet.of("000088", "000105", "000059", "000053", "000095"), ImmutableSet.of("000108"), - false), + ImmutableSet.of("000108"), + false, Collections.emptyMap()), Arguments.of("Test 3: Compaction log file crafted input: " + "Same SST files found during SST expansion", compactionLog, @@ -365,7 +407,8 @@ private static Stream casesGetSSTDiffListWithoutDB() { 
snapshotSstFiles1Alt1, ImmutableSet.of("000066", "000059", "000053"), ImmutableSet.of("000080", "000087", "000073", "000095"), - false), + ImmutableSet.of("000080", "000087", "000073", "000095"), + false, Collections.emptyMap()), Arguments.of("Test 4: Compaction log file crafted input: " + "Skipping known processed SST.", compactionLog, @@ -376,7 +419,8 @@ private static Stream casesGetSSTDiffListWithoutDB() { snapshotSstFiles1Alt2, Collections.emptySet(), Collections.emptySet(), - true), + Collections.emptySet(), + true, Collections.emptyMap()), Arguments.of("Test 5: Compaction log file hit snapshot" + " generation early exit condition", compactionLog, @@ -387,7 +431,8 @@ private static Stream casesGetSSTDiffListWithoutDB() { snapshotSstFiles1, ImmutableSet.of("000059", "000053"), ImmutableSet.of("000066", "000080", "000087", "000073", "000062"), - false), + ImmutableSet.of("000066", "000080", "000087", "000073", "000062"), + false, Collections.emptyMap()), Arguments.of("Test 6: Compaction log table regular case. " + "Expands expandable SSTs in the initial diff.", null, @@ -399,7 +444,9 @@ private static Stream casesGetSSTDiffListWithoutDB() { ImmutableSet.of("000059", "000053"), ImmutableSet.of("000066", "000105", "000080", "000087", "000073", "000095"), - false), + ImmutableSet.of("000066", "000105", "000080", "000087", "000073", + "000095"), + false, Collections.emptyMap()), Arguments.of("Test 7: Compaction log table crafted input: " + "One source ('to' snapshot) SST file is never compacted " + "(newly flushed)", @@ -411,7 +458,8 @@ private static Stream casesGetSSTDiffListWithoutDB() { snapshotSstFiles3, ImmutableSet.of("000088", "000105", "000059", "000053", "000095"), ImmutableSet.of("000108"), - false), + ImmutableSet.of("000108"), + false, Collections.emptyMap()), Arguments.of("Test 8: Compaction log table crafted input: " + "Same SST files found during SST expansion", null, @@ -422,7 +470,8 @@ private static Stream casesGetSSTDiffListWithoutDB() { snapshotSstFiles1Alt1, ImmutableSet.of("000066", "000059", "000053"), ImmutableSet.of("000080", "000087", "000073", "000095"), - false), + ImmutableSet.of("000080", "000087", "000073", "000095"), + false, Collections.emptyMap()), Arguments.of("Test 9: Compaction log table crafted input: " + "Skipping known processed SST.", null, @@ -433,7 +482,8 @@ private static Stream casesGetSSTDiffListWithoutDB() { snapshotSstFiles1Alt2, Collections.emptySet(), Collections.emptySet(), - true), + Collections.emptySet(), + true, Collections.emptyMap()), Arguments.of("Test 10: Compaction log table hit snapshot " + "generation early exit condition", null, @@ -444,7 +494,64 @@ private static Stream casesGetSSTDiffListWithoutDB() { snapshotSstFiles1, ImmutableSet.of("000059", "000053"), ImmutableSet.of("000066", "000080", "000087", "000073", "000062"), - false) + ImmutableSet.of("000066", "000080", "000087", "000073", "000062"), + false, Collections.emptyMap()), + Arguments.of("Test 11: Older Compaction log got pruned and source snapshot delta files would be " + + "unreachable", + null, + getPrunedCompactionEntries(false, Collections.emptyMap()), + snapshotInfo6, + snapshotInfo5, + ImmutableSet.of("10", "11", "8", "9", "12"), + ImmutableSet.of("1", "3", "13", "14"), + ImmutableSet.of("1", "3", "13", "14"), + ImmutableSet.of("2", "8", "9", "12"), + ImmutableSet.of("2", "8", "9", "12"), + false, Collections.emptyMap()), + Arguments.of("Test 12: Older Compaction log got pruned and source snapshot delta files would be " + + "unreachable", + null, + 
getPrunedCompactionEntries(true, Collections.emptyMap()), + snapshotInfo6, + snapshotInfo5, + ImmutableSet.of("10", "11", "8", "9", "12"), + ImmutableSet.of("1", "3", "13", "14"), + ImmutableSet.of("3", "13", "14"), + ImmutableSet.of("4", "5", "8", "9", "12"), + null, + false, Collections.emptyMap()), + Arguments.of("Test 13: Compaction log to test filtering logic based on range and column family", + null, + getPrunedCompactionEntries(false, + new HashMap() {{ + put("1", new String[]{"a", "c", "col1"}); + put("3", new String[]{"a", "d", "col2"}); + put("13", new String[]{"a", "c", "col13"}); + put("14", new String[]{"a", "c", "col1"}); + put("2", new String[]{"a", "c", "col1"}); + put("4", new String[]{"a", "b", "col1"}); + put("5", new String[]{"b", "b", "col1"}); + put("10", new String[]{"a", "b", "col1"}); + put("8", new String[]{"a", "b", "col1"}); + put("6", new String[]{"a", "z", "col13"}); + put("7", new String[]{"a", "z", "col13"}); + }}), + snapshotInfo6, + snapshotInfo5, + ImmutableSet.of("10", "11", "8", "9", "12", "15"), + ImmutableSet.of("1", "3", "13", "14"), + ImmutableSet.of("1", "13", "3", "14"), + ImmutableSet.of("2", "8", "9", "12", "15"), + ImmutableSet.of("2", "9", "12"), + false, + ImmutableMap.of( + "2", new String[]{"a", "b", "col1"}, + "12", new String[]{"a", "d", "col2"}, + "8", new String[]{"a", "b", "col1"}, + "9", new String[]{"a", "c", "col1"}, + "15", new String[]{"a", "z", "col13"} + )) + ); } @@ -464,48 +571,94 @@ public void testGetSSTDiffListWithoutDB(String description, Set destSnapshotSstFiles, Set expectedSameSstFiles, Set expectedDiffSstFiles, - boolean expectingException) { - - boolean exceptionThrown = false; - - if (compactionLog != null) { - // Construct DAG from compaction log input - Arrays.stream(compactionLog.split("\n")).forEach( - rocksDBCheckpointDiffer::processCompactionLogLine); - } else if (compactionLogEntries != null) { - compactionLogEntries.forEach(entry -> - rocksDBCheckpointDiffer.addToCompactionLogTable(entry)); - } else { - throw new IllegalArgumentException("One of compactionLog and " + - "compactionLogEntries should be non-null."); - } - rocksDBCheckpointDiffer.loadAllCompactionLogs(); - - Set actualSameSstFiles = new HashSet<>(); - Set actualDiffSstFiles = new HashSet<>(); - - try { - rocksDBCheckpointDiffer.internalGetSSTDiffList( - srcSnapshot, - destSnapshot, - srcSnapshotSstFiles, - destSnapshotSstFiles, - actualSameSstFiles, - actualDiffSstFiles); - } catch (RuntimeException rtEx) { - if (!expectingException) { - fail("Unexpected exception thrown in test."); + Set expectedSSTDiffFiles, + boolean expectingException, + Map metaDataMap) { + try (MockedStatic mockedRocksdiffUtil = Mockito.mockStatic(RocksDiffUtils.class, + Mockito.CALLS_REAL_METHODS)) { + mockedRocksdiffUtil.when(() -> RocksDiffUtils.constructBucketKey(anyString())).thenAnswer(i -> i.getArgument(0)); + boolean exceptionThrown = false; + if (compactionLog != null) { + // Construct DAG from compaction log input + Arrays.stream(compactionLog.split("\n")).forEach( + rocksDBCheckpointDiffer::processCompactionLogLine); + } else if (compactionLogEntries != null) { + compactionLogEntries.forEach(entry -> + rocksDBCheckpointDiffer.addToCompactionLogTable(entry)); } else { - exceptionThrown = true; + throw new IllegalArgumentException("One of compactionLog and " + + "compactionLogEntries should be non-null."); + } + rocksDBCheckpointDiffer.loadAllCompactionLogs(); + + Set actualSameSstFiles = new HashSet<>(); + Set actualDiffSstFiles = new HashSet<>(); + + try { + 
rocksDBCheckpointDiffer.internalGetSSTDiffList( + srcSnapshot, + destSnapshot, + srcSnapshotSstFiles, + destSnapshotSstFiles, + actualSameSstFiles, + actualDiffSstFiles); + } catch (RuntimeException rtEx) { + if (!expectingException) { + fail("Unexpected exception thrown in test."); + } else { + exceptionThrown = true; + } } - } - // Check same and different SST files result - assertEquals(expectedSameSstFiles, actualSameSstFiles); - assertEquals(expectedDiffSstFiles, actualDiffSstFiles); + if (expectingException && !exceptionThrown) { + fail("Expecting exception but none thrown."); + } - if (expectingException && !exceptionThrown) { - fail("Expecting exception but none thrown."); + // Check same and different SST files result + assertEquals(expectedSameSstFiles, actualSameSstFiles); + assertEquals(expectedDiffSstFiles, actualDiffSstFiles); + try (MockedStatic mockedHandler = Mockito.mockStatic(RdbUtil.class, Mockito.CALLS_REAL_METHODS)) { + RocksDB rocksDB = Mockito.mock(RocksDB.class); + Mockito.when(rocksDB.getName()).thenReturn("dummy"); + Mockito.when(srcSnapshot.getRocksDB().get()).thenReturn(rocksDB); + Mockito.when(destSnapshot.getRocksDB().get()).thenReturn(rocksDB); + Mockito.when(srcSnapshot.getRocksDB().getLiveMetadataForSSTFiles()) + .thenAnswer(invocation -> srcSnapshotSstFiles.stream().filter(metaDataMap::containsKey).map(file -> { + LiveFileMetaData liveFileMetaData = Mockito.mock(LiveFileMetaData.class); + String[] metaData = metaDataMap.get(file); + Mockito.when(liveFileMetaData.fileName()).thenReturn("/" + file + SST_FILE_EXTENSION); + Mockito.when(liveFileMetaData.smallestKey()).thenReturn(metaData[0].getBytes(UTF_8)); + Mockito.when(liveFileMetaData.largestKey()).thenReturn(metaData[1].getBytes(UTF_8)); + Mockito.when(liveFileMetaData.columnFamilyName()).thenReturn(metaData[2].getBytes(UTF_8)); + return liveFileMetaData; + }).collect(Collectors.toMap(liveFileMetaData -> FilenameUtils.getBaseName(liveFileMetaData.fileName()), + Function.identity()))); + mockedHandler.when(() -> RdbUtil.getLiveSSTFilesForCFs(any(), any())) + .thenAnswer(i -> { + Set sstFiles = i.getArgument(0).equals(srcSnapshot.getRocksDB()) ? 
srcSnapshotSstFiles + : destSnapshotSstFiles; + return sstFiles.stream().map(fileName -> { + LiveFileMetaData liveFileMetaData = Mockito.mock(LiveFileMetaData.class); + Mockito.when(liveFileMetaData.fileName()).thenReturn("/" + fileName + SST_FILE_EXTENSION); + return liveFileMetaData; + }).collect(Collectors.toList()); + }); + try { + Assertions.assertEquals(Optional.ofNullable(expectedSSTDiffFiles) + .map(files -> files.stream().sorted().collect(Collectors.toList())).orElse(null), + rocksDBCheckpointDiffer.getSSTDiffList(srcSnapshot, destSnapshot) + .map(i -> i.stream().sorted().collect(Collectors.toList())).orElse(null)); + } catch (RuntimeException rtEx) { + if (!expectingException) { + fail("Unexpected exception thrown in test."); + } else { + exceptionThrown = true; + } + } + } + if (expectingException && !exceptionThrown) { + fail("Expecting exception but none thrown."); + } } } @@ -539,7 +692,12 @@ void testDifferWithDB() throws Exception { "000017.sst", "000019.sst", "000021.sst", "000023.sst", "000024.sst", "000026.sst", "000029.sst")); } - + rocksDBCheckpointDiffer.getForwardCompactionDAG().nodes().stream().forEach(compactionNode -> { + Assertions.assertNotNull(compactionNode.getStartKey()); + Assertions.assertNotNull(compactionNode.getEndKey()); + }); + GenericTestUtils.waitFor(() -> rocksDBCheckpointDiffer.getInflightCompactions().isEmpty(), 1000, + 10000); if (LOG.isDebugEnabled()) { rocksDBCheckpointDiffer.dumpCompactionNodeTable(); } @@ -587,7 +745,7 @@ void diffAllSnapshots(RocksDBCheckpointDiffer differ) int index = 0; for (DifferSnapshotInfo snap : snapshots) { // Returns a list of SST files to be fed into RocksDiff - List sstDiffList = differ.getSSTDiffList(src, snap); + List sstDiffList = differ.getSSTDiffList(src, snap).orElse(Collections.emptyList()); LOG.info("SST diff list from '{}' to '{}': {}", src.getDbPath(), snap.getDbPath(), sstDiffList); @@ -1452,19 +1610,30 @@ private static Stream sstFilePruningScenarios() { ); } - private static CompactionLogEntry createCompactionEntry( - long dbSequenceNumber, - long compactionTime, - List inputFiles, - List outputFiles - ) { + private static CompactionLogEntry createCompactionEntry(long dbSequenceNumber, + long compactionTime, + List inputFiles, + List outputFiles) { + return createCompactionEntry(dbSequenceNumber, compactionTime, inputFiles, outputFiles, Collections.emptyMap()); + } + + private static CompactionLogEntry createCompactionEntry(long dbSequenceNumber, + long compactionTime, + List inputFiles, + List outputFiles, + Map metadata) { return new CompactionLogEntry.Builder(dbSequenceNumber, compactionTime, - toFileInfoList(inputFiles), toFileInfoList(outputFiles)).build(); + toFileInfoList(inputFiles, metadata), toFileInfoList(outputFiles, metadata)).build(); } - private static List toFileInfoList(List files) { + private static List toFileInfoList(List files, + Map metadata) { return files.stream() - .map(fileName -> new CompactionFileInfo.Builder(fileName).build()) + .map(fileName -> new CompactionFileInfo.Builder(fileName) + .setStartRange(Optional.ofNullable(metadata.get(fileName)).map(meta -> meta[0]).orElse(null)) + .setEndRange(Optional.ofNullable(metadata.get(fileName)).map(meta -> meta[1]).orElse(null)) + .setColumnFamily(Optional.ofNullable(metadata.get(fileName)).map(meta -> meta[2]).orElse(null)) + .build()) .collect(Collectors.toList()); } @@ -1794,7 +1963,7 @@ public void testShouldSkipNode(Map columnFamilyToPrefixMap, .getCompactionNodeMap().values().stream() 
.sorted(Comparator.comparing(CompactionNode::getFileName)) .map(node -> - rocksDBCheckpointDiffer.shouldSkipNode(node, + RocksDiffUtils.shouldSkipNode(node, columnFamilyToPrefixMap)) .collect(Collectors.toList()); @@ -1831,7 +2000,7 @@ public void testShouldSkipNodeEdgeCase( rocksDBCheckpointDiffer.loadAllCompactionLogs(); - assertEquals(expectedResponse, rocksDBCheckpointDiffer.shouldSkipNode(node, + assertEquals(expectedResponse, RocksDiffUtils.shouldSkipNode(node, columnFamilyToPrefixMap)); } diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDiffUtils.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDiffUtils.java index 67233676f0b..ef92aa2c17c 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDiffUtils.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDiffUtils.java @@ -18,10 +18,32 @@ package org.apache.ozone.rocksdiff; +import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; +import org.assertj.core.util.Sets; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.rocksdb.LiveFileMetaData; +import org.rocksdb.RocksDB; + +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.params.provider.Arguments.arguments; +import static org.mockito.ArgumentMatchers.anyString; /** * Class to test RocksDiffUtils. 
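The tests around this change mock ManagedRocksDB.getLiveMetadataForSSTFiles; the sketch below shows roughly what such a helper is assumed to do here, namely index a DB's live SST metadata by base file name so smallest/largest key and column family can be looked up without reopening SST files. This is an assumption for illustration, not the actual Ozone helper:

import java.util.HashMap;
import java.util.Map;
import org.apache.commons.io.FilenameUtils;
import org.rocksdb.LiveFileMetaData;
import org.rocksdb.RocksDB;

final class LiveSstMetadataIndexSketch {

  private LiveSstMetadataIndexSketch() { }

  static Map<String, LiveFileMetaData> indexByBaseName(RocksDB db) {
    Map<String, LiveFileMetaData> byName = new HashMap<>();
    for (LiveFileMetaData meta : db.getLiveFilesMetaData()) {
      // fileName() is a path such as "/000050.sst"; key the map by "000050".
      byName.put(FilenameUtils.getBaseName(meta.fileName()), meta);
    }
    return byName;
  }
}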
@@ -54,4 +76,103 @@ public void testFilterFunction() { "/volume/bucket/key-1", "/volume/bucket2/key-97")); } + + public static Stream values() { + return Stream.of( + arguments("validColumnFamily", "invalidColumnFamily", "a", "d", "b", "f"), + arguments("validColumnFamily", "invalidColumnFamily", "a", "d", "e", "f"), + arguments("validColumnFamily", "invalidColumnFamily", "a", "d", "a", "f"), + arguments("validColumnFamily", "validColumnFamily", "a", "d", "e", "g"), + arguments("validColumnFamily", "validColumnFamily", "e", "g", "a", "d"), + arguments("validColumnFamily", "validColumnFamily", "b", "b", "e", "g"), + arguments("validColumnFamily", "validColumnFamily", "a", "d", "e", "e") + ); + } + + @ParameterizedTest + @MethodSource("values") + public void testFilterRelevantSstFilesWithPreExistingCompactionInfo(String validSSTColumnFamilyName, + String invalidColumnFamilyName, + String validSSTFileStartRange, + String validSSTFileEndRange, + String invalidSSTFileStartRange, + String invalidSSTFileEndRange) { + try (MockedStatic mockedHandler = Mockito.mockStatic(RocksDiffUtils.class, + Mockito.CALLS_REAL_METHODS)) { + mockedHandler.when(() -> RocksDiffUtils.constructBucketKey(anyString())).thenAnswer(i -> i.getArgument(0)); + String validSstFile = "filePath/validSSTFile.sst"; + String invalidSstFile = "filePath/invalidSSTFile.sst"; + String untrackedSstFile = "filePath/untrackedSSTFile.sst"; + String expectedPrefix = String.valueOf((char)(((int)validSSTFileEndRange.charAt(0) + + validSSTFileStartRange.charAt(0)) / 2)); + Set sstFile = Sets.newTreeSet(validSstFile, invalidSstFile, untrackedSstFile); + RocksDiffUtils.filterRelevantSstFiles(sstFile, ImmutableMap.of(validSSTColumnFamilyName, expectedPrefix), + ImmutableMap.of("validSSTFile", new CompactionNode(validSstFile, 0, 0, validSSTFileStartRange, + validSSTFileEndRange, validSSTColumnFamilyName), "invalidSSTFile", + new CompactionNode(invalidSstFile, 0, 0, invalidSSTFileStartRange, + invalidSSTFileEndRange, invalidColumnFamilyName))); + Assertions.assertEquals(Sets.newTreeSet(validSstFile, untrackedSstFile), sstFile); + } + + } + + private LiveFileMetaData getMockedLiveFileMetadata(String columnFamilyName, String startRange, + String endRange, + String name) { + LiveFileMetaData liveFileMetaData = Mockito.mock(LiveFileMetaData.class); + Mockito.when(liveFileMetaData.largestKey()).thenReturn(endRange.getBytes(StandardCharsets.UTF_8)); + Mockito.when(liveFileMetaData.columnFamilyName()).thenReturn(columnFamilyName.getBytes(StandardCharsets.UTF_8)); + Mockito.when(liveFileMetaData.smallestKey()).thenReturn(startRange.getBytes(StandardCharsets.UTF_8)); + Mockito.when(liveFileMetaData.fileName()).thenReturn("basePath/" + name + ".sst"); + return liveFileMetaData; + } + + @ParameterizedTest + @MethodSource("values") + public void testFilterRelevantSstFilesFromDB(String validSSTColumnFamilyName, + String invalidColumnFamilyName, + String validSSTFileStartRange, + String validSSTFileEndRange, + String invalidSSTFileStartRange, + String invalidSSTFileEndRange) { + try (MockedStatic mockedHandler = Mockito.mockStatic(RocksDiffUtils.class, + Mockito.CALLS_REAL_METHODS)) { + mockedHandler.when(() -> RocksDiffUtils.constructBucketKey(anyString())).thenAnswer(i -> i.getArgument(0)); + for (int numberOfDBs = 1; numberOfDBs < 10; numberOfDBs++) { + String validSstFile = "filePath/validSSTFile.sst"; + String invalidSstFile = "filePath/invalidSSTFile.sst"; + String untrackedSstFile = "filePath/untrackedSSTFile.sst"; + int expectedDBKeyIndex = numberOfDBs 
/ 2; + ManagedRocksDB[] rocksDBs = + IntStream.range(0, numberOfDBs).mapToObj(i -> Mockito.mock(ManagedRocksDB.class)) + .collect(Collectors.toList()).toArray(new ManagedRocksDB[numberOfDBs]); + for (int i = 0; i < numberOfDBs; i++) { + ManagedRocksDB managedRocksDB = rocksDBs[i]; + RocksDB mockedRocksDB = Mockito.mock(RocksDB.class); + Mockito.when(managedRocksDB.get()).thenReturn(mockedRocksDB); + if (i == expectedDBKeyIndex) { + LiveFileMetaData validLiveFileMetaData = getMockedLiveFileMetadata(validSSTColumnFamilyName, + validSSTFileStartRange, validSSTFileEndRange, "validSSTFile"); + LiveFileMetaData invalidLiveFileMetaData = getMockedLiveFileMetadata(invalidColumnFamilyName, + invalidSSTFileStartRange, invalidSSTFileEndRange, "invalidSSTFile"); + List liveFileMetaDatas = Arrays.asList(validLiveFileMetaData, invalidLiveFileMetaData); + Mockito.when(mockedRocksDB.getLiveFilesMetaData()).thenReturn(liveFileMetaDatas); + } else { + Mockito.when(mockedRocksDB.getLiveFilesMetaData()).thenReturn(Collections.emptyList()); + } + Mockito.when(managedRocksDB.getLiveMetadataForSSTFiles()) + .thenAnswer(invocation -> ManagedRocksDB.getLiveMetadataForSSTFiles(mockedRocksDB)); + } + + String expectedPrefix = String.valueOf((char)(((int)validSSTFileEndRange.charAt(0) + + validSSTFileStartRange.charAt(0)) / 2)); + Set sstFile = Sets.newTreeSet(validSstFile, invalidSstFile, untrackedSstFile); + RocksDiffUtils.filterRelevantSstFiles(sstFile, ImmutableMap.of(validSSTColumnFamilyName, expectedPrefix), + Collections.emptyMap(), rocksDBs); + Assertions.assertEquals(Sets.newTreeSet(validSstFile, untrackedSstFile), sstFile); + } + + } + + } } diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index 32408e8904b..4c2e40c3759 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -20,15 +20,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-server-scm - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Storage Container Manager Server Apache Ozone HDDS SCM Server jar + false diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java index e485fcc98d9..99fd9c7b431 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java @@ -47,7 +47,8 @@ void addTransactionToDN(UUID dnID, DeletedBlocksTransaction tx) { blocksDeleted += tx.getLocalIDCount(); if (SCMBlockDeletingService.LOG.isDebugEnabled()) { SCMBlockDeletingService.LOG - .debug("Transaction added: {} <- TX({})", dnID, tx.getTxID()); + .debug("Transaction added: {} <- TX({}), DN {} <- blocksDeleted Add {}.", + dnID, tx.getTxID(), dnID, tx.getLocalIDCount()); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java index 9d5377b9e3e..45d6a024938 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler.DeleteBlockStatus; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -138,6 +137,7 @@ public List getFailedTransactions(int count, } } } else { + iter.seek(startTxId); while (iter.hasNext() && failedTXs.size() < count) { DeletedBlocksTransaction delTX = iter.next().getValue(); if (delTX.getCount() == -1 && delTX.getTxID() >= startTxId) { @@ -200,20 +200,6 @@ private DeletedBlocksTransaction constructNewTransaction( .build(); } - private boolean isTransactionFailed(DeleteBlockTransactionResult result) { - if (LOG.isDebugEnabled()) { - LOG.debug( - "Got block deletion ACK from datanode, TXIDs={}, " + "success={}", - result.getTxID(), result.getSuccess()); - } - if (!result.getSuccess()) { - LOG.warn("Got failed ACK for TXID={}, prepare to resend the " - + "TX in next interval", result.getTxID()); - return true; - } - return false; - } - @Override public int getNumOfValidTransactions() throws IOException { lock.lock(); @@ -300,26 +286,46 @@ private void getTransaction(DeletedBlocksTransaction tx, .setCount(transactionStatusManager.getOrDefaultRetryCount( tx.getTxID(), 0)) .build(); + for (ContainerReplica replica : replicas) { DatanodeDetails details = replica.getDatanodeDetails(); - if (!dnList.contains(details)) { - continue; - } if (!transactionStatusManager.isDuplication( details, updatedTxn.getTxID(), commandStatus)) { transactions.addTransactionToDN(details.getUuid(), updatedTxn); + metrics.incrProcessedTransaction(); } } } private Boolean checkInadequateReplica(Set replicas, - DeletedBlocksTransaction txn) throws ContainerNotFoundException { + DeletedBlocksTransaction txn, + Set dnList) throws ContainerNotFoundException { ContainerInfo containerInfo = containerManager .getContainer(ContainerID.valueOf(txn.getContainerID())); ReplicationManager replicationManager = scmContext.getScm().getReplicationManager(); ContainerHealthResult result = replicationManager .getContainerReplicationHealth(containerInfo, replicas); + + // We have made an improvement here, and we expect that all replicas + // of the Container being sent will be included in the dnList. + // This change benefits ACK confirmation and improves deletion speed. + // The principle behind it is that + // DN can receive the command to delete a certain Container at the same time and provide + // feedback to SCM at roughly the same time. + // This avoids the issue of deletion blocking, + // where some replicas of a Container are deleted while others do not receive the delete command. 
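The new dnList check in checkInadequateReplica boils down to a simple rule: hand out a delete transaction only when every replica's datanode is part of the current heartbeat batch, so all replicas can delete and ACK in the same round. A reduced, self-contained sketch of that rule, with plain strings standing in for the SCM datanode types:

import java.util.Set;

final class DeleteTxnDispatchSketch {

  private DeleteTxnDispatchSketch() { }

  static boolean shouldSkip(Set<String> replicaDatanodes,
      Set<String> heartbeatBatch,
      boolean containerHealthy) {
    for (String dn : replicaDatanodes) {
      if (!heartbeatBatch.contains(dn)) {
        // One replica would miss the delete command; postpone the whole transaction.
        return true;
      }
    }
    // Otherwise skip only if the container is not healthy enough to delete from.
    return !containerHealthy;
  }
}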
+ long containerId = txn.getContainerID(); + for (ContainerReplica replica : replicas) { + DatanodeDetails datanodeDetails = replica.getDatanodeDetails(); + if (!dnList.contains(datanodeDetails)) { + DatanodeDetails dnDetail = replica.getDatanodeDetails(); + LOG.debug("Skip Container = {}, because DN = {} is not in dnList.", + containerId, dnDetail.getUuid()); + return true; + } + } + return result.getHealthState() != ContainerHealthResult.HealthState.HEALTHY; } @@ -345,6 +351,7 @@ public DatanodeDeletedBlockTransactions getTransactions( .getCommandStatusByTxId(dnList.stream(). map(DatanodeDetails::getUuid).collect(Collectors.toSet())); ArrayList txIDs = new ArrayList<>(); + metrics.setNumBlockDeletionTransactionDataNodes(dnList.size()); // Here takes block replica count as the threshold to avoid the case // that part of replicas committed the TXN and recorded in the // SCMDeletedBlockTransactionStatusManager, while they are counted @@ -358,23 +365,25 @@ public DatanodeDeletedBlockTransactions getTransactions( // HDDS-7126. When container is under replicated, it is possible // that container is deleted, but transactions are not deleted. if (containerManager.getContainer(id).isDeleted()) { - LOG.warn("Container: " + id + " was deleted for the " + - "transaction: " + txn); + LOG.warn("Container: {} was deleted for the " + + "transaction: {}.", id, txn); txIDs.add(txn.getTxID()); } else if (txn.getCount() > -1 && txn.getCount() <= maxRetry && !containerManager.getContainer(id).isOpen()) { Set replicas = containerManager .getContainerReplicas( ContainerID.valueOf(txn.getContainerID())); - if (checkInadequateReplica(replicas, txn)) { + if (checkInadequateReplica(replicas, txn, dnList)) { + metrics.incrSkippedTransaction(); continue; } getTransaction( txn, transactions, dnList, replicas, commandStatus); + } else if (txn.getCount() >= maxRetry || containerManager.getContainer(id).isOpen()) { + metrics.incrSkippedTransaction(); } } catch (ContainerNotFoundException ex) { - LOG.warn("Container: " + id + " was not found for the transaction: " - + txn); + LOG.warn("Container: {} was not found for the transaction: {}.", id, txn); txIDs.add(txn.getTxID()); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java index ceeb2786135..6e6440c324b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java @@ -144,7 +144,9 @@ public void seekToLast() { @Override public TypedTable.KeyValue seek( Long key) throws IOException { - throw new UnsupportedOperationException("seek"); + iter.seek(key); + findNext(); + return nextTx; } @Override diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java index 7271d9dcba6..e6fc45cb5ee 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java @@ -203,9 +203,10 @@ public EmptyTaskResult call() throws Exception { } } LOG.info("Totally added {} blocks to be deleted for" - + " {} datanodes, task elapsed time: {}ms", + + " {} 
datanodes / {} totalnodes, task elapsed time: {}ms", transactions.getBlocksDeleted(), transactions.getDatanodeTransactionMap().size(), + included.size(), Time.monotonicNow() - startTime); deletedBlockLog.incrementCount(new ArrayList<>(processedTxIDs)); } catch (NotLeaderException nle) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/ScmBlockDeletingServiceMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/ScmBlockDeletingServiceMetrics.java index 2cadca1d92a..6637bd18329 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/ScmBlockDeletingServiceMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/ScmBlockDeletingServiceMetrics.java @@ -24,6 +24,7 @@ import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MutableCounterLong; +import org.apache.hadoop.metrics2.lib.MutableGaugeLong; /** * Metrics related to Block Deleting Service running in SCM. @@ -76,6 +77,15 @@ public final class ScmBlockDeletingServiceMetrics { @Metric(about = "The number of created txs which are added into DB.") private MutableCounterLong numBlockDeletionTransactionCreated; + @Metric(about = "The number of skipped transactions") + private MutableCounterLong numSkippedTransactions; + + @Metric(about = "The number of processed transactions") + private MutableCounterLong numProcessedTransactions; + + @Metric(about = "The number of dataNodes of delete transactions.") + private MutableGaugeLong numBlockDeletionTransactionDataNodes; + private ScmBlockDeletingServiceMetrics() { } @@ -130,6 +140,18 @@ public void incrBlockDeletionTransactionCreated(long count) { this.numBlockDeletionTransactionCreated.incr(count); } + public void incrSkippedTransaction() { + this.numSkippedTransactions.incr(); + } + + public void incrProcessedTransaction() { + this.numProcessedTransactions.incr(); + } + + public void setNumBlockDeletionTransactionDataNodes(long dataNodes) { + this.numBlockDeletionTransactionDataNodes.set(dataNodes); + } + public long getNumBlockDeletionCommandSent() { return numBlockDeletionCommandSent.value(); } @@ -162,6 +184,18 @@ public long getNumBlockDeletionTransactionCreated() { return numBlockDeletionTransactionCreated.value(); } + public long getNumSkippedTransactions() { + return numSkippedTransactions.value(); + } + + public long getNumProcessedTransactions() { + return numProcessedTransactions.value(); + } + + public long getNumBlockDeletionTransactionDataNodes() { + return numBlockDeletionTransactionDataNodes.value(); + } + @Override public String toString() { StringBuffer buffer = new StringBuffer(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java index 00aee0f62c2..d61f9ee366b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java @@ -86,6 +86,8 @@ public class ContainerManagerImpl implements ContainerManager { @SuppressWarnings("java:S2245") // no need for secure random private final Random random = new Random(); + private int maxCountOfContainerList; + /** * */ @@ -115,6 +117,10 @@ public ContainerManagerImpl( 
.getInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT); + this.maxCountOfContainerList = conf + .getInt(ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT, + ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT); + this.scmContainerManagerMetrics = SCMContainerManagerMetrics.create(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java index daadcd824ec..36a51c4e3ca 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java @@ -26,11 +26,13 @@ .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.container.report.ContainerReportValidator; +import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher .ContainerReportFromDatanode; +import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer; import org.apache.hadoop.hdds.server.events.EventHandler; import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; @@ -199,6 +201,11 @@ public void onMessage(final ContainerReportFromDatanode reportFromDatanode, // list processMissingReplicas(datanodeDetails, expectedContainersInDatanode); containerManager.notifyContainerReportProcessing(true, true); + if (reportFromDatanode.isRegister()) { + publisher.fireEvent(SCMEvents.CONTAINER_REGISTRATION_REPORT, + new SCMDatanodeProtocolServer.NodeRegistrationContainerReport(datanodeDetails, + reportFromDatanode.getReport())); + } } } catch (NodeNotFoundException ex) { containerManager.notifyContainerReportProcessing(true, false); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java index dd2d1c57894..df45ffd9b62 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java @@ -32,11 +32,11 @@ import org.slf4j.Logger; import java.util.Collection; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; import java.util.stream.Collectors; /** @@ -56,7 +56,7 @@ protected AbstractFindTargetGreedy( ContainerManager containerManager, PlacementPolicyValidateProxy placementPolicyValidateProxy, NodeManager nodeManager) { - sizeEnteringNode = new HashMap<>(); + sizeEnteringNode = new ConcurrentHashMap<>(); this.containerManager = containerManager; this.placementPolicyValidateProxy = placementPolicyValidateProxy; this.nodeManager = nodeManager; @@ -283,4 +283,9 @@ NodeManager getNodeManager() { public Map getSizeEnteringNodes() { return sizeEnteringNode; } + + @Override + public void 
clearSizeEnteringNodes() { + sizeEnteringNode.clear(); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java index 3dddd67bd8a..2f6b8a7f814 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java @@ -183,17 +183,19 @@ public ContainerBalancerTask.Status getBalancerStatus() { * @return balancer status info if balancer started */ public ContainerBalancerStatusInfo getBalancerStatusInfo() throws IOException { - if (isBalancerRunning()) { - ContainerBalancerConfigurationProto configProto = readConfiguration(ContainerBalancerConfigurationProto.class); - return new ContainerBalancerStatusInfo( - this.startedAt, - configProto, - task.getCurrentIterationsStatistic() - ); - } else { + lock.lock(); + try { + if (isBalancerRunning()) { + return new ContainerBalancerStatusInfo( + this.startedAt, + config.toProtobufBuilder().setShouldRun(true).build(), + task.getCurrentIterationsStatistic() + ); + } return null; + } finally { + lock.unlock(); } - } /** * Checks if ContainerBalancer is in valid state to call stop. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerMetrics.java index 6446089db35..3e164cb0bba 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerMetrics.java @@ -40,6 +40,9 @@ public final class ContainerBalancerMetrics { " in the latest iteration.") private MutableCounterLong dataSizeMovedGBInLatestIteration; + @Metric(about = "Amount of bytes that the Container Balancer moved in the latest iteration.") + private MutableCounterLong dataSizeMovedBytesInLatestIteration; + @Metric(about = "Number of completed container moves performed by " + "Container Balancer in the latest iteration.") private MutableCounterLong numContainerMovesCompletedInLatestIteration; @@ -131,14 +134,16 @@ void incrementNumContainerMovesScheduledInLatestIteration(long valueToAdd) { this.numContainerMovesScheduledInLatestIteration.incr(valueToAdd); } + /** + * Reset the number of containers scheduled to move in the last iteration. + */ public void resetNumContainerMovesScheduledInLatestIteration() { numContainerMovesScheduledInLatestIteration.incr( -getNumContainerMovesScheduledInLatestIteration()); } /** - * Gets the amount of data moved by Container Balancer in the latest - * iteration. + * Retrieves the amount of data moved by the Container Balancer in the latest iteration. * @return size in GB */ public long getDataSizeMovedGBInLatestIteration() { @@ -154,6 +159,29 @@ public void resetDataSizeMovedGBInLatestIteration() { -getDataSizeMovedGBInLatestIteration()); } + /** + * Retrieves the amount of data moved by the Container Balancer in the latest iteration. + * @return size in bytes + */ + public long getDataSizeMovedInLatestIteration() { + return dataSizeMovedBytesInLatestIteration.value(); + } + + /** + * Increment the amount of data moved in the last iteration. 
+ * @param bytes bytes to add + */ + public void incrementDataSizeMovedInLatestIteration(long bytes) { + this.dataSizeMovedBytesInLatestIteration.incr(bytes); + } + + /** + * Reset the amount of data moved in the last iteration. + */ + public void resetDataSizeMovedInLatestIteration() { + dataSizeMovedBytesInLatestIteration.incr(-getDataSizeMovedInLatestIteration()); + } + /** * Gets the number of container moves performed by Container Balancer in the * latest iteration. @@ -163,11 +191,6 @@ public long getNumContainerMovesCompletedInLatestIteration() { return numContainerMovesCompletedInLatestIteration.value(); } - public void incrementNumContainerMovesCompletedInLatestIteration( - long valueToAdd) { - this.numContainerMovesCompletedInLatestIteration.incr(valueToAdd); - } - public void incrementCurrentIterationContainerMoveMetric( MoveManager.MoveResult result, long valueToAdd) { if (result == null) { @@ -204,9 +227,11 @@ public void incrementCurrentIterationContainerMoveMetric( } } + /** + * Reset the number of containers moved in the last iteration. + */ public void resetNumContainerMovesCompletedInLatestIteration() { - numContainerMovesCompletedInLatestIteration.incr( - -getNumContainerMovesCompletedInLatestIteration()); + numContainerMovesCompletedInLatestIteration.incr(-getNumContainerMovesCompletedInLatestIteration()); } /** @@ -218,14 +243,19 @@ public long getNumContainerMovesTimeoutInLatestIteration() { return numContainerMovesTimeoutInLatestIteration.value(); } + /** + * Increases the number of timeout container moves in the latest iteration. + */ public void incrementNumContainerMovesTimeoutInLatestIteration( long valueToAdd) { this.numContainerMovesTimeoutInLatestIteration.incr(valueToAdd); } + /** + * Reset the number of timeout container moves in the latest iteration. + */ public void resetNumContainerMovesTimeoutInLatestIteration() { - numContainerMovesTimeoutInLatestIteration.incr( - -getNumContainerMovesTimeoutInLatestIteration()); + numContainerMovesTimeoutInLatestIteration.incr(-getNumContainerMovesTimeoutInLatestIteration()); } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerStatusInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerStatusInfo.java index cbe8385e53a..a0552142b3b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerStatusInfo.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerStatusInfo.java @@ -19,9 +19,11 @@ package org.apache.hadoop.hdds.scm.container.balancer; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos; import java.time.OffsetDateTime; import java.util.List; +import java.util.stream.Collectors; /** * Info about balancer status. @@ -51,4 +53,21 @@ public HddsProtos.ContainerBalancerConfigurationProto getConfiguration() { public List getIterationsStatusInfo() { return iterationsStatusInfo; } + + /** + * Converts an instance into a protobuf-compatible object. 
+ * @return proto representation + */ + public StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfoProto toProto() { + return StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfoProto + .newBuilder() + .setStartedAt(getStartedAt().toEpochSecond()) + .setConfiguration(getConfiguration()) + .addAllIterationsStatusInfo( + getIterationsStatusInfo() + .stream() + .map(ContainerBalancerTaskIterationStatusInfo::toProto) + .collect(Collectors.toList()) + ).build(); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java index 19a2f3c2e62..f1eee8c6755 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java @@ -37,12 +37,12 @@ import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.time.Duration; +import java.time.OffsetDateTime; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -52,16 +52,23 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Queue; import java.util.Set; +import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; +import static java.time.OffsetDateTime.now; +import static java.util.Collections.emptyMap; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_NODE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_NODE_REPORT_INTERVAL_DEFAULT; +import static org.apache.hadoop.util.StringUtils.byteDesc; /** * Container balancer task performs move of containers between over- and @@ -71,6 +78,7 @@ public class ContainerBalancerTask implements Runnable { public static final Logger LOG = LoggerFactory.getLogger(ContainerBalancerTask.class); + public static final long ABSENCE_OF_DURATION = -1L; private NodeManager nodeManager; private ContainerManager containerManager; @@ -100,7 +108,6 @@ public class ContainerBalancerTask implements Runnable { private double lowerLimit; private ContainerBalancerSelectionCriteria selectionCriteria; private volatile Status taskStatus = Status.RUNNING; - /* Since a container can be selected only once during an iteration, these maps use it as a primary key to track source to target pairings. @@ -117,7 +124,9 @@ public class ContainerBalancerTask implements Runnable { private IterationResult iterationResult; private int nextIterationIndex; private boolean delayStart; - private List iterationsStatistic; + private Queue iterationsStatistic; + private OffsetDateTime currentIterationStarted; + private AtomicBoolean isCurrentIterationInProgress = new AtomicBoolean(false); /** * Constructs ContainerBalancerTask with the specified arguments. 
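The balancer task now keeps per-iteration statistics in a concurrent queue of finished iterations plus an AtomicBoolean marking the one in flight, so status queries can be answered while an iteration is still running. A stripped-down sketch of that pattern, with String standing in for ContainerBalancerTaskIterationStatusInfo:

import java.util.ArrayList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;

final class IterationStatsSketch {

  private final Queue<String> finished = new ConcurrentLinkedQueue<>();
  private final AtomicBoolean inProgress = new AtomicBoolean(false);

  void startIteration() {
    inProgress.compareAndSet(false, true);
  }

  void finishIteration(String stats) {
    finished.offer(stats);
    inProgress.compareAndSet(true, false);
  }

  List<String> snapshot() {
    // Completed iterations first, then a synthesized entry for the running one.
    List<String> result = new ArrayList<>(finished);
    if (inProgress.get()) {
      result.add("in-progress placeholder");
    }
    return result;
  }
}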
@@ -166,7 +175,7 @@ public ContainerBalancerTask(StorageContainerManager scm, findTargetStrategy = new FindTargetGreedyByUsageInfo(containerManager, placementPolicyValidateProxy, nodeManager); } - this.iterationsStatistic = new ArrayList<>(); + this.iterationsStatistic = new ConcurrentLinkedQueue<>(); } /** @@ -215,6 +224,10 @@ private void balance() { // leader change or restart int i = nextIterationIndex; for (; i < iterations && isBalancerRunning(); i++) { + currentIterationStarted = now(); + + isCurrentIterationInProgress.compareAndSet(false, true); + // reset some variables and metrics for this iteration resetState(); if (config.getTriggerDuEnable()) { @@ -261,21 +274,29 @@ private void balance() { return; } - IterationResult iR = doIteration(); - saveIterationStatistic(i, iR); + IterationResult currentIterationResult = doIteration(); + ContainerBalancerTaskIterationStatusInfo iterationStatistic = + getIterationStatistic(i + 1, currentIterationResult, getCurrentIterationDuration()); + iterationsStatistic.offer(iterationStatistic); + + isCurrentIterationInProgress.compareAndSet(true, false); + + findTargetStrategy.clearSizeEnteringNodes(); + findSourceStrategy.clearSizeLeavingNodes(); + metrics.incrementNumIterations(1); - LOG.info("Result of this iteration of Container Balancer: {}", iR); + LOG.info("Result of this iteration of Container Balancer: {}", currentIterationResult); // if no new move option is generated, it means the cluster cannot be // balanced anymore; so just stop balancer - if (iR == IterationResult.CAN_NOT_BALANCE_ANY_MORE) { - tryStopWithSaveConfiguration(iR.toString()); + if (currentIterationResult == IterationResult.CAN_NOT_BALANCE_ANY_MORE) { + tryStopWithSaveConfiguration(currentIterationResult.toString()); return; } // persist next iteration index - if (iR == IterationResult.ITERATION_COMPLETED) { + if (currentIterationResult == IterationResult.ITERATION_COMPLETED) { try { saveConfiguration(config, true, i + 1); } catch (IOException | TimeoutException e) { @@ -306,83 +327,143 @@ private void balance() { tryStopWithSaveConfiguration("Completed all iterations."); } - private void saveIterationStatistic(Integer iterationNumber, IterationResult iR) { - ContainerBalancerTaskIterationStatusInfo iterationStatistic = new ContainerBalancerTaskIterationStatusInfo( - iterationNumber, - iR.name(), - getSizeScheduledForMoveInLatestIteration() / OzoneConsts.GB, - metrics.getDataSizeMovedGBInLatestIteration(), - metrics.getNumContainerMovesScheduledInLatestIteration(), - metrics.getNumContainerMovesCompletedInLatestIteration(), - metrics.getNumContainerMovesFailedInLatestIteration(), - metrics.getNumContainerMovesTimeoutInLatestIteration(), - findTargetStrategy.getSizeEnteringNodes() - .entrySet() - .stream() - .filter(Objects::nonNull) - .filter(datanodeDetailsLongEntry -> datanodeDetailsLongEntry.getValue() > 0) - .collect( - Collectors.toMap( - entry -> entry.getKey().getUuid(), - entry -> entry.getValue() / OzoneConsts.GB - ) - ), - findSourceStrategy.getSizeLeavingNodes() - .entrySet() - .stream() - .filter(Objects::nonNull) - .filter(datanodeDetailsLongEntry -> datanodeDetailsLongEntry.getValue() > 0) - .collect( - Collectors.toMap( - entry -> entry.getKey().getUuid(), - entry -> entry.getValue() / OzoneConsts.GB - ) - ) + private ContainerBalancerTaskIterationStatusInfo getIterationStatistic(Integer iterationNumber, + IterationResult currentIterationResult, + long iterationDuration) { + String currentIterationResultName = currentIterationResult == null ? 
null : currentIterationResult.name(); + Map sizeEnteringDataToNodes = + convertToNodeIdToTrafficMap(findTargetStrategy.getSizeEnteringNodes()); + Map sizeLeavingDataFromNodes = + convertToNodeIdToTrafficMap(findSourceStrategy.getSizeLeavingNodes()); + IterationInfo iterationInfo = new IterationInfo( + iterationNumber, + currentIterationResultName, + iterationDuration ); - iterationsStatistic.add(iterationStatistic); + ContainerMoveInfo containerMoveInfo = new ContainerMoveInfo(metrics); + + DataMoveInfo dataMoveInfo = + getDataMoveInfo(currentIterationResultName, sizeEnteringDataToNodes, sizeLeavingDataFromNodes); + return new ContainerBalancerTaskIterationStatusInfo(iterationInfo, containerMoveInfo, dataMoveInfo); + } + + private DataMoveInfo getDataMoveInfo(String currentIterationResultName, Map sizeEnteringDataToNodes, + Map sizeLeavingDataFromNodes) { + if (currentIterationResultName == null) { + // For unfinished iteration + return new DataMoveInfo( + getSizeScheduledForMoveInLatestIteration(), + sizeActuallyMovedInLatestIteration, + sizeEnteringDataToNodes, + sizeLeavingDataFromNodes + ); + } else { + // For finished iteration + return new DataMoveInfo( + getSizeScheduledForMoveInLatestIteration(), + metrics.getDataSizeMovedInLatestIteration(), + sizeEnteringDataToNodes, + sizeLeavingDataFromNodes + ); + } + } + + private Map convertToNodeIdToTrafficMap(Map nodeTrafficMap) { + return nodeTrafficMap + .entrySet() + .stream() + .filter(Objects::nonNull) + .filter(datanodeDetailsLongEntry -> datanodeDetailsLongEntry.getValue() > 0) + .collect( + Collectors.toMap( + entry -> entry.getKey().getUuid(), + Map.Entry::getValue + ) + ); } + /** + * Get current iteration statistics. + * @return current iteration statistic + */ public List getCurrentIterationsStatistic() { + List resultList = new ArrayList<>(iterationsStatistic); + ContainerBalancerTaskIterationStatusInfo currentIterationStatistic = createCurrentIterationStatistic(); + if (currentIterationStatistic != null) { + resultList.add(currentIterationStatistic); + } + return resultList; + } - int lastIterationNumber = iterationsStatistic.stream() + private ContainerBalancerTaskIterationStatusInfo createCurrentIterationStatistic() { + List resultList = new ArrayList<>(iterationsStatistic); + + int lastIterationNumber = resultList.stream() .mapToInt(ContainerBalancerTaskIterationStatusInfo::getIterationNumber) .max() .orElse(0); + long iterationDuration = getCurrentIterationDuration(); + + if (isCurrentIterationInProgress.get()) { + return getIterationStatistic(lastIterationNumber + 1, null, iterationDuration); + } else { + return null; + } + } - ContainerBalancerTaskIterationStatusInfo currentIterationStatistic = new ContainerBalancerTaskIterationStatusInfo( + private static ContainerBalancerTaskIterationStatusInfo getEmptyCurrentIterationStatistic( + long iterationDuration) { + ContainerMoveInfo containerMoveInfo = new ContainerMoveInfo(0, 0, 0, 0); + DataMoveInfo dataMoveInfo = new DataMoveInfo( + 0, + 0, + emptyMap(), + emptyMap() + ); + IterationInfo iterationInfo = new IterationInfo( + 0, + null, + iterationDuration + ); + return new ContainerBalancerTaskIterationStatusInfo( + iterationInfo, + containerMoveInfo, + dataMoveInfo + ); + } + + private ContainerBalancerTaskIterationStatusInfo getFilledCurrentIterationStatistic(int lastIterationNumber, + long iterationDuration) { + Map sizeEnteringDataToNodes = + convertToNodeIdToTrafficMap(findTargetStrategy.getSizeEnteringNodes()); + Map sizeLeavingDataFromNodes = + 
convertToNodeIdToTrafficMap(findSourceStrategy.getSizeLeavingNodes()); + + ContainerMoveInfo containerMoveInfo = new ContainerMoveInfo(metrics); + DataMoveInfo dataMoveInfo = new DataMoveInfo( + getSizeScheduledForMoveInLatestIteration(), + sizeActuallyMovedInLatestIteration, + sizeEnteringDataToNodes, + sizeLeavingDataFromNodes + ); + IterationInfo iterationInfo = new IterationInfo( lastIterationNumber + 1, null, - getSizeScheduledForMoveInLatestIteration() / OzoneConsts.GB, - sizeActuallyMovedInLatestIteration / OzoneConsts.GB, - metrics.getNumContainerMovesScheduledInLatestIteration(), - metrics.getNumContainerMovesCompletedInLatestIteration(), - metrics.getNumContainerMovesFailedInLatestIteration(), - metrics.getNumContainerMovesTimeoutInLatestIteration(), - findTargetStrategy.getSizeEnteringNodes() - .entrySet() - .stream() - .filter(Objects::nonNull) - .filter(datanodeDetailsLongEntry -> datanodeDetailsLongEntry.getValue() > 0) - .collect(Collectors.toMap( - entry -> entry.getKey().getUuid(), - entry -> entry.getValue() / OzoneConsts.GB - ) - ), - findSourceStrategy.getSizeLeavingNodes() - .entrySet() - .stream() - .filter(Objects::nonNull) - .filter(datanodeDetailsLongEntry -> datanodeDetailsLongEntry.getValue() > 0) - .collect( - Collectors.toMap( - entry -> entry.getKey().getUuid(), - entry -> entry.getValue() / OzoneConsts.GB - ) - ) + iterationDuration + ); + return new ContainerBalancerTaskIterationStatusInfo( + iterationInfo, + containerMoveInfo, + dataMoveInfo ); - List resultList = new ArrayList<>(iterationsStatistic); - resultList.add(currentIterationStatistic); - return resultList; + } + + private long getCurrentIterationDuration() { + if (currentIterationStarted == null) { + return ABSENCE_OF_DURATION; + } else { + return now().toEpochSecond() - currentIterationStarted.toEpochSecond(); + } } /** @@ -691,7 +772,7 @@ private void checkIterationMoveResults() { moveSelectionToFutureMap.values(); if (!futures.isEmpty()) { CompletableFuture allFuturesResult = CompletableFuture.allOf( - futures.toArray(new CompletableFuture[futures.size()])); + futures.toArray(new CompletableFuture[0])); try { allFuturesResult.get(config.getMoveTimeout().toMillis(), TimeUnit.MILLISECONDS); @@ -708,26 +789,28 @@ private void checkIterationMoveResults() { } } - countDatanodesInvolvedPerIteration = - selectedSources.size() + selectedTargets.size(); - metrics.incrementNumDatanodesInvolvedInLatestIteration( - countDatanodesInvolvedPerIteration); - metrics.incrementNumContainerMovesScheduled( - metrics.getNumContainerMovesScheduledInLatestIteration()); - metrics.incrementNumContainerMovesCompleted( - metrics.getNumContainerMovesCompletedInLatestIteration()); - metrics.incrementNumContainerMovesTimeout( - metrics.getNumContainerMovesTimeoutInLatestIteration()); - metrics.incrementDataSizeMovedGBInLatestIteration( - sizeActuallyMovedInLatestIteration / OzoneConsts.GB); - metrics.incrementDataSizeMovedGB( - metrics.getDataSizeMovedGBInLatestIteration()); - metrics.incrementNumContainerMovesFailed( - metrics.getNumContainerMovesFailedInLatestIteration()); + countDatanodesInvolvedPerIteration = selectedSources.size() + selectedTargets.size(); + + metrics.incrementNumDatanodesInvolvedInLatestIteration(countDatanodesInvolvedPerIteration); + + metrics.incrementNumContainerMovesScheduled(metrics.getNumContainerMovesScheduledInLatestIteration()); + + metrics.incrementNumContainerMovesCompleted(metrics.getNumContainerMovesCompletedInLatestIteration()); + + 
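A minimal analogy (assumed names, using AtomicLong rather than Hadoop's MutableCounterLong) for how the latest-iteration counters above are folded into the cumulative metrics at the end of an iteration and then reset by adding their negation:

import java.util.concurrent.atomic.AtomicLong;

// Analogy for the "latest iteration" vs. cumulative counters: at the end of
// an iteration the latest-iteration value is added to the running total and
// the latest-iteration counter is cleared by incrementing with its negation,
// mirroring the incr(-get(...)) reset methods in the metrics class.
final class MoveCounters {
  private final AtomicLong movedBytesLatestIteration = new AtomicLong();
  private final AtomicLong movedBytesTotal = new AtomicLong();

  void recordMove(long bytes) {
    movedBytesLatestIteration.addAndGet(bytes);
  }

  void closeIteration() {
    long latest = movedBytesLatestIteration.get();
    movedBytesTotal.addAndGet(latest);
    // Reset-by-negative-increment.
    movedBytesLatestIteration.addAndGet(-latest);
  }

  long totalBytes() {
    return movedBytesTotal.get();
  }
}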
metrics.incrementNumContainerMovesTimeout(metrics.getNumContainerMovesTimeoutInLatestIteration()); + + metrics.incrementDataSizeMovedGBInLatestIteration(sizeActuallyMovedInLatestIteration / OzoneConsts.GB); + + metrics.incrementDataSizeMovedInLatestIteration(sizeActuallyMovedInLatestIteration); + + metrics.incrementDataSizeMovedGB(metrics.getDataSizeMovedGBInLatestIteration()); + + metrics.incrementNumContainerMovesFailed(metrics.getNumContainerMovesFailedInLatestIteration()); + LOG.info("Iteration Summary. Number of Datanodes involved: {}. Size " + "moved: {} ({} Bytes). Number of Container moves completed: {}.", countDatanodesInvolvedPerIteration, - StringUtils.byteDesc(sizeActuallyMovedInLatestIteration), + byteDesc(sizeActuallyMovedInLatestIteration), sizeActuallyMovedInLatestIteration, metrics.getNumContainerMovesCompletedInLatestIteration()); } @@ -1146,6 +1229,7 @@ private void resetState() { this.sizeScheduledForMoveInLatestIteration = 0; this.sizeActuallyMovedInLatestIteration = 0; metrics.resetDataSizeMovedGBInLatestIteration(); + metrics.resetDataSizeMovedInLatestIteration(); metrics.resetNumContainerMovesScheduledInLatestIteration(); metrics.resetNumContainerMovesCompletedInLatestIteration(); metrics.resetNumContainerMovesTimeoutInLatestIteration(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTaskIterationStatusInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTaskIterationStatusInfo.java index 1d597b0ca27..a466d9fd474 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTaskIterationStatusInfo.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTaskIterationStatusInfo.java @@ -18,86 +18,160 @@ package org.apache.hadoop.hdds.scm.container.balancer; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos; + +import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.UUID; +import java.util.stream.Collectors; /** * Information about balancer task iteration. 
*/ public class ContainerBalancerTaskIterationStatusInfo { - private final Integer iterationNumber; - private final String iterationResult; - private final long sizeScheduledForMoveGB; - private final long dataSizeMovedGB; - private final long containerMovesScheduled; - private final long containerMovesCompleted; - private final long containerMovesFailed; - private final long containerMovesTimeout; - private final Map sizeEnteringNodesGB; - private final Map sizeLeavingNodesGB; - - @SuppressWarnings("checkstyle:ParameterNumber") + + private final IterationInfo iterationInfo; + private final ContainerMoveInfo containerMoveInfo; + private final DataMoveInfo dataMoveInfo; + public ContainerBalancerTaskIterationStatusInfo( - Integer iterationNumber, - String iterationResult, - long sizeScheduledForMoveGB, - long dataSizeMovedGB, - long containerMovesScheduled, - long containerMovesCompleted, - long containerMovesFailed, - long containerMovesTimeout, - Map sizeEnteringNodesGB, - Map sizeLeavingNodesGB) { - this.iterationNumber = iterationNumber; - this.iterationResult = iterationResult; - this.sizeScheduledForMoveGB = sizeScheduledForMoveGB; - this.dataSizeMovedGB = dataSizeMovedGB; - this.containerMovesScheduled = containerMovesScheduled; - this.containerMovesCompleted = containerMovesCompleted; - this.containerMovesFailed = containerMovesFailed; - this.containerMovesTimeout = containerMovesTimeout; - this.sizeEnteringNodesGB = sizeEnteringNodesGB; - this.sizeLeavingNodesGB = sizeLeavingNodesGB; + IterationInfo iterationInfo, + ContainerMoveInfo containerMoveInfo, + DataMoveInfo dataMoveInfo) { + this.iterationInfo = iterationInfo; + this.containerMoveInfo = containerMoveInfo; + this.dataMoveInfo = dataMoveInfo; } + /** + * Get the number of iterations. + * @return iteration number + */ public Integer getIterationNumber() { - return iterationNumber; + return iterationInfo.getIterationNumber(); } + /** + * Get the iteration result. + * @return iteration result + */ public String getIterationResult() { - return iterationResult; + return iterationInfo.getIterationResult(); } - public long getSizeScheduledForMoveGB() { - return sizeScheduledForMoveGB; + /** + * Get the size of the bytes that are scheduled to move in the iteration. + * @return size in bytes + */ + public long getSizeScheduledForMove() { + return dataMoveInfo.getSizeScheduledForMove(); } - public long getDataSizeMovedGB() { - return dataSizeMovedGB; + /** + * Get the size of the bytes that were moved in the iteration. + * @return size in bytes + */ + public long getDataSizeMoved() { + return dataMoveInfo.getDataSizeMoved(); } + /** + * Get the number of containers scheduled to move. + * @return number of containers scheduled to move + */ public long getContainerMovesScheduled() { - return containerMovesScheduled; + return containerMoveInfo.getContainerMovesScheduled(); } + /** + * Get the number of successfully moved containers. + * @return number of successfully moved containers + */ public long getContainerMovesCompleted() { - return containerMovesCompleted; + return containerMoveInfo.getContainerMovesCompleted(); } + /** + * Get the number of containers that were not moved successfully. + * @return number of unsuccessfully moved containers + */ public long getContainerMovesFailed() { - return containerMovesFailed; + return containerMoveInfo.getContainerMovesFailed(); } + /** + * Get the number of containers moved with a timeout. 
+ * @return number of moved with timeout containers + */ public long getContainerMovesTimeout() { - return containerMovesTimeout; + return containerMoveInfo.getContainerMovesTimeout(); + } + + /** + * Get a map of the node IDs and the corresponding data sizes moved to each node. + * @return nodeId to size entering from node map + */ + public Map getSizeEnteringNodes() { + return dataMoveInfo.getSizeEnteringNodes(); + } + + /** + * Get a map of the node IDs and the corresponding data sizes moved from each node. + * @return nodeId to size leaving from node map + */ + public Map getSizeLeavingNodes() { + return dataMoveInfo.getSizeLeavingNodes(); + } + + /** + * Get the iteration duration. + * @return iteration duration + */ + public Long getIterationDuration() { + return iterationInfo.getIterationDuration(); } - public Map getSizeEnteringNodesGB() { - return sizeEnteringNodesGB; + /** + * Converts an instance into the protobuf compatible object. + * @return proto representation + */ + public StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfoProto toProto() { + return StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfoProto.newBuilder() + .setIterationNumber(getIterationNumber()) + .setIterationResult(Optional.ofNullable(getIterationResult()).orElse("")) + .setIterationDuration(getIterationDuration()) + .setSizeScheduledForMove(getSizeScheduledForMove()) + .setDataSizeMoved(getDataSizeMoved()) + .setContainerMovesScheduled(getContainerMovesScheduled()) + .setContainerMovesCompleted(getContainerMovesCompleted()) + .setContainerMovesFailed(getContainerMovesFailed()) + .setContainerMovesTimeout(getContainerMovesTimeout()) + .addAllSizeEnteringNodes( + mapToProtoNodeTransferInfo(getSizeEnteringNodes()) + ) + .addAllSizeLeavingNodes( + mapToProtoNodeTransferInfo(getSizeLeavingNodes()) + ) + .build(); } - public Map getSizeLeavingNodesGB() { - return sizeLeavingNodesGB; + /** + * Converts an instance into the protobuf compatible object. + * @param nodes node id to node traffic size + * @return node transfer info proto representation + */ + private List mapToProtoNodeTransferInfo( + Map nodes + ) { + return nodes.entrySet() + .stream() + .map(entry -> StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() + .setUuid(entry.getKey().toString()) + .setDataVolume(entry.getValue()) + .build() + ) + .collect(Collectors.toList()); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerMoveInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerMoveInfo.java new file mode 100644 index 00000000000..caed286480b --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerMoveInfo.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.container.balancer; + +/** + * Information about moving containers. + */ +public class ContainerMoveInfo { + private final long containerMovesScheduled; + private final long containerMovesCompleted; + private final long containerMovesFailed; + private final long containerMovesTimeout; + + public ContainerMoveInfo(long containerMovesScheduled, long containerMovesCompleted, long containerMovesFailed, + long containerMovesTimeout) { + this.containerMovesScheduled = containerMovesScheduled; + this.containerMovesCompleted = containerMovesCompleted; + this.containerMovesFailed = containerMovesFailed; + this.containerMovesTimeout = containerMovesTimeout; + } + + public ContainerMoveInfo(ContainerBalancerMetrics metrics) { + this.containerMovesScheduled = metrics.getNumContainerMovesScheduledInLatestIteration(); + this.containerMovesCompleted = metrics.getNumContainerMovesCompletedInLatestIteration(); + this.containerMovesFailed = metrics.getNumContainerMovesFailedInLatestIteration(); + this.containerMovesTimeout = metrics.getNumContainerMovesTimeoutInLatestIteration(); + } + + public long getContainerMovesScheduled() { + return containerMovesScheduled; + } + + public long getContainerMovesCompleted() { + return containerMovesCompleted; + } + + public long getContainerMovesFailed() { + return containerMovesFailed; + } + + public long getContainerMovesTimeout() { + return containerMovesTimeout; + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/DataMoveInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/DataMoveInfo.java new file mode 100644 index 00000000000..cd97011768d --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/DataMoveInfo.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.container.balancer; + +import java.util.Map; +import java.util.UUID; + +/** + * Information about the process of moving data. + */ +public class DataMoveInfo { + private final long sizeScheduledForMove; + private final long dataSizeMoved; + private final Map sizeEnteringNodes; + private final Map sizeLeavingNodes; + + + public DataMoveInfo( + long sizeScheduledForMove, + long dataSizeMoved, + Map sizeEnteringNodes, + Map sizeLeavingNodes) { + this.sizeScheduledForMove = sizeScheduledForMove; + this.dataSizeMoved = dataSizeMoved; + this.sizeEnteringNodes = sizeEnteringNodes; + this.sizeLeavingNodes = sizeLeavingNodes; + } + + public long getSizeScheduledForMove() { + return sizeScheduledForMove; + } + + public long getDataSizeMoved() { + return dataSizeMoved; + } + + public Map getSizeEnteringNodes() { + return sizeEnteringNodes; + } + + public Map getSizeLeavingNodes() { + return sizeLeavingNodes; + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java index 435cc9859a9..9773ae45f50 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java @@ -26,11 +26,11 @@ import java.util.ArrayList; import java.util.Collection; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.PriorityQueue; import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; /** * The selection criteria for selecting source datanodes , the containers of @@ -46,7 +46,7 @@ public class FindSourceGreedy implements FindSourceStrategy { private Double lowerLimit; FindSourceGreedy(NodeManager nodeManager) { - sizeLeavingNode = new HashMap<>(); + sizeLeavingNode = new ConcurrentHashMap<>(); potentialSources = new PriorityQueue<>((a, b) -> { double currentUsageOfA = a.calculateUtilization( -sizeLeavingNode.get(a.getDatanodeDetails())); @@ -206,4 +206,9 @@ public void reInitialize(List potentialDataNodes, public Map getSizeLeavingNodes() { return sizeLeavingNode; } + + @Override + public void clearSizeLeavingNodes() { + sizeLeavingNode.clear(); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceStrategy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceStrategy.java index 9e429aaa21d..0043d8509b0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceStrategy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceStrategy.java @@ -87,5 +87,14 @@ void reInitialize(List potentialDataNodes, */ void resetPotentialSources(@Nonnull Collection sources); + /** + * Get a map of the node IDs and the corresponding data sizes moved from each node. 
+ * @return nodeId to size leaving from node map + */ Map getSizeLeavingNodes(); + + /** + * Clear the map of node IDs and their corresponding data sizes that were moved from each node. + */ + void clearSizeLeavingNodes(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetStrategy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetStrategy.java index 389ea6e5192..8959fc4ff23 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetStrategy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetStrategy.java @@ -70,5 +70,14 @@ void reInitialize(List potentialDataNodes, */ void resetPotentialTargets(@Nonnull Collection targets); + /** + * Get a map of the node IDs and the corresponding data sizes moved to each node. + * @return nodeId to size entering from node map + */ Map getSizeEnteringNodes(); + + /** + * Clear the map of node IDs and their corresponding data sizes that were moved to each node. + */ + void clearSizeEnteringNodes(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/IterationInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/IterationInfo.java new file mode 100644 index 00000000000..615848a097a --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/IterationInfo.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.container.balancer; + +/** + * Information about the iteration. + */ +public class IterationInfo { + + private final Integer iterationNumber; + private final String iterationResult; + private final Long iterationDuration; + + public IterationInfo(Integer iterationNumber, String iterationResult, long iterationDuration) { + this.iterationNumber = iterationNumber; + this.iterationResult = iterationResult; + this.iterationDuration = iterationDuration; + } + + public Integer getIterationNumber() { + return iterationNumber; + } + + public String getIterationResult() { + return iterationResult; + } + + public Long getIterationDuration() { + return iterationDuration; + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java index 7fec06e7e06..1c2b5a3be39 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java @@ -346,8 +346,7 @@ protected List chooseDatanodesInternalLegacy( return chooseNodes(null, chosenNodes, mutableFavoredNodes, mutableUsedNodes, favorIndex, nodesRequired, mapSizeRequired); } else { - List mutableExcludedNodes = new ArrayList<>(); - mutableExcludedNodes.addAll(excludedNodes); + List mutableExcludedNodes = new ArrayList<>(excludedNodes); // choose node to meet replication requirement // case 1: one excluded node, choose one on the same rack as the excluded // node, choose others on different racks. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java index 2b2c032cff4..eb7bc6b3ebf 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java @@ -64,6 +64,13 @@ public final class SCMEvents { NodeRegistrationContainerReport.class, "Node_Registration_Container_Report"); + /** + * Event generated on DataNode Registration Container Report. + */ + public static final TypedEvent + CONTAINER_REGISTRATION_REPORT = new TypedEvent<>( + NodeRegistrationContainerReport.class, "Container_Registration_Report"); + /** * ContainerReports are sent out by Datanodes. This report is received by * SCMDatanodeHeartbeatDispatcher and Container_Report Event is generated. 
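Hedged usage sketch: assuming a caller in the same package as the balancer classes, a finished-iteration record could be assembled from the three new value objects (IterationInfo, ContainerMoveInfo, DataMoveInfo) as shown below; the byte figures and node IDs are made up for illustration.

import java.util.Collections;
import java.util.Map;
import java.util.UUID;

// Hypothetical assembly of one iteration record; all sizes are in bytes.
final class IterationStatusExample {
  static ContainerBalancerTaskIterationStatusInfo sample() {
    IterationInfo iterationInfo = new IterationInfo(1, "ITERATION_COMPLETED", 42L);
    ContainerMoveInfo moveInfo = new ContainerMoveInfo(5, 4, 1, 0);
    Map<UUID, Long> entering = Collections.singletonMap(UUID.randomUUID(), 1_073_741_824L);
    Map<UUID, Long> leaving = Collections.singletonMap(UUID.randomUUID(), 1_073_741_824L);
    DataMoveInfo dataMoveInfo = new DataMoveInfo(2L * 1_073_741_824L, 1_073_741_824L, entering, leaving);
    // Protobuf conversion for the status RPC goes through toProto().
    return new ContainerBalancerTaskIterationStatusInfo(iterationInfo, moveInfo, dataMoveInfo);
  }
}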
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcService.java index bd4b56cd8c1..ec95ab66bf1 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcService.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcService.java @@ -18,8 +18,7 @@ package org.apache.hadoop.hdds.scm.ha; import java.io.IOException; - -import com.google.common.base.Preconditions; +import java.util.Objects; import org.apache.hadoop.hdds.protocol.scm.proto.InterSCMProtocolProtos.CopyDBCheckpointRequestProto; import org.apache.hadoop.hdds.protocol.scm.proto.InterSCMProtocolProtos.CopyDBCheckpointResponseProto; @@ -52,12 +51,11 @@ public class InterSCMGrpcService extends private final Table transactionInfoTable; InterSCMGrpcService(final StorageContainerManager scm) throws IOException { - Preconditions.checkNotNull(scm); + Objects.requireNonNull(scm, "scm"); this.scm = scm; this.transactionInfoTable = HAUtils.getTransactionInfoTable( - scm.getScmMetadataStore().getStore(), new SCMDBDefinition()); - provider = - new SCMDBCheckpointProvider(scm.getScmMetadataStore().getStore()); + scm.getScmMetadataStore().getStore(), SCMDBDefinition.get()); + this.provider = new SCMDBCheckpointProvider(scm.getScmMetadataStore().getStore()); } @Override @@ -67,7 +65,7 @@ public void download(CopyDBCheckpointRequestProto request, scm.getScmHAManager().asSCMHADBTransactionBuffer().flush(); TransactionInfo transactionInfo = transactionInfoTable.get(TRANSACTION_INFO_KEY); - Preconditions.checkNotNull(transactionInfo); + Objects.requireNonNull(transactionInfo, "transactionInfo"); SCMGrpcOutputStream outputStream = new SCMGrpcOutputStream(responseObserver, scm.getClusterId(), BUFFER_SIZE); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBufferImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBufferImpl.java index f1ee76a198e..cd0346d72f8 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBufferImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBufferImpl.java @@ -26,6 +26,8 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.ratis.statemachine.SnapshotInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.concurrent.atomic.AtomicLong; @@ -41,6 +43,8 @@ * operation in DB. 
*/ public class SCMHADBTransactionBufferImpl implements SCMHADBTransactionBuffer { + + public static final Logger LOG = LoggerFactory.getLogger(SCMHADBTransactionBufferImpl.class); private final StorageContainerManager scm; private SCMMetadataStore metadataStore; private BatchOperation currentBatchOperation; @@ -107,6 +111,8 @@ public SnapshotInfo getLatestSnapshot() { @Override public void setLatestSnapshot(SnapshotInfo latestSnapshot) { + LOG.info("{}: Set latest Snapshot to {}", + scm.getScmHAManager().getRatisServer().getDivision().getId(), latestSnapshot); this.latestSnapshot.set(latestSnapshot); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java index fc3c1548ba1..5d0ea444ef8 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java @@ -72,6 +72,7 @@ public class SCMHAManagerImpl implements SCMHAManager { private final SCMRatisServer ratisServer; private final ConfigurationSource conf; + private final OzoneConfiguration ozoneConf; private final SecurityConfig securityConfig; private final DBTransactionBuffer transactionBuffer; private final SCMSnapshotProvider scmSnapshotProvider; @@ -89,6 +90,7 @@ public SCMHAManagerImpl(final ConfigurationSource conf, final SecurityConfig securityConfig, final StorageContainerManager scm) throws IOException { this.conf = conf; + this.ozoneConf = OzoneConfiguration.of(conf); this.securityConfig = securityConfig; this.scm = scm; this.exitManager = new ExitManager(); @@ -128,7 +130,7 @@ public void start() throws IOException { // It will first try to add itself to existing ring final SCMNodeDetails nodeDetails = scm.getSCMHANodeDetails().getLocalNodeDetails(); - final boolean success = HAUtils.addSCM(OzoneConfiguration.of(conf), + final boolean success = HAUtils.addSCM(ozoneConf, new AddSCMRequest.Builder().setClusterId(scm.getClusterId()) .setScmId(scm.getScmId()) .setRatisAddr(nodeDetails @@ -221,17 +223,18 @@ public List getSecretKeysFromLeader(String leaderID) } } + private TransactionInfo getTransactionInfoFromCheckpoint(Path checkpointLocation) throws IOException { + return HAUtils.getTrxnInfoFromCheckpoint( + ozoneConf, checkpointLocation, SCMDBDefinition.get()); + } + @Override public TermIndex verifyCheckpointFromLeader(String leaderId, DBCheckpoint checkpoint) { try { Path checkpointLocation = checkpoint.getCheckpointLocation(); - TransactionInfo checkpointTxnInfo = HAUtils - .getTrxnInfoFromCheckpoint(OzoneConfiguration.of(conf), - checkpointLocation, new SCMDBDefinition()); - - LOG.info("Installing checkpoint with SCMTransactionInfo {}", - checkpointTxnInfo); + final TransactionInfo checkpointTxnInfo = getTransactionInfoFromCheckpoint(checkpointLocation); + LOG.info("{}: Verify checkpoint {} from leader {}", scm.getScmId(), checkpointTxnInfo, leaderId); TermIndex termIndex = getRatisServer().getSCMStateMachine().getLastAppliedTermIndex(); @@ -281,12 +284,9 @@ public TermIndex installCheckpoint(DBCheckpoint dbCheckpoint) throws Exception { Path checkpointLocation = dbCheckpoint.getCheckpointLocation(); - TransactionInfo checkpointTrxnInfo = HAUtils - .getTrxnInfoFromCheckpoint(OzoneConfiguration.of(conf), - checkpointLocation, new SCMDBDefinition()); + final TransactionInfo checkpointTrxnInfo = getTransactionInfoFromCheckpoint(checkpointLocation); - 
LOG.info("Installing checkpoint with SCMTransactionInfo {}", - checkpointTrxnInfo); + LOG.info("{}: Install checkpoint {}", scm.getScmId(), checkpointTrxnInfo); return installCheckpoint(checkpointLocation, checkpointTrxnInfo); } @@ -457,7 +457,7 @@ public void startServices() throws IOException { // TODO: Fix the metrics ?? final SCMMetadataStore metadataStore = scm.getScmMetadataStore(); - metadataStore.start(OzoneConfiguration.of(conf)); + metadataStore.start(ozoneConf); scm.getSequenceIdGen().reinitialize(metadataStore.getSequenceIdTable()); scm.getPipelineManager().reinitialize(metadataStore.getPipelineTable()); scm.getContainerManager().reinitialize(metadataStore.getContainerTable()); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerStub.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerStub.java index b9539684ed0..85664dd232f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerStub.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerStub.java @@ -23,6 +23,7 @@ import java.util.EnumMap; import java.util.List; import java.util.Map; +import java.util.UUID; import com.google.common.base.Preconditions; import com.google.protobuf.InvalidProtocolBufferException; @@ -170,6 +171,8 @@ private class RatisServerStub implements SCMRatisServer { private Map handlers = new EnumMap<>(RequestType.class); + private RaftPeerId leaderId = RaftPeerId.valueOf(UUID.randomUUID().toString()); + @Override public void start() { } @@ -283,5 +286,10 @@ public boolean removeSCM(RemoveSCMRequest request) throws IOException { public GrpcTlsConfig getGrpcTlsConfig() { return null; } + + @Override + public RaftPeerId getLeaderId() { + return leaderId; + } } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java index a786bd2944f..4e883b27a7d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.scm.AddSCMRequest; import org.apache.hadoop.hdds.scm.RemoveSCMRequest; import org.apache.ratis.grpc.GrpcTlsConfig; +import org.apache.ratis.protocol.RaftPeerId; import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.apache.ratis.server.RaftServer; @@ -68,4 +69,6 @@ SCMRatisResponse submitRequest(SCMRatisRequest request) GrpcTlsConfig getGrpcTlsConfig(); + RaftPeerId getLeaderId(); + } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java index 70dffba27ec..0383bf18095 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java @@ -29,6 +29,7 @@ import java.util.concurrent.atomic.AtomicLong; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; +import jakarta.annotation.Nullable; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -147,6 +148,16 @@ public GrpcTlsConfig getGrpcTlsConfig() { return grpcTlsConfig; 
} + @Override + @Nullable + public RaftPeerId getLeaderId() { + RaftPeer raftLeaderPeer = getLeader(); + if (raftLeaderPeer != null) { + return raftLeaderPeer.getId(); + } + return null; + } + private static void waitForLeaderToBeReady(RaftServer server, OzoneConfiguration conf, RaftGroup group) throws IOException { boolean ready; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java index 1128accd2ff..5805fe67e49 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java @@ -137,6 +137,7 @@ public void initialize(RaftServer server, RaftGroupId id, getLifeCycle().startAndTransition(() -> { super.initialize(server, id, raftStorage); storage.init(raftStorage); + LOG.info("{}: initialize {}", server.getId(), id); }); } @@ -149,6 +150,9 @@ public CompletableFuture applyTransaction( final SCMRatisRequest request = SCMRatisRequest.decode( Message.valueOf(trx.getStateMachineLogEntry().getLogData())); + if (LOG.isDebugEnabled()) { + LOG.debug("{}: applyTransaction {}", getId(), TermIndex.valueOf(trx.getLogEntry())); + } try { applyTransactionFuture.complete(process(request)); } catch (SCMException ex) { @@ -389,6 +393,7 @@ public void notifyConfigurationChanged(long term, long index, @Override public void pause() { final LifeCycle lc = getLifeCycle(); + LOG.info("{}: Try to pause from current LifeCycle state {}", getId(), lc); if (lc.getCurrentState() != LifeCycle.State.NEW) { lc.transition(LifeCycle.State.PAUSING); lc.transition(LifeCycle.State.PAUSED); @@ -414,6 +419,8 @@ public void reinitialize() throws IOException { throw new IOException(e); } + LOG.info("{}: SCMStateMachine is reinitializing. newTermIndex = {}", getId(), termIndex); + // re-initialize the DBTransactionBuffer and update the lastAppliedIndex. try { transactionBuffer.init(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/BigIntegerCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/BigIntegerCodec.java index 5a7e86e99cc..ab753096716 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/BigIntegerCodec.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/BigIntegerCodec.java @@ -38,6 +38,11 @@ private BigIntegerCodec() { // singleton } + @Override + public Class getTypeClass() { + return BigInteger.class; + } + @Override public byte[] toPersistedFormat(BigInteger object) throws IOException { return object.toByteArray(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMDBDefinition.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMDBDefinition.java index 4a280d2103a..ea86fa154af 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMDBDefinition.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMDBDefinition.java @@ -41,75 +41,53 @@ * Class defines the structure and types of the scm.db. 
*/ public class SCMDBDefinition extends DBDefinition.WithMap { - public SCMDBDefinition() { - this(COLUMN_FAMILIES); - } - - protected SCMDBDefinition(Map> map) { - super(map); - } - public static final DBColumnFamilyDefinition DELETED_BLOCKS = new DBColumnFamilyDefinition<>( "deletedBlocks", - Long.class, LongCodec.get(), - DeletedBlocksTransaction.class, Proto2Codec.get(DeletedBlocksTransaction.getDefaultInstance())); public static final DBColumnFamilyDefinition VALID_CERTS = new DBColumnFamilyDefinition<>( "validCerts", - BigInteger.class, BigIntegerCodec.get(), - X509Certificate.class, X509CertificateCodec.get()); public static final DBColumnFamilyDefinition VALID_SCM_CERTS = new DBColumnFamilyDefinition<>( "validSCMCerts", - BigInteger.class, BigIntegerCodec.get(), - X509Certificate.class, X509CertificateCodec.get()); public static final DBColumnFamilyDefinition PIPELINES = new DBColumnFamilyDefinition<>( "pipelines", - PipelineID.class, PipelineID.getCodec(), - Pipeline.class, Pipeline.getCodec()); public static final DBColumnFamilyDefinition CONTAINERS = new DBColumnFamilyDefinition<>( "containers", - ContainerID.class, ContainerID.getCodec(), - ContainerInfo.class, ContainerInfo.getCodec()); public static final DBColumnFamilyDefinition TRANSACTIONINFO = new DBColumnFamilyDefinition<>( "scmTransactionInfos", - String.class, StringCodec.get(), - TransactionInfo.class, TransactionInfo.getCodec()); public static final DBColumnFamilyDefinition SEQUENCE_ID = new DBColumnFamilyDefinition<>( "sequenceId", - String.class, StringCodec.get(), - Long.class, LongCodec.get()); public static final DBColumnFamilyDefinition> map) { MOVE = new DBColumnFamilyDefinition<>( "move", - ContainerID.class, ContainerID.getCodec(), - MoveDataNodePair.class, MoveDataNodePair.getCodec()); /** @@ -129,18 +105,14 @@ protected SCMDBDefinition(Map> map) { public static final DBColumnFamilyDefinition META = new DBColumnFamilyDefinition<>( "meta", - String.class, StringCodec.get(), - String.class, StringCodec.get()); public static final DBColumnFamilyDefinition STATEFUL_SERVICE_CONFIG = new DBColumnFamilyDefinition<>( "statefulServiceConfig", - String.class, StringCodec.get(), - ByteString.class, ByteStringCodec.get()); private static final Map> @@ -156,6 +128,16 @@ protected SCMDBDefinition(Map> map) { VALID_CERTS, VALID_SCM_CERTS); + private static final SCMDBDefinition INSTANCE = new SCMDBDefinition(COLUMN_FAMILIES); + + public static SCMDBDefinition get() { + return INSTANCE; + } + + protected SCMDBDefinition(Map> map) { + super(map); + } + @Override public String getName() { return "scm.db"; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java index ec63076b4a6..6aa993f6077 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java @@ -104,7 +104,7 @@ public SCMMetadataStoreImpl(OzoneConfiguration config) public void start(OzoneConfiguration config) throws IOException { if (this.store == null) { - SCMDBDefinition scmdbDefinition = new SCMDBDefinition(); + final SCMDBDefinition scmdbDefinition = SCMDBDefinition.get(); File metaDir = HAUtils.getMetaDir(scmdbDefinition, configuration); // Check if there is a DB Inconsistent Marker in the metaDir. 
This // marker indicates that the DB is in an inconsistent state and hence diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java index 3e7db16c2a0..e0279e8f2f0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java @@ -51,6 +51,11 @@ private X509CertificateCodec() { // singleton } + @Override + public Class getTypeClass() { + return X509Certificate.class; + } + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java index d6f0e89c96d..375d68dfe32 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java @@ -215,9 +215,9 @@ private boolean allPortsMatch(List dns) { if (dns.size() < 2) { return true; } - int port = dns.get(0).getPort(DatanodeDetails.Port.Name.RATIS).getValue(); + int port = dns.get(0).getRatisPort().getValue(); for (int i = 1; i < dns.size(); i++) { - if (dns.get(i).getPort(DatanodeDetails.Port.Name.RATIS).getValue() + if (dns.get(i).getRatisPort().getValue() != port) { return false; } @@ -398,10 +398,12 @@ private synchronized boolean checkIfDecommissionPossible(List d if (opState != NodeOperationalState.IN_SERVICE) { numDecom--; validDns.remove(dn); + LOG.warn("Cannot decommission {} because it is not IN-SERVICE", dn.getHostName()); } } catch (NodeNotFoundException ex) { numDecom--; validDns.remove(dn); + LOG.warn("Cannot decommission {} because it is not found in SCM", dn.getHostName()); } } @@ -430,9 +432,11 @@ private synchronized boolean checkIfDecommissionPossible(List d } int reqNodes = cif.getReplicationConfig().getRequiredNodes(); if ((inServiceTotal - numDecom) < reqNodes) { + int unHealthyTotal = nodeManager.getAllNodes().size() - inServiceTotal; String errorMsg = "Insufficient nodes. Tried to decommission " + dns.size() + - " nodes of which " + numDecom + " nodes were valid. Cluster has " + inServiceTotal + - " IN-SERVICE nodes, " + reqNodes + " of which are required for minimum replication. "; + " nodes out of " + inServiceTotal + " IN-SERVICE HEALTHY and " + unHealthyTotal + + " not IN-SERVICE or not HEALTHY nodes. Cannot decommission as a minimum of " + reqNodes + + " IN-SERVICE HEALTHY nodes are required to maintain replication after decommission. 
"; LOG.info(errorMsg + "Failing due to datanode : {}, container : {}", dn, cid); errors.add(new DatanodeAdminError("AllHosts", errorMsg)); return false; @@ -552,10 +556,12 @@ private synchronized boolean checkIfMaintenancePossible(List dn if (opState != NodeOperationalState.IN_SERVICE) { numMaintenance--; validDns.remove(dn); + LOG.warn("{} cannot enter maintenance because it is not IN-SERVICE", dn.getHostName()); } } catch (NodeNotFoundException ex) { numMaintenance--; validDns.remove(dn); + LOG.warn("{} cannot enter maintenance because it is not found in SCM", dn.getHostName()); } } @@ -594,9 +600,11 @@ private synchronized boolean checkIfMaintenancePossible(List dn minInService = maintenanceReplicaMinimum; } if ((inServiceTotal - numMaintenance) < minInService) { + int unHealthyTotal = nodeManager.getAllNodes().size() - inServiceTotal; String errorMsg = "Insufficient nodes. Tried to start maintenance for " + dns.size() + - " nodes of which " + numMaintenance + " nodes were valid. Cluster has " + inServiceTotal + - " IN-SERVICE nodes, " + minInService + " of which are required for minimum replication. "; + " nodes out of " + inServiceTotal + " IN-SERVICE HEALTHY and " + unHealthyTotal + + " not IN-SERVICE or not HEALTHY nodes. Cannot enter maintenance mode as a minimum of " + minInService + + " IN-SERVICE HEALTHY nodes are required to maintain replication after maintenance. "; LOG.info(errorMsg + "Failing due to datanode : {}, container : {}", dn, cid); errors.add(new DatanodeAdminError("AllHosts", errorMsg)); return false; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java index dcc7bdb3fba..b931f122a97 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; import org.apache.hadoop.hdds.scm.events.SCMEvents; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.safemode.SafeModeManager; import org.apache.hadoop.hdds.scm.server @@ -91,35 +92,45 @@ public void onMessage(PipelineReportFromDatanode pipelineReportFromDatanode, for (PipelineReport report : pipelineReport.getPipelineReportList()) { try { processPipelineReport(report, dn, publisher); - } catch (NotLeaderException ex) { - // Avoid NotLeaderException logging which happens when processing - // pipeline report on followers. } catch (PipelineNotFoundException e) { - LOGGER.error("Could not find pipeline {}", report.getPipelineID()); + handlePipelineNotFoundException(report, dn, publisher); } catch (IOException | TimeoutException e) { - LOGGER.error("Could not process pipeline report={} from dn={}.", - report, dn, e); + // Ignore NotLeaderException logging which happens when processing + // pipeline report on followers. 
+ if (!isNotLeaderException(e)) { + LOGGER.error("Could not process pipeline report={} from dn={}.", + report, dn, e); + } } } } - protected void processPipelineReport(PipelineReport report, - DatanodeDetails dn, EventPublisher publisher) - throws IOException, TimeoutException { - PipelineID pipelineID = PipelineID.getFromProtobuf(report.getPipelineID()); - Pipeline pipeline; - try { - pipeline = pipelineManager.getPipeline(pipelineID); - } catch (PipelineNotFoundException e) { - if (scmContext.isLeader()) { - LOGGER.info("Reported pipeline {} is not found", pipelineID); - SCMCommand< ? > command = new ClosePipelineCommand(pipelineID); + private void handlePipelineNotFoundException(final PipelineReport report, + final DatanodeDetails dn, final EventPublisher publisher) { + final PipelineID pipelineID = PipelineID.getFromProtobuf(report.getPipelineID()); + LOGGER.info("Pipeline {}, reported by datanode {} is not found.", pipelineID, dn); + if (scmContext.isLeader()) { + try { + final SCMCommand command = new ClosePipelineCommand(pipelineID); command.setTerm(scmContext.getTermOfLeader()); publisher.fireEvent(SCMEvents.DATANODE_COMMAND, new CommandForDatanode<>(dn.getUuid(), command)); + } catch (NotLeaderException ex) { + // Do nothing if the leader has changed. } - return; } + } + + private static boolean isNotLeaderException(final Exception e) { + return e instanceof SCMException && ((SCMException) e).getResult().equals( + SCMException.ResultCodes.SCM_NOT_LEADER); + } + + protected void processPipelineReport(PipelineReport report, + DatanodeDetails dn, EventPublisher publisher) + throws IOException, TimeoutException { + final PipelineID pipelineID = PipelineID.getFromProtobuf(report.getPipelineID()); + final Pipeline pipeline = pipelineManager.getPipeline(pipelineID); setReportedDatanode(pipeline, dn); setPipelineLeaderId(report, pipeline, dn); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java index e77e2aebb31..c1431845ce1 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateBlockResponse; @@ -216,7 +217,7 @@ public AllocateScmBlockResponseProto allocateScmBlock( for (AllocatedBlock block : allocatedBlocks) { builder.addBlocks(AllocateBlockResponse.newBuilder() .setContainerBlockID(block.getBlockID().getProtobuf()) - .setPipeline(block.getPipeline().getProtobufMessage(clientVersion))); + .setPipeline(block.getPipeline().getProtobufMessage(clientVersion, Name.IO_PORTS))); } return builder.build(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java index 62b27f503e2..80ea82ed521 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java @@ -117,6 +117,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ResetDeletedBlockRetryCountResponseProto; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; @@ -865,21 +866,21 @@ public SCMListContainerResponseProto listContainer( } else if (request.hasFactor()) { factor = request.getFactor(); } - List containerList; + ContainerListResult containerListAndTotalCount; if (factor != null) { // Call from a legacy client - containerList = + containerListAndTotalCount = impl.listContainer(startContainerID, count, state, factor); } else { - containerList = - impl.listContainer(startContainerID, count, state, replicationType, - repConfig); + containerListAndTotalCount = + impl.listContainer(startContainerID, count, state, replicationType, repConfig); } SCMListContainerResponseProto.Builder builder = SCMListContainerResponseProto.newBuilder(); - for (ContainerInfo container : containerList) { + for (ContainerInfo container : containerListAndTotalCount.getContainerInfoList()) { builder.addContainers(container.getProtobuf()); } + builder.setContainerCount(containerListAndTotalCount.getTotalCount()); return builder.build(); } @@ -1009,6 +1010,7 @@ public HddsProtos.GetScmInfoResponseProto getScmInfo( .setClusterId(scmInfo.getClusterId()) .setScmId(scmInfo.getScmId()) .addAllPeerRoles(scmInfo.getRatisPeerRoles()) + .setScmRatisEnabled(scmInfo.getScmRatisEnabled()) .build(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java index ae645858a33..bdd7160de4c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java @@ -19,16 +19,25 @@ import java.util.List; import java.util.Map; -import java.util.Optional; +import java.util.UUID; +import java.util.Set; +import java.util.HashSet; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; +import com.google.common.collect.Sets; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; +import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import 
org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer.NodeRegistrationContainerReport; import org.apache.hadoop.hdds.server.events.EventQueue; @@ -50,12 +59,21 @@ public class ContainerSafeModeRule extends // Required cutoff % for containers with at least 1 reported replica. private double safeModeCutoff; // Containers read from scm db (excluding containers in ALLOCATED state). - private Map containerMap; - private double maxContainer; - - private AtomicLong containerWithMinReplicas = new AtomicLong(0); + private Set ratisContainers; + private Set ecContainers; + private Map> ecContainerDNsMap; + private double ratisMaxContainer; + private double ecMaxContainer; + private AtomicLong ratisContainerWithMinReplicas = new AtomicLong(0); + private AtomicLong ecContainerWithMinReplicas = new AtomicLong(0); private final ContainerManager containerManager; + public ContainerSafeModeRule(String ruleName, EventQueue eventQueue, + ConfigurationSource conf, + ContainerManager containerManager, SCMSafeModeManager manager) { + this(ruleName, eventQueue, conf, containerManager.getContainers(), containerManager, manager); + } + public ContainerSafeModeRule(String ruleName, EventQueue eventQueue, ConfigurationSource conf, List containers, @@ -71,127 +89,268 @@ public ContainerSafeModeRule(String ruleName, EventQueue eventQueue, HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT + " value should be >= 0.0 and <= 1.0"); - containerMap = new ConcurrentHashMap<>(); - containers.forEach(container -> { - // There can be containers in OPEN/CLOSING state which were never - // created by the client. We are not considering these containers for - // now. These containers can be handled by tracking pipelines. 
- - Optional.ofNullable(container.getState()) - .filter(state -> (state == HddsProtos.LifeCycleState.QUASI_CLOSED || - state == HddsProtos.LifeCycleState.CLOSED) - && container.getNumberOfKeys() > 0) - .ifPresent(s -> containerMap.put(container.getContainerID(), - container)); - }); - maxContainer = containerMap.size(); - long cutOff = (long) Math.ceil(maxContainer * safeModeCutoff); - getSafeModeMetrics().setNumContainerWithOneReplicaReportedThreshold(cutOff); + ratisContainers = new HashSet<>(); + ecContainers = new HashSet<>(); + ecContainerDNsMap = new ConcurrentHashMap<>(); - LOG.info("containers with one replica threshold count {}", cutOff); + initializeRule(containers); } @Override protected TypedEvent getEventType() { - return SCMEvents.NODE_REGISTRATION_CONT_REPORT; + return SCMEvents.CONTAINER_REGISTRATION_REPORT; } - @Override protected synchronized boolean validate() { - return getCurrentContainerThreshold() >= safeModeCutoff; + return (getCurrentContainerThreshold() >= safeModeCutoff) && + (getCurrentECContainerThreshold() >= safeModeCutoff); } @VisibleForTesting public synchronized double getCurrentContainerThreshold() { - if (maxContainer == 0) { + if (ratisMaxContainer == 0) { + return 1; + } + return (ratisContainerWithMinReplicas.doubleValue() / ratisMaxContainer); + } + + @VisibleForTesting + public synchronized double getCurrentECContainerThreshold() { + if (ecMaxContainer == 0) { return 1; } - return (containerWithMinReplicas.doubleValue() / maxContainer); + return (ecContainerWithMinReplicas.doubleValue() / ecMaxContainer); + } + + private synchronized double getEcMaxContainer() { + if (ecMaxContainer == 0) { + return 1; + } + return ecMaxContainer; + } + + private synchronized double getRatisMaxContainer() { + if (ratisMaxContainer == 0) { + return 1; + } + return ratisMaxContainer; } @Override protected synchronized void process( NodeRegistrationContainerReport reportsProto) { + DatanodeDetails datanodeDetails = reportsProto.getDatanodeDetails(); + UUID datanodeUUID = datanodeDetails.getUuid(); + StorageContainerDatanodeProtocolProtos.ContainerReportsProto report = reportsProto.getReport(); - reportsProto.getReport().getReportsList().forEach(c -> { - if (containerMap.containsKey(c.getContainerID())) { - if (containerMap.remove(c.getContainerID()) != null) { - containerWithMinReplicas.getAndAdd(1); - getSafeModeMetrics() - .incCurrentContainersWithOneReplicaReportedCount(); - } + report.getReportsList().forEach(c -> { + long containerID = c.getContainerID(); + + // If it is a Ratis container. + if (ratisContainers.contains(containerID)) { + recordReportedContainer(containerID, Boolean.FALSE); + ratisContainers.remove(containerID); + } + + // If it is an EC container. + if (ecContainers.contains(containerID)) { + putInContainerDNsMap(containerID, ecContainerDNsMap, datanodeUUID); + recordReportedContainer(containerID, Boolean.TRUE); } }); if (scmInSafeMode()) { SCMSafeModeManager.getLogger().info( - "SCM in safe mode. {} % containers have at least one" - + " reported replica.", - (containerWithMinReplicas.doubleValue() / maxContainer) * 100); + "SCM in safe mode. {} % containers [Ratis] have at least one" + + " reported replica, {} % containers [EC] have at least N reported replicas.", + ((ratisContainerWithMinReplicas.doubleValue() / getRatisMaxContainer()) * 100), + ((ecContainerWithMinReplicas.doubleValue() / getEcMaxContainer()) * 100) + ); + } + } + + /** + * Record a reported container. + * + * Reported containers are counted separately depending on their replication type (Ratis or EC).
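/*
 * Illustrative sketch (not part of the patch): the dual threshold that
 * validate() above now enforces, evaluated on example numbers. The helper
 * method and the sample values are hypothetical; the cutoff semantics follow
 * the code above.
 */
static boolean canExitSafeMode(double ratisReported, double ratisMax,
    double ecReported, double ecMax, double safeModeCutoff) {
  double ratisRatio = ratisMax == 0 ? 1 : ratisReported / ratisMax;
  double ecRatio = ecMax == 0 ? 1 : ecReported / ecMax;
  return ratisRatio >= safeModeCutoff && ecRatio >= safeModeCutoff;
}
// With cutoff 0.99, 1000 Ratis and 200 EC containers loaded from the SCM DB:
// canExitSafeMode(990, 1000, 198, 200, 0.99) -> true
// canExitSafeMode(990, 1000, 150, 200, 0.99) -> false (EC containers only at 75 %)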
+ * + * @param containerID the ID of the reported container. + * @param isEcContainer true if the container is an EC container, false if it is a Ratis container. + */ + private void recordReportedContainer(long containerID, boolean isEcContainer) { + + int uuids = 1; + if (isEcContainer && ecContainerDNsMap.containsKey(containerID)) { + uuids = ecContainerDNsMap.get(containerID).size(); + } + + int minReplica = getMinReplica(containerID); + if (uuids >= minReplica) { + if (isEcContainer) { + getSafeModeMetrics() + .incCurrentContainersWithECDataReplicaReportedCount(); + ecContainerWithMinReplicas.getAndAdd(1); + } else { + ratisContainerWithMinReplicas.getAndAdd(1); + getSafeModeMetrics() + .incCurrentContainersWithOneReplicaReportedCount(); + } + } + } + + /** + * Get the minimum required replica count. + * + * If it is a Ratis container, the minimum replica count is 1. + * If it is an EC container, the minimum replica count is the number of data blocks in its replicationConfig. + * + * @param pContainerID containerID + * @return the minimum replica count. + */ + private int getMinReplica(long pContainerID) { + + try { + ContainerID containerID = ContainerID.valueOf(pContainerID); + ContainerInfo container = containerManager.getContainer(containerID); + ReplicationConfig replicationConfig = container.getReplicationConfig(); + return replicationConfig.getMinimumNodes(); + } catch (ContainerNotFoundException e) { + LOG.error("containerId = {} not found.", pContainerID, e); + } catch (Exception e) { + LOG.error("Failed to get the replication config of container {}.", pContainerID, e); } + + return 1; + } + + private void putInContainerDNsMap(long containerID, Map<Long, Set<UUID>> containerDNsMap, + UUID datanodeUUID) { + containerDNsMap.computeIfAbsent(containerID, key -> Sets.newHashSet()); + containerDNsMap.get(containerID).add(datanodeUUID); } @Override protected synchronized void cleanup() { - containerMap.clear(); + ratisContainers.clear(); + ecContainers.clear(); + ecContainerDNsMap.clear(); } @Override public String getStatusText() { - List sampleContainers = containerMap.keySet() - .stream() - .limit(SAMPLE_CONTAINER_DISPLAY_LIMIT) - .collect(Collectors.toList()); - String status = String.format("%% of containers with at least one reported" - + " replica (=%1.2f) >= safeModeCutoff (=%1.2f)", + // Ratis containers + String status = String.format( + "%1.2f%% of [Ratis] Containers(%s / %s) with at least one reported replica (=%1.2f) >= " + + "safeModeCutoff (=%1.2f);", + (ratisContainerWithMinReplicas.doubleValue() / getRatisMaxContainer()) * 100, + ratisContainerWithMinReplicas, (long) getRatisMaxContainer(), getCurrentContainerThreshold(), this.safeModeCutoff); - if (!sampleContainers.isEmpty()) { + Set<Long> sampleRatisContainers = ratisContainers.stream(). + limit(SAMPLE_CONTAINER_DISPLAY_LIMIT). + collect(Collectors.toSet()); + + if (!sampleRatisContainers.isEmpty()) { String sampleContainerText = - "Sample containers not satisfying the criteria : " + sampleContainers; + "Sample Ratis Containers not satisfying the criteria : " + sampleRatisContainers + ";"; status = status.concat("\n").concat(sampleContainerText); } + // EC containers + String ecStatus = String.format( + "%1.2f%% of [EC] Containers(%s / %s) with at least N reported replicas (=%1.2f) >= " + + "safeModeCutoff (=%1.2f);", + (ecContainerWithMinReplicas.doubleValue() / getEcMaxContainer()) * 100, + ecContainerWithMinReplicas, (long) getEcMaxContainer(), + getCurrentECContainerThreshold(), this.safeModeCutoff); + status = status.concat("\n").concat(ecStatus); + + Set<Long> sampleEcContainers = ecContainerDNsMap.entrySet().stream().
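/*
 * Illustrative sketch (not part of the patch): what getMinReplica() above is
 * expected to resolve to, assuming getMinimumNodes() matches the javadoc
 * (1 for a Ratis container, the data-block count for an EC container). The
 * helper name is hypothetical; the ReplicationConfig constructors mirror the
 * test utilities later in this patch.
 */
static int minReportedReplicasFor(ReplicationConfig replicationConfig) {
  return replicationConfig.getMinimumNodes();
}
// minReportedReplicasFor(new ECReplicationConfig(3, 2))
//     -> 3, i.e. three distinct datanodes must report the container (assumed)
// minReportedReplicasFor(RatisReplicationConfig.getInstance(ReplicationFactor.THREE))
//     -> 1, i.e. a single reported replica is enough (assumed)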
+ filter(entry -> { + Long containerId = entry.getKey(); + int minReplica = getMinReplica(containerId); + Set<UUID> allReplicas = entry.getValue(); + return allReplicas.size() < minReplica; + }). + map(Map.Entry::getKey). + limit(SAMPLE_CONTAINER_DISPLAY_LIMIT). + collect(Collectors.toSet()); + + if (!sampleEcContainers.isEmpty()) { + String sampleECContainerText = + "Sample EC Containers not satisfying the criteria : " + sampleEcContainers + ";"; + status = status.concat("\n").concat(sampleECContainerText); + } + return status; } @Override public synchronized void refresh(boolean forceRefresh) { + List<ContainerInfo> containers = containerManager.getContainers(); if (forceRefresh) { - reInitializeRule(); + initializeRule(containers); } else { if (!validate()) { - reInitializeRule(); + initializeRule(containers); } } } - private void reInitializeRule() { - containerMap.clear(); - containerManager.getContainers().forEach(container -> { + private boolean checkContainerState(LifeCycleState state) { + return state == LifeCycleState.QUASI_CLOSED || state == LifeCycleState.CLOSED; + } + + private void initializeRule(List<ContainerInfo> containers) { + + // Clear the previously tracked container sets. + ratisContainers.clear(); + ecContainers.clear(); + + // Iterate through the container list and classify each + // eligible container by its replication type. + containers.forEach(container -> { // There can be containers in OPEN/CLOSING state which were never // created by the client. We are not considering these containers for // now. These containers can be handled by tracking pipelines. - Optional.ofNullable(container.getState()) - .filter(state -> (state == HddsProtos.LifeCycleState.QUASI_CLOSED || - state == HddsProtos.LifeCycleState.CLOSED) - && container.getNumberOfKeys() > 0) - .ifPresent(s -> containerMap.put(container.getContainerID(), - container)); + LifeCycleState containerState = container.getState(); + HddsProtos.ReplicationType replicationType = container.getReplicationType(); + + if (checkContainerState(containerState) && container.getNumberOfKeys() > 0) { + // If it's of type Ratis + if (replicationType.equals(HddsProtos.ReplicationType.RATIS)) { + ratisContainers.add(container.getContainerID()); + } + + // If it's of type EC + if (replicationType.equals(HddsProtos.ReplicationType.EC)) { + ecContainers.add(container.getContainerID()); + } + } }); - maxContainer = containerMap.size(); - long cutOff = (long) Math.ceil(maxContainer * safeModeCutoff); + ratisMaxContainer = ratisContainers.size(); + ecMaxContainer = ecContainers.size(); - LOG.info("Refreshed one replica container threshold {}, " + - "currentThreshold {}", cutOff, containerWithMinReplicas.get()); - getSafeModeMetrics() - .setNumContainerWithOneReplicaReportedThreshold(cutOff); - } + long ratisCutOff = (long) Math.ceil(ratisMaxContainer * safeModeCutoff); + long ecCutOff = (long) Math.ceil(ecMaxContainer * safeModeCutoff); + + getSafeModeMetrics().setNumContainerWithOneReplicaReportedThreshold(ratisCutOff); + getSafeModeMetrics().setNumContainerWithECDataReplicaReportedThreshold(ecCutOff); + LOG.info("Refreshed Ratis containers with one replica threshold count {}, " + + "EC containers with N data replica threshold count {}.", ratisCutOff, ecCutOff); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java index a5ecdb23425..78ce994af73 100644 ---
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java @@ -23,6 +23,7 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.HddsConfigKeys; @@ -90,7 +91,7 @@ public class SCMSafeModeManager implements SafeModeManager { private AtomicBoolean preCheckComplete = new AtomicBoolean(false); private AtomicBoolean forceExitSafeMode = new AtomicBoolean(false); - private Map exitRules = new HashMap(1); + private Map exitRules = new HashMap<>(1); private Set preCheckRules = new HashSet<>(1); private ConfigurationSource config; private static final String CONT_EXIT_RULE = "ContainerSafeModeRule"; @@ -110,6 +111,8 @@ public class SCMSafeModeManager implements SafeModeManager { private final SafeModeMetrics safeModeMetrics; + + // TODO: Remove allContainers argument. (HDDS-11795) public SCMSafeModeManager(ConfigurationSource conf, List allContainers, ContainerManager containerManager, PipelineManager pipelineManager, @@ -126,30 +129,17 @@ public SCMSafeModeManager(ConfigurationSource conf, if (isSafeModeEnabled) { this.safeModeMetrics = SafeModeMetrics.create(); - ContainerSafeModeRule containerSafeModeRule = - new ContainerSafeModeRule(CONT_EXIT_RULE, eventQueue, config, - allContainers, containerManager, this); - DataNodeSafeModeRule dataNodeSafeModeRule = - new DataNodeSafeModeRule(DN_EXIT_RULE, eventQueue, config, this); - exitRules.put(CONT_EXIT_RULE, containerSafeModeRule); - exitRules.put(DN_EXIT_RULE, dataNodeSafeModeRule); - preCheckRules.add(DN_EXIT_RULE); - if (conf.getBoolean( - HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, - HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK_DEFAULT) - && pipelineManager != null) { - HealthyPipelineSafeModeRule healthyPipelineSafeModeRule = - new HealthyPipelineSafeModeRule(HEALTHY_PIPELINE_EXIT_RULE, - eventQueue, pipelineManager, - this, config, scmContext); - OneReplicaPipelineSafeModeRule oneReplicaPipelineSafeModeRule = - new OneReplicaPipelineSafeModeRule( - ATLEAST_ONE_DATANODE_REPORTED_PIPELINE_EXIT_RULE, eventQueue, - pipelineManager, this, conf); - exitRules.put(HEALTHY_PIPELINE_EXIT_RULE, healthyPipelineSafeModeRule); - exitRules.put(ATLEAST_ONE_DATANODE_REPORTED_PIPELINE_EXIT_RULE, - oneReplicaPipelineSafeModeRule); - } + + // TODO: Remove the cyclic ("this") dependency (HDDS-11797) + SafeModeRuleFactory.initialize(config, scmContext, eventQueue, + this, pipelineManager, containerManager); + SafeModeRuleFactory factory = SafeModeRuleFactory.getInstance(); + + exitRules = factory.getSafeModeRules().stream().collect( + Collectors.toMap(SafeModeExitRule::getRuleName, rule -> rule)); + + preCheckRules = factory.getPreCheckRules().stream() + .map(SafeModeExitRule::getRuleName).collect(Collectors.toSet()); } else { this.safeModeMetrics = null; exitSafeMode(eventQueue, true); @@ -341,6 +331,17 @@ public double getCurrentContainerThreshold() { .getCurrentContainerThreshold(); } + @VisibleForTesting + public double getCurrentECContainerThreshold() { + return ((ContainerSafeModeRule) exitRules.get(CONT_EXIT_RULE)) + .getCurrentECContainerThreshold(); + } + + @VisibleForTesting + public ContainerSafeModeRule getContainerSafeModeRule() { + return (ContainerSafeModeRule) exitRules.get(CONT_EXIT_RULE); + } + @VisibleForTesting public 
HealthyPipelineSafeModeRule getHealthyPipelineSafeModeRule() { return (HealthyPipelineSafeModeRule) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java index 02bc10ba6e4..44c77ac3de8 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java @@ -36,8 +36,12 @@ public class SafeModeMetrics { // These all values will be set to some values when safemode is enabled. private @Metric MutableGaugeLong numContainerWithOneReplicaReportedThreshold; + private @Metric MutableGaugeLong + numContainerWithECDataReplicaReportedThreshold; private @Metric MutableCounterLong currentContainersWithOneReplicaReportedCount; + private @Metric MutableCounterLong + currentContainersWithECDataReplicaReportedCount; // When hdds.scm.safemode.pipeline-availability.check is set then only // below metrics will have some values, otherwise they will be zero. @@ -75,10 +79,18 @@ public void setNumContainerWithOneReplicaReportedThreshold(long val) { this.numContainerWithOneReplicaReportedThreshold.set(val); } + public void setNumContainerWithECDataReplicaReportedThreshold(long val) { + this.numContainerWithECDataReplicaReportedThreshold.set(val); + } + public void incCurrentContainersWithOneReplicaReportedCount() { this.currentContainersWithOneReplicaReportedCount.incr(); } + public void incCurrentContainersWithECDataReplicaReportedCount() { + this.currentContainersWithECDataReplicaReportedCount.incr(); + } + MutableGaugeLong getNumHealthyPipelinesThreshold() { return numHealthyPipelinesThreshold; } @@ -100,6 +112,10 @@ MutableGaugeLong getNumContainerWithOneReplicaReportedThreshold() { return numContainerWithOneReplicaReportedThreshold; } + MutableGaugeLong getNumContainerWithECDataReplicaReportedThreshold() { + return numContainerWithECDataReplicaReportedThreshold; + } + MutableCounterLong getCurrentContainersWithOneReplicaReportedCount() { return currentContainersWithOneReplicaReportedCount; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeRuleFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeRuleFactory.java new file mode 100644 index 00000000000..8e75f51b962 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeRuleFactory.java @@ -0,0 +1,131 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.  See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.  The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.  You may obtain a copy of the License at + * + *      http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdds.scm.safemode; + + +import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.ha.SCMContext; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; +import org.apache.hadoop.hdds.server.events.EventQueue; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.List; + +/** + * Factory to create SafeMode rules. + */ +public final class SafeModeRuleFactory { + + + private static final Logger LOG = LoggerFactory.getLogger(SafeModeRuleFactory.class); + + // TODO: Move the rule names to respective rules. (HDDS-11798) + private static final String CONT_EXIT_RULE = "ContainerSafeModeRule"; + private static final String DN_EXIT_RULE = "DataNodeSafeModeRule"; + private static final String HEALTHY_PIPELINE_EXIT_RULE = + "HealthyPipelineSafeModeRule"; + private static final String ATLEAST_ONE_DATANODE_REPORTED_PIPELINE_EXIT_RULE = + "AtleastOneDatanodeReportedRule"; + + private final ConfigurationSource config; + private final SCMContext scmContext; + private final EventQueue eventQueue; + + // TODO: Remove dependency on safeModeManager (HDDS-11797) + private final SCMSafeModeManager safeModeManager; + private final PipelineManager pipelineManager; + private final ContainerManager containerManager; + + private final List> safeModeRules; + private final List> preCheckRules; + + private static SafeModeRuleFactory instance; + + private SafeModeRuleFactory(final ConfigurationSource config, + final SCMContext scmContext, + final EventQueue eventQueue, + final SCMSafeModeManager safeModeManager, + final PipelineManager pipelineManager, + final ContainerManager containerManager) { + this.config = config; + this.scmContext = scmContext; + this.eventQueue = eventQueue; + this.safeModeManager = safeModeManager; + this.pipelineManager = pipelineManager; + this.containerManager = containerManager; + this.safeModeRules = new ArrayList<>(); + this.preCheckRules = new ArrayList<>(); + loadRules(); + } + + private void loadRules() { + // TODO: Use annotation to load the rules. (HDDS-11730) + safeModeRules.add(new ContainerSafeModeRule(CONT_EXIT_RULE, eventQueue, config, + containerManager, safeModeManager)); + SafeModeExitRule dnRule = new DataNodeSafeModeRule(DN_EXIT_RULE, eventQueue, config, safeModeManager); + safeModeRules.add(dnRule); + preCheckRules.add(dnRule); + + // TODO: Move isRuleEnabled check to the Rule implementation. (HDDS-11799) + if (config.getBoolean( + HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, + HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK_DEFAULT) + && pipelineManager != null) { + + safeModeRules.add(new HealthyPipelineSafeModeRule(HEALTHY_PIPELINE_EXIT_RULE, + eventQueue, pipelineManager, safeModeManager, config, scmContext)); + safeModeRules.add(new OneReplicaPipelineSafeModeRule( + ATLEAST_ONE_DATANODE_REPORTED_PIPELINE_EXIT_RULE, eventQueue, + pipelineManager, safeModeManager, config)); + } + + } + + public static synchronized SafeModeRuleFactory getInstance() { + if (instance != null) { + return instance; + } + throw new IllegalStateException("SafeModeRuleFactory not initialized," + + " call initialize method before getInstance."); + } + + // TODO: Refactor and reduce the arguments. 
(HDDS-11800) + public static synchronized void initialize( + final ConfigurationSource config, + final SCMContext scmContext, + final EventQueue eventQueue, + final SCMSafeModeManager safeModeManager, + final PipelineManager pipelineManager, + final ContainerManager containerManager) { + instance = new SafeModeRuleFactory(config, scmContext, eventQueue, + safeModeManager, pipelineManager, containerManager); + } + + public List> getSafeModeRules() { + return safeModeRules; + } + + public List> getPreCheckRules() { + return preCheckRules; + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/SecretKeyManagerService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/SecretKeyManagerService.java index 6b77350cc8c..50c7401dbb0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/SecretKeyManagerService.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/SecretKeyManagerService.java @@ -158,7 +158,6 @@ public void stop() { } public static boolean isSecretKeyEnable(SecurityConfig conf) { - return conf.isSecurityEnabled() && - (conf.isBlockTokenEnabled() || conf.isContainerTokenEnabled()); + return conf.isSecurityEnabled(); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index 3bf8d9c55ca..e8796716fd9 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -37,10 +37,8 @@ import org.apache.hadoop.hdds.protocol.proto.ReconfigureProtocolProtos.ReconfigureProtocolService; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfoResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfo; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmResponseProto.Builder; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.NodeTransferInfo; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartContainerBalancerResponseProto; import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolPB; import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolServerSideTranslatorPB; @@ -48,6 +46,7 @@ import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.FetchMetrics; import org.apache.hadoop.hdds.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; @@ -109,6 +108,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; @@ -421,11 +421,12 @@ private boolean hasRequiredReplicas(ContainerInfo contInfo) { * @param startContainerID start containerID. 
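/*
 * Illustrative sketch (not part of the patch): the call sequence the new
 * SafeModeRuleFactory expects, mirroring the SCMSafeModeManager wiring earlier
 * in this patch. getInstance() throws IllegalStateException if initialize()
 * has not been called first. Variable names are hypothetical.
 */
SafeModeRuleFactory.initialize(conf, scmContext, eventQueue,
    safeModeManager, pipelineManager, containerManager);
SafeModeRuleFactory factory = SafeModeRuleFactory.getInstance();

Map<String, SafeModeExitRule<?>> exitRules = factory.getSafeModeRules().stream()
    .collect(Collectors.toMap(SafeModeExitRule::getRuleName, rule -> rule));
Set<String> preCheckRules = factory.getPreCheckRules().stream()
    .map(SafeModeExitRule::getRuleName)
    .collect(Collectors.toSet());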
* @param count count must be {@literal >} 0. * - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ @Override - public List listContainer(long startContainerID, + public ContainerListResult listContainer(long startContainerID, int count) throws IOException { return listContainer(startContainerID, count, null, null, null); } @@ -437,11 +438,12 @@ public List listContainer(long startContainerID, * @param count count must be {@literal >} 0. * @param state Container with this state will be returned. * - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ @Override - public List listContainer(long startContainerID, + public ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException { return listContainer(startContainerID, count, state, null, null); } @@ -453,53 +455,36 @@ public List listContainer(long startContainerID, * @param count count must be {@literal >} 0. * @param state Container with this state will be returned. * @param factor Container factor. - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ @Override @Deprecated - public List listContainer(long startContainerID, + public ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) throws IOException { + return listContainerInternal(startContainerID, count, state, factor, null, null); + } + + private ContainerListResult listContainerInternal(long startContainerID, int count, + HddsProtos.LifeCycleState state, + HddsProtos.ReplicationFactor factor, + HddsProtos.ReplicationType replicationType, + ReplicationConfig repConfig) throws IOException { boolean auditSuccess = true; - Map auditMap = Maps.newHashMap(); - auditMap.put("startContainerID", String.valueOf(startContainerID)); - auditMap.put("count", String.valueOf(count)); - if (state != null) { - auditMap.put("state", state.name()); - } - if (factor != null) { - auditMap.put("factor", factor.name()); - } + Map auditMap = buildAuditMap(startContainerID, count, state, factor, replicationType, repConfig); + try { - final ContainerID containerId = ContainerID.valueOf(startContainerID); - if (state != null) { - if (factor != null) { - return scm.getContainerManager().getContainers(state).stream() - .filter(info -> info.containerID().getId() >= startContainerID) - //Filtering EC replication type as EC will not have factor. - .filter(info -> info - .getReplicationType() != HddsProtos.ReplicationType.EC) - .filter(info -> (info.getReplicationFactor() == factor)) - .sorted().limit(count).collect(Collectors.toList()); - } else { - return scm.getContainerManager().getContainers(state).stream() - .filter(info -> info.containerID().getId() >= startContainerID) - .sorted().limit(count).collect(Collectors.toList()); - } - } else { - if (factor != null) { - return scm.getContainerManager().getContainers().stream() - .filter(info -> info.containerID().getId() >= startContainerID) - //Filtering EC replication type as EC will not have factor. 
- .filter(info -> info - .getReplicationType() != HddsProtos.ReplicationType.EC) - .filter(info -> info.getReplicationFactor() == factor) - .sorted().limit(count).collect(Collectors.toList()); - } else { - return scm.getContainerManager().getContainers(containerId, count); - } - } + Stream containerStream = + buildContainerStream(factor, replicationType, repConfig, getBaseContainerStream(state)); + List containerInfos = + containerStream.filter(info -> info.containerID().getId() >= startContainerID) + .sorted().collect(Collectors.toList()); + List limitedContainers = + containerInfos.stream().limit(count).collect(Collectors.toList()); + long totalCount = (long) containerInfos.size(); + return new ContainerListResult(limitedContainers, totalCount); } catch (Exception ex) { auditSuccess = false; AUDIT.logReadFailure( @@ -513,74 +498,74 @@ public List listContainer(long startContainerID, } } - /** - * Lists a range of containers and get their info. - * - * @param startContainerID start containerID. - * @param count count must be {@literal >} 0. - * @param state Container with this state will be returned. - * @param repConfig Replication Config for the container. - * @return a list of pipeline. - * @throws IOException - */ - @Override - public List listContainer(long startContainerID, - int count, HddsProtos.LifeCycleState state, + private Stream buildContainerStream(HddsProtos.ReplicationFactor factor, HddsProtos.ReplicationType replicationType, - ReplicationConfig repConfig) throws IOException { - boolean auditSuccess = true; - Map auditMap = Maps.newHashMap(); + ReplicationConfig repConfig, + Stream containerStream) { + if (factor != null) { + containerStream = containerStream.filter(info -> info.getReplicationType() != HddsProtos.ReplicationType.EC) + .filter(info -> info.getReplicationFactor() == factor); + } else if (repConfig != null) { + // If we have repConfig filter by it, as it includes repType too. 
+ // Otherwise, we may have a filter just for repType, eg all EC containers + // without filtering on their replication scheme + containerStream = containerStream + .filter(info -> info.getReplicationConfig().equals(repConfig)); + } else if (replicationType != null) { + containerStream = containerStream.filter(info -> info.getReplicationType() == replicationType); + } + return containerStream; + } + + private Stream getBaseContainerStream(HddsProtos.LifeCycleState state) { + if (state != null) { + return scm.getContainerManager().getContainers(state).stream(); + } else { + return scm.getContainerManager().getContainers().stream(); + } + } + + private Map buildAuditMap(long startContainerID, int count, + HddsProtos.LifeCycleState state, + HddsProtos.ReplicationFactor factor, + HddsProtos.ReplicationType replicationType, + ReplicationConfig repConfig) { + Map auditMap = new HashMap<>(); auditMap.put("startContainerID", String.valueOf(startContainerID)); auditMap.put("count", String.valueOf(count)); if (state != null) { auditMap.put("state", state.name()); } + if (factor != null) { + auditMap.put("factor", factor.name()); + } if (replicationType != null) { auditMap.put("replicationType", replicationType.toString()); } if (repConfig != null) { auditMap.put("replicationConfig", repConfig.toString()); } - try { - final ContainerID containerId = ContainerID.valueOf(startContainerID); - if (state == null && replicationType == null && repConfig == null) { - // Not filters, so just return everything - return scm.getContainerManager().getContainers(containerId, count); - } - List containerList; - if (state != null) { - containerList = scm.getContainerManager().getContainers(state); - } else { - containerList = scm.getContainerManager().getContainers(); - } + return auditMap; + } - Stream containerStream = containerList.stream() - .filter(info -> info.containerID().getId() >= startContainerID); - // If we have repConfig filter by it, as it includes repType too. - // Otherwise, we may have a filter just for repType, eg all EC containers - // without filtering on their replication scheme - if (repConfig != null) { - containerStream = containerStream - .filter(info -> info.getReplicationConfig().equals(repConfig)); - } else if (replicationType != null) { - containerStream = containerStream - .filter(info -> info.getReplicationType() == replicationType); - } - return containerStream.sorted() - .limit(count) - .collect(Collectors.toList()); - } catch (Exception ex) { - auditSuccess = false; - AUDIT.logReadFailure( - buildAuditMessageForFailure(SCMAction.LIST_CONTAINER, auditMap, ex)); - throw ex; - } finally { - if (auditSuccess) { - AUDIT.logReadSuccess( - buildAuditMessageForSuccess(SCMAction.LIST_CONTAINER, auditMap)); - } - } + /** + * Lists a range of containers and get their info. + * + * @param startContainerID start containerID. + * @param count count must be {@literal >} 0. + * @param state Container with this state will be returned. + * @param repConfig Replication Config for the container. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. 
+ * @throws IOException + */ + @Override + public ContainerListResult listContainer(long startContainerID, + int count, HddsProtos.LifeCycleState state, + HddsProtos.ReplicationType replicationType, + ReplicationConfig repConfig) throws IOException { + return listContainerInternal(startContainerID, count, state, null, replicationType, repConfig); } @Override @@ -841,6 +826,7 @@ public ScmInfo getScmInfo() { if (scm.getScmHAManager().getRatisServer() != null) { builder.setRatisPeerRoles( scm.getScmHAManager().getRatisServer().getRatisRoles()); + builder.setScmRatisEnabled(true); } else { // In case, there is no ratis, there is no ratis role. // This will just print the hostname with ratis port as the default @@ -848,6 +834,7 @@ public ScmInfo getScmInfo() { String address = scm.getSCMHANodeDetails().getLocalNodeDetails() .getRatisHostPortStr(); builder.setRatisPeerRoles(Arrays.asList(address)); + builder.setScmRatisEnabled(false); } return builder.build(); } catch (Exception ex) { @@ -1230,48 +1217,7 @@ public ContainerBalancerStatusInfoResponseProto getContainerBalancerStatusInfo() return ContainerBalancerStatusInfoResponseProto .newBuilder() .setIsRunning(true) - .setContainerBalancerStatusInfo(StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfo - .newBuilder() - .setStartedAt(balancerStatusInfo.getStartedAt().toEpochSecond()) - .setConfiguration(balancerStatusInfo.getConfiguration()) - .addAllIterationsStatusInfo( - balancerStatusInfo.getIterationsStatusInfo() - .stream() - .map( - info -> ContainerBalancerTaskIterationStatusInfo.newBuilder() - .setIterationNumber(info.getIterationNumber()) - .setIterationResult(Optional.ofNullable(info.getIterationResult()).orElse("")) - .setSizeScheduledForMoveGB(info.getSizeScheduledForMoveGB()) - .setDataSizeMovedGB(info.getDataSizeMovedGB()) - .setContainerMovesScheduled(info.getContainerMovesScheduled()) - .setContainerMovesCompleted(info.getContainerMovesCompleted()) - .setContainerMovesFailed(info.getContainerMovesFailed()) - .setContainerMovesTimeout(info.getContainerMovesTimeout()) - .addAllSizeEnteringNodesGB( - info.getSizeEnteringNodesGB().entrySet() - .stream() - .map(entry -> NodeTransferInfo.newBuilder() - .setUuid(entry.getKey().toString()) - .setDataVolumeGB(entry.getValue()) - .build() - ) - .collect(Collectors.toList()) - ) - .addAllSizeLeavingNodesGB( - info.getSizeLeavingNodesGB().entrySet() - .stream() - .map(entry -> NodeTransferInfo.newBuilder() - .setUuid(entry.getKey().toString()) - .setDataVolumeGB(entry.getValue()) - .build() - ) - .collect(Collectors.toList()) - ) - .build() - ) - .collect(Collectors.toList()) - ) - ) + .setContainerBalancerStatusInfo(balancerStatusInfo.toProto()) .build(); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java index 6f5429a853b..5a4dc505d84 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java @@ -306,12 +306,20 @@ public static class ContainerReportFromDatanode extends ReportFromDatanode implements ContainerReport, IEventInfo { private long createTime = Time.monotonicNow(); + // Used to identify whether container reporting is from a registration. 
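/*
 * Illustrative sketch (not part of the patch): consuming the new
 * ContainerListResult returned by listContainer() above. getTotalCount() is
 * the number of containers matching the filters, while getContainerInfoList()
 * is capped by the requested count and "ozone.scm.container.list.max.count".
 * The scmClientProtocolServer handle is hypothetical.
 */
ContainerListResult result = scmClientProtocolServer.listContainer(
    0L, 100, HddsProtos.LifeCycleState.CLOSED,
    HddsProtos.ReplicationType.EC, null);
for (ContainerInfo info : result.getContainerInfoList()) {
  // process one page of matching containers
}
long notReturned = result.getTotalCount() - result.getContainerInfoList().size();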
+ private boolean isRegister = false; public ContainerReportFromDatanode(DatanodeDetails datanodeDetails, ContainerReportsProto report) { super(datanodeDetails, report); } + public ContainerReportFromDatanode(DatanodeDetails datanodeDetails, + ContainerReportsProto report, boolean isRegister) { + super(datanodeDetails, report); + this.isRegister = isRegister; + } + @Override public boolean equals(Object o) { return this == o; @@ -331,6 +339,10 @@ public long getCreateTime() { return createTime; } + public boolean isRegister() { + return isRegister; + } + @Override public String getEventId() { return getDatanodeDetails().toString() + ", {type: " + getType() diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java index 7c6f0fbbddf..b230e3c12f7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java @@ -253,7 +253,7 @@ public SCMRegisteredResponseProto register( == SCMRegisteredResponseProto.ErrorCode.success) { eventPublisher.fireEvent(CONTAINER_REPORT, new SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode( - datanodeDetails, containerReportsProto)); + datanodeDetails, containerReportsProto, true)); eventPublisher.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, new NodeRegistrationContainerReport(datanodeDetails, containerReportsProto)); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java index 75a5193116c..f54ec30985b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java @@ -84,4 +84,11 @@ public interface SCMMXBean extends ServiceRuntimeInfo { String getRatisLogDirectory(); String getRocksDbDirectory(); + + /** + * Gets the SCM hostname. + * + * @return the SCM hostname for the datanode. + */ + String getHostname(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 8f7a7c2f9f1..a53a568eaf0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -333,6 +333,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl private Clock systemClock; private DNSToSwitchMapping dnsToSwitchMapping; + private String scmHostName; + /** * Creates a new StorageContainerManager. Configuration will be * updated with information on the actual listening addresses used @@ -457,6 +459,7 @@ private StorageContainerManager(OzoneConfiguration conf, // Emit initial safe mode status, as now handlers are registered. scmSafeModeManager.emitSafeModeStatus(); + scmHostName = HddsUtils.getHostName(conf); registerMXBean(); registerMetricsSource(this); @@ -1600,8 +1603,9 @@ public void start() throws IOException { setStartTime(); - // At this point leader is not known - scmHAMetricsUpdate(null); + RaftPeerId leaderId = SCMHAUtils.isSCMHAEnabled(configuration) + ? 
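/*
 * Illustrative sketch (not part of the patch): how the registration path above
 * tags a full container report so that downstream handlers can distinguish it
 * from a routine heartbeat report via isRegister().
 */
ContainerReportFromDatanode registrationReport =
    new ContainerReportFromDatanode(datanodeDetails, containerReportsProto, true);
if (registrationReport.isRegister()) {
  // e.g. handle registration-time reports eagerly so safe mode rules see them early
}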
getScmHAManager().getRatisServer().getLeaderId() : null; + scmHAMetricsUpdate(Objects.toString(leaderId, null)); if (scmCertificateClient != null) { // In case root CA certificate is rotated during this SCM is offline @@ -2068,6 +2072,10 @@ public StatefulServiceStateManager getStatefulServiceStateManager() { return statefulServiceStateManager; } + @Override + public String getNamespace() { + return scmHANodeDetails.getLocalNodeDetails().getServiceId(); + } /** * Get the safe mode status of all rules. * @@ -2223,6 +2231,11 @@ public String getRocksDbDirectory() { return String.valueOf(ServerUtils.getScmDbDir(configuration)); } + @Override + public String getHostname() { + return scmHostName; + } + public Collection getScmAdminUsernames() { return scmAdmins.getAdminUsernames(); } @@ -2299,7 +2312,6 @@ public void scmHAMetricsUpdate(String leaderId) { // unregister, in case metrics already exist // so that the metric tags will get updated. SCMHAMetrics.unRegister(); - scmHAMetrics = SCMHAMetrics.create(getScmId(), leaderId); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/FinalizationManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/FinalizationManagerImpl.java index a3ea5189c3d..520a550e293 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/FinalizationManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/FinalizationManagerImpl.java @@ -183,7 +183,6 @@ public void onLeaderReady() { /** * Builds a {@link FinalizationManagerImpl}. */ - @SuppressWarnings("checkstyle:hiddenfield") public static class Builder { private OzoneConfiguration conf; private HDDSLayoutVersionManager versionManager; @@ -196,14 +195,14 @@ public Builder() { executor = new DefaultUpgradeFinalizationExecutor<>(); } - public Builder setConfiguration(OzoneConfiguration conf) { - this.conf = conf; + public Builder setConfiguration(OzoneConfiguration configuration) { + this.conf = configuration; return this; } public Builder setLayoutVersionManager( - HDDSLayoutVersionManager versionManager) { - this.versionManager = versionManager; + HDDSLayoutVersionManager layoutVersionManager) { + this.versionManager = layoutVersionManager; return this; } @@ -212,8 +211,8 @@ public Builder setStorage(SCMStorageConfig storage) { return this; } - public Builder setHAManager(SCMHAManager scmHAManager) { - this.scmHAManager = scmHAManager; + public Builder setHAManager(SCMHAManager haManager) { + this.scmHAManager = haManager; return this; } @@ -224,8 +223,8 @@ public Builder setFinalizationStore( } public Builder setFinalizationExecutor( - UpgradeFinalizationExecutor executor) { - this.executor = executor; + UpgradeFinalizationExecutor finalizationExecutor) { + this.executor = finalizationExecutor; return this; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizationContext.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizationContext.java index 95c85a4c744..d166f8774f6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizationContext.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/SCMUpgradeFinalizationContext.java @@ -80,7 +80,6 @@ public SCMStorageConfig getStorage() { /** * Builds an {@link SCMUpgradeFinalizationContext}. 
*/ - @SuppressWarnings("checkstyle:hiddenfield") public static final class Builder { private PipelineManager pipelineManager; private NodeManager nodeManager; @@ -120,13 +119,13 @@ public Builder setStorage(SCMStorageConfig storage) { } public Builder setLayoutVersionManager( - HDDSLayoutVersionManager versionManager) { - this.versionManager = versionManager; + HDDSLayoutVersionManager layoutVersionManager) { + this.versionManager = layoutVersionManager; return this; } - public Builder setConfiguration(OzoneConfiguration conf) { - this.conf = conf; + public Builder setConfiguration(OzoneConfiguration configuration) { + this.conf = configuration; return this; } diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html index 2748716e67f..7bfe405850e 100644 --- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html +++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html @@ -387,7 +387,7 @@

    Safemode rules statuses

    - + diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js index fc216c06862..eca79852e43 100644 --- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js +++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js @@ -24,7 +24,7 @@ require: { overview: "^overview" }, - controller: function ($http,$scope) { + controller: function ($http,$scope,$sce) { var ctrl = this; $scope.reverse = false; $scope.columnName = "hostname"; @@ -140,6 +140,14 @@ $scope.lastIndex = Math.ceil(nodeStatusCopy.length / $scope.RecordsToDisplay); $scope.nodeStatus = nodeStatusCopy.slice(0, $scope.RecordsToDisplay); + $scope.formatValue = function(value) { + if (value && value.includes(';')) { + return $sce.trustAsHtml(value.replace(/;/g, '<br/>
    ')); + } else { + return $sce.trustAsHtml(value); + } + }; + ctrl.nodemanagermetrics.NodeStatistics.forEach(({key, value}) => { if(key == "Min") { $scope.statistics.nodes.usages.min = value; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java index fe5459764c9..787f83e1a83 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java @@ -828,10 +828,36 @@ public static Pipeline getRandomPipeline() { */ public static List getContainerInfo(int numContainers) { List containerInfoList = new ArrayList<>(); + RatisReplicationConfig ratisReplicationConfig = + RatisReplicationConfig.getInstance(ReplicationFactor.THREE); for (int i = 0; i < numContainers; i++) { ContainerInfo.Builder builder = new ContainerInfo.Builder(); containerInfoList.add(builder .setContainerID(RandomUtils.nextLong()) + .setReplicationConfig(ratisReplicationConfig) + .build()); + } + return containerInfoList; + } + + /** + * Generate EC Container data. + * + * @param numContainers number of ContainerInfo to be included in list. + * @param data Data block Num. + * @param parity Parity block Num. + * @return {@literal List} + */ + public static List getECContainerInfo(int numContainers, int data, int parity) { + List containerInfoList = new ArrayList<>(); + ECReplicationConfig eCReplicationConfig = new ECReplicationConfig(data, parity); + for (int i = 0; i < numContainers; i++) { + ContainerInfo.Builder builder = new ContainerInfo.Builder(); + containerInfoList.add(builder + .setContainerID(RandomUtils.nextLong()) + .setOwner("test-owner") + .setPipelineID(PipelineID.randomId()) + .setReplicationConfig(eCReplicationConfig) .build()); } return containerInfoList; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java index 6438b6f8d49..621c9297e7e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java @@ -22,15 +22,15 @@ import java.time.Clock; import java.time.ZoneId; import java.time.ZoneOffset; +import java.util.ArrayList; import java.util.List; import java.util.Map; -import java.util.ArrayList; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutorService; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -39,30 +39,30 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.HddsTestUtils; -import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaPendingOps; -import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; -import 
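/*
 * Illustrative sketch (not part of the patch): seeding a mixed container list
 * with the HddsTestUtils helpers above, e.g. to exercise both the Ratis and EC
 * branches of ContainerSafeModeRule in a test.
 */
List<ContainerInfo> containers = new ArrayList<>();
containers.addAll(HddsTestUtils.getContainerInfo(25));          // Ratis/THREE containers
containers.addAll(HddsTestUtils.getECContainerInfo(10, 3, 2));  // EC 3+2 containers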
org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator; -import org.apache.hadoop.hdds.scm.ha.SCMServiceManager; -import org.apache.hadoop.hdds.scm.ha.SCMContext; -import org.apache.hadoop.hdds.scm.ha.SCMHAManager; -import org.apache.hadoop.hdds.scm.node.NodeStatus; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler; import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerManagerImpl; import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.container.ContainerManagerImpl; import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; +import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaPendingOps; import org.apache.hadoop.hdds.scm.events.SCMEvents; +import org.apache.hadoop.hdds.scm.ha.SCMContext; +import org.apache.hadoop.hdds.scm.ha.SCMHAManager; +import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; +import org.apache.hadoop.hdds.scm.ha.SCMServiceManager; +import org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreImpl; +import org.apache.hadoop.hdds.scm.node.NodeStatus; import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider; import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerImpl; +import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider; import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus; import org.apache.hadoop.hdds.scm.server.SCMConfigurator; @@ -76,21 +76,19 @@ import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; import org.apache.hadoop.ozone.protocol.commands.CreatePipelineCommand; import org.apache.ozone.test.GenericTestUtils; - -import static org.apache.hadoop.ozone.OzoneConsts.GB; -import static org.apache.hadoop.ozone.OzoneConsts.MB; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertThrows; - import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; +import static org.apache.hadoop.ozone.OzoneConsts.GB; +import static org.apache.hadoop.ozone.OzoneConsts.MB; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; /** * Tests for SCM Block Manager. 
@@ -273,7 +271,7 @@ void testAllocateBlockInParallel() throws Exception { } CompletableFuture - .allOf(futureList.toArray(new CompletableFuture[futureList.size()])) + .allOf(futureList.toArray(new CompletableFuture[0])) .get(); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java index 03500529ff9..c8e2f267aff 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java @@ -265,7 +265,7 @@ private void commitTransactions( List transactionResults) throws IOException { commitTransactions(transactionResults, - dnList.toArray(new DatanodeDetails[3])); + dnList.toArray(new DatanodeDetails[0])); } private void commitTransactions( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java index 83791c3257d..5e951a6d680 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java @@ -77,8 +77,7 @@ public class TestContainerManagerImpl { @BeforeEach void setUp() throws Exception { final OzoneConfiguration conf = SCMTestUtils.getConf(testDir); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); scmhaManager = SCMHAManagerStub.getInstance(true); nodeManager = new MockNodeManager(true, 10); sequenceIdGen = new SequenceIdGenerator( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java index f7a731fe117..94b9b2f3f2b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java @@ -100,8 +100,7 @@ void setup() throws IOException, InvalidStateTransitionException { final OzoneConfiguration conf = SCMTestUtils.getConf(testDir); nodeManager = new MockNodeManager(true, 10); containerManager = mock(ContainerManager.class); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); scmhaManager = SCMHAManagerStub.getInstance(true); pipelineManager = new MockPipelineManager(dbStore, scmhaManager, nodeManager); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java index a7043d02642..157a65c7014 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java @@ -75,8 +75,7 @@ public void init() throws IOException, TimeoutException { OzoneConfiguration conf = new OzoneConfiguration(); scmhaManager = SCMHAManagerStub.getInstance(true); 
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); pipelineManager = mock(PipelineManager.class); pipeline = Pipeline.newBuilder().setState(Pipeline.PipelineState.CLOSED) .setId(PipelineID.randomId()) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java index 9abbda81934..314cb02ad72 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java @@ -130,8 +130,7 @@ public void setup() throws IOException, InvalidStateTransitionException, new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap, scmContext, versionManager); scmhaManager = SCMHAManagerStub.getInstance(true); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); pipelineManager = new MockPipelineManager(dbStore, scmhaManager, nodeManager); @@ -785,7 +784,7 @@ public void testECReplicaIndexValidation() throws NodeNotFoundException, IOException, TimeoutException { List dns = IntStream.range(0, 5) .mapToObj(i -> randomDatanodeDetails()).collect(Collectors.toList()); - dns.stream().forEach(dn -> nodeManager.register(dn, null, null)); + dns.forEach(dn -> nodeManager.register(dn, null, null)); ECReplicationConfig replicationConfig = new ECReplicationConfig(3, 2); final ContainerInfo container = getECContainer(LifeCycleState.CLOSED, PipelineID.randomId(), replicationConfig); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java index 9ea4ea45b56..a573573a67b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java @@ -78,8 +78,7 @@ public void setup() throws IOException { final OzoneConfiguration conf = SCMTestUtils.getConf(testDir); this.nodeManager = new MockNodeManager(true, 10); this.containerManager = mock(ContainerManager.class); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); scmhaManager = SCMHAManagerStub.getInstance(true); pipelineManager = new MockPipelineManager(dbStore, scmhaManager, nodeManager); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfigBuilder.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfigBuilder.java new file mode 100644 index 00000000000..fc4bc9fb05c --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfigBuilder.java @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.container.balancer; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; + +class ContainerBalancerConfigBuilder { + private static final int DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER = 15; + + private final ContainerBalancerConfiguration config; + + ContainerBalancerConfigBuilder(int nodeCount) { + this(new OzoneConfiguration(), nodeCount); + } + + ContainerBalancerConfigBuilder(OzoneConfiguration ozoneConfig, int nodeCount) { + config = ozoneConfig.getObject(ContainerBalancerConfiguration.class); + config.setIterations(1); + config.setThreshold(10); + config.setMaxSizeToMovePerIteration(50 * TestContainerBalancerTask.STORAGE_UNIT); + config.setMaxSizeEnteringTarget(50 * TestContainerBalancerTask.STORAGE_UNIT); + if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { + config.setMaxDatanodesPercentageToInvolvePerIteration(100); + } + } + + ContainerBalancerConfiguration build() { + return config; + } +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/MockedSCM.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/MockedSCM.java index 0972e57df64..cf213b963cd 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/MockedSCM.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/MockedSCM.java @@ -137,11 +137,38 @@ public String toString() { return task; } + public @Nonnull ContainerBalancerTask startBalancerTaskAsync( + @Nonnull ContainerBalancer containerBalancer, + @Nonnull ContainerBalancerConfiguration config, + Boolean withDelay) { + ContainerBalancerTask task = new ContainerBalancerTask(scm, 0, containerBalancer, + containerBalancer.getMetrics(), config, withDelay); + new Thread(task).start(); + return task; + } + public @Nonnull ContainerBalancerTask startBalancerTask(@Nonnull ContainerBalancerConfiguration config) { init(config, new OzoneConfiguration()); return startBalancerTask(new ContainerBalancer(scm), config); } + public @Nonnull ContainerBalancerTask startBalancerTaskAsync(@Nonnull ContainerBalancerConfiguration config, + OzoneConfiguration ozoneConfig, + Boolean withDelay) { + init(config, ozoneConfig); + return startBalancerTaskAsync(new ContainerBalancer(scm), config, withDelay); + } + + public @Nonnull ContainerBalancerTask startBalancerTaskAsync(@Nonnull ContainerBalancerConfiguration config, + Boolean withDelay) { + init(config, new OzoneConfiguration()); + return startBalancerTaskAsync(new ContainerBalancer(scm), config, withDelay); + } + + public int getNodeCount() { + return cluster.getNodeCount(); + } + public void enableLegacyReplicationManager() { mockedReplicaManager.conf.setEnableLegacy(true); } diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java index 662322b42f5..c7792887471 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java @@ -44,6 +44,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_NODE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertThrowsExactly; import static org.junit.jupiter.api.Assertions.assertSame; @@ -257,6 +258,22 @@ public void testDelayedStartOnSCMStatusChange() stopBalancer(); } + @Test + public void testGetBalancerStatusInfo() throws Exception { + startBalancer(balancerConfiguration); + assertSame(ContainerBalancerTask.Status.RUNNING, containerBalancer.getBalancerStatus()); + + // Assert the configuration fields that were explicitly set + ContainerBalancerStatusInfo status = containerBalancer.getBalancerStatusInfo(); + assertEquals(balancerConfiguration.getThreshold(), + Double.parseDouble(status.getConfiguration().getUtilizationThreshold())); + assertEquals(balancerConfiguration.getIterations(), status.getConfiguration().getIterations()); + assertEquals(balancerConfiguration.getTriggerDuEnable(), status.getConfiguration().getTriggerDuBeforeMoveEnable()); + + stopBalancer(); + assertSame(ContainerBalancerTask.Status.STOPPED, containerBalancer.getBalancerStatus()); + } + private void startBalancer(ContainerBalancerConfiguration config) throws IllegalContainerBalancerStateException, IOException, InvalidContainerBalancerConfigurationException, TimeoutException { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerDatanodeNodeLimit.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerDatanodeNodeLimit.java index 7a8f655f067..8441b8023dc 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerDatanodeNodeLimit.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerDatanodeNodeLimit.java @@ -73,7 +73,6 @@ */ public class TestContainerBalancerDatanodeNodeLimit { private static final long STORAGE_UNIT = OzoneConsts.GB; - private static final int DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER = 15; @BeforeAll public static void setup() { @@ -103,13 +102,8 @@ private static Stream createMockedSCMs() { @ParameterizedTest(name = "MockedSCM #{index}: {0}") @MethodSource("createMockedSCMs") public void containerBalancerShouldObeyMaxDatanodesToInvolveLimit(@Nonnull MockedSCM mockedSCM) { - ContainerBalancerConfiguration config = new OzoneConfiguration().getObject(ContainerBalancerConfiguration.class); - int nodeCount = mockedSCM.getCluster().getNodeCount(); - if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setIterations(1); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); + int nodeCount = 
mockedSCM.getNodeCount(); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(nodeCount).build(); ContainerBalancerTask task = mockedSCM.startBalancerTask(config); ContainerBalancerMetrics metrics = task.getMetrics(); @@ -129,12 +123,8 @@ public void containerBalancerShouldObeyMaxDatanodesToInvolveLimit(@Nonnull Mocke public void balancerShouldObeyMaxSizeEnteringTargetLimit(@Nonnull MockedSCM mockedSCM) { OzoneConfiguration ozoneConfig = new OzoneConfiguration(); ozoneConfig.set("ozone.scm.container.size", "1MB"); - ContainerBalancerConfiguration config = balancerConfigByOzoneConfig(ozoneConfig); - if (mockedSCM.getCluster().getNodeCount() < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setIterations(1); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); + int nodeCount = mockedSCM.getNodeCount(); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(ozoneConfig, nodeCount).build(); // No containers should be selected when the limit is just 2 MB. config.setMaxSizeEnteringTarget(2 * OzoneConsts.MB); @@ -147,11 +137,7 @@ public void balancerShouldObeyMaxSizeEnteringTargetLimit(@Nonnull MockedSCM mock assertEquals(0, task.getSizeScheduledForMoveInLatestIteration()); // Some containers should be selected when using default values. - ContainerBalancerConfiguration balancerConfig = balancerConfigByOzoneConfig(new OzoneConfiguration()); - if (mockedSCM.getCluster().getNodeCount() < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - balancerConfig.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - balancerConfig.setIterations(1); + ContainerBalancerConfiguration balancerConfig = new ContainerBalancerConfigBuilder(nodeCount).build(); task = mockedSCM.startBalancerTask(balancerConfig); // Balancer should have identified unbalanced nodes. @@ -167,13 +153,9 @@ public void balancerShouldObeyMaxSizeEnteringTargetLimit(@Nonnull MockedSCM mock public void balancerShouldObeyMaxSizeLeavingSourceLimit(@Nonnull MockedSCM mockedSCM) { OzoneConfiguration ozoneConfig = new OzoneConfiguration(); ozoneConfig.set("ozone.scm.container.size", "1MB"); - ContainerBalancerConfiguration config = balancerConfigByOzoneConfig(ozoneConfig); - if (mockedSCM.getCluster().getNodeCount() < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setIterations(1); - config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); + int nodeCount = mockedSCM.getNodeCount(); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(ozoneConfig, nodeCount).build(); + // No source containers should be selected when the limit is just 2 MB. config.setMaxSizeLeavingSource(2 * OzoneConsts.MB); @@ -186,13 +168,9 @@ public void balancerShouldObeyMaxSizeLeavingSourceLimit(@Nonnull MockedSCM mocke assertEquals(0, task.getSizeScheduledForMoveInLatestIteration()); // Some containers should be selected when using default values. 
- ContainerBalancerConfiguration newBalancerConfig = balancerConfigByOzoneConfig(new OzoneConfiguration()); - if (mockedSCM.getCluster().getNodeCount() < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - newBalancerConfig.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - newBalancerConfig.setIterations(1); + ContainerBalancerConfiguration balancerConfig = new ContainerBalancerConfigBuilder(ozoneConfig, nodeCount).build(); - task = mockedSCM.startBalancerTask(newBalancerConfig); + task = mockedSCM.startBalancerTask(balancerConfig); // Balancer should have identified unbalanced nodes. assertTrue(stillHaveUnbalancedNodes(task)); // ContainerToSourceMap is not empty due to some containers should be selected @@ -208,18 +186,10 @@ public void balancerShouldObeyMaxSizeLeavingSourceLimit(@Nonnull MockedSCM mocke @ParameterizedTest(name = "MockedSCM #{index}: {0}") @MethodSource("createMockedSCMs") public void initializeIterationShouldUpdateUnBalancedNodesWhenThresholdChanges(@Nonnull MockedSCM mockedSCM) { - ContainerBalancerConfiguration config = new OzoneConfiguration().getObject(ContainerBalancerConfiguration.class); - int nodeCount = mockedSCM.getCluster().getNodeCount(); - if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setThreshold(10); - config.setIterations(1); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); - config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); // check for random threshold values - for (int i = 0; i < 50; i++) { + for (int i = 0; i < 10; i++) { double randomThreshold = RANDOM.nextDouble() * 100; List expectedUnBalancedNodes = mockedSCM.getCluster().getUnBalancedNodes(randomThreshold); config.setThreshold(randomThreshold); @@ -256,15 +226,7 @@ public void testCalculationOfUtilization(@Nonnull MockedSCM mockedSCM) { @MethodSource("createMockedSCMs") public void testBalancerWithMoveManager(@Nonnull MockedSCM mockedSCM) throws IOException, NodeNotFoundException, TimeoutException { - ContainerBalancerConfiguration config = new OzoneConfiguration().getObject(ContainerBalancerConfiguration.class); - int nodeCount = mockedSCM.getCluster().getNodeCount(); - if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setThreshold(10); - config.setIterations(1); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); - config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); mockedSCM.disableLegacyReplicationManager(); mockedSCM.startBalancerTask(config); @@ -279,15 +241,7 @@ public void testBalancerWithMoveManager(@Nonnull MockedSCM mockedSCM) @ParameterizedTest(name = "MockedSCM #{index}: {0}") @MethodSource("createMockedSCMs") public void unBalancedNodesListShouldBeEmptyWhenClusterIsBalanced(@Nonnull MockedSCM mockedSCM) { - ContainerBalancerConfiguration config = new OzoneConfiguration().getObject(ContainerBalancerConfiguration.class); - int nodeCount = mockedSCM.getCluster().getNodeCount(); - if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setThreshold(10); - config.setIterations(1); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); - config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); + 
ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); config.setThreshold(99.99); ContainerBalancerTask task = mockedSCM.startBalancerTask(config); @@ -302,14 +256,8 @@ public void unBalancedNodesListShouldBeEmptyWhenClusterIsBalanced(@Nonnull Mocke public void testMetrics(@Nonnull MockedSCM mockedSCM) throws IOException, NodeNotFoundException { OzoneConfiguration ozoneConfig = new OzoneConfiguration(); ozoneConfig.set("hdds.datanode.du.refresh.period", "1ms"); - ContainerBalancerConfiguration config = balancerConfigByOzoneConfig(ozoneConfig); - int nodeCount = mockedSCM.getCluster().getNodeCount(); - if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); config.setBalancingInterval(Duration.ofMillis(2)); - config.setThreshold(10); - config.setIterations(1); config.setMaxSizeEnteringTarget(6 * STORAGE_UNIT); // deliberately set max size per iteration to a low value, 6 GB config.setMaxSizeToMovePerIteration(6 * STORAGE_UNIT); @@ -338,15 +286,7 @@ public void testMetrics(@Nonnull MockedSCM mockedSCM) throws IOException, NodeNo @ParameterizedTest(name = "MockedSCM #{index}: {0}") @MethodSource("createMockedSCMs") public void containerBalancerShouldSelectOnlyClosedContainers(@Nonnull MockedSCM mockedSCM) { - ContainerBalancerConfiguration config = balancerConfigByOzoneConfig(new OzoneConfiguration()); - int nodeCount = mockedSCM.getCluster().getNodeCount(); - if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setIterations(1); - config.setThreshold(10); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); - config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); Map cidToInfoMap = mockedSCM.getCluster().getCidToInfoMap(); // Make all containers open, balancer should not select any of them @@ -380,15 +320,7 @@ public void containerBalancerShouldSelectOnlyClosedContainers(@Nonnull MockedSCM @MethodSource("createMockedSCMs") public void balancerShouldNotSelectNonClosedContainerReplicas(@Nonnull MockedSCM mockedSCM) throws ContainerNotFoundException { - ContainerBalancerConfiguration config = balancerConfigByOzoneConfig(new OzoneConfiguration()); - int nodeCount = mockedSCM.getCluster().getNodeCount(); - if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setIterations(1); - config.setThreshold(10); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); - config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); // Let's mock such that all replicas have CLOSING state Map> cidToReplicasMap = mockedSCM.getCluster().getCidToReplicasMap(); @@ -418,12 +350,7 @@ public void balancerShouldNotSelectNonClosedContainerReplicas(@Nonnull MockedSCM @ParameterizedTest(name = "MockedSCM #{index}: {0}") @MethodSource("createMockedSCMs") public void containerBalancerShouldObeyMaxSizeToMoveLimit(@Nonnull MockedSCM mockedSCM) { - ContainerBalancerConfiguration config = balancerConfigByOzoneConfig(new OzoneConfiguration()); - int nodeCount = mockedSCM.getCluster().getNodeCount(); - if (nodeCount < 
DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setIterations(1); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); config.setThreshold(1); config.setMaxSizeToMovePerIteration(10 * STORAGE_UNIT); config.setMaxSizeEnteringTarget(10 * STORAGE_UNIT); @@ -441,15 +368,7 @@ public void containerBalancerShouldObeyMaxSizeToMoveLimit(@Nonnull MockedSCM moc @ParameterizedTest(name = "MockedSCM #{index}: {0}") @MethodSource("createMockedSCMs") public void targetDatanodeShouldNotAlreadyContainSelectedContainer(@Nonnull MockedSCM mockedSCM) { - ContainerBalancerConfiguration config = balancerConfigByOzoneConfig(new OzoneConfiguration()); - int nodeCount = mockedSCM.getCluster().getNodeCount(); - if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setIterations(1); - config.setThreshold(10); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); - config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); ContainerBalancerTask task = mockedSCM.startBalancerTask(config); @@ -468,15 +387,7 @@ public void targetDatanodeShouldNotAlreadyContainSelectedContainer(@Nonnull Mock @ParameterizedTest(name = "MockedSCM #{index}: {0}") @MethodSource("createMockedSCMs") public void containerMoveSelectionShouldFollowPlacementPolicy(@Nonnull MockedSCM mockedSCM) { - ContainerBalancerConfiguration config = balancerConfigByOzoneConfig(new OzoneConfiguration()); - int nodeCount = mockedSCM.getCluster().getNodeCount(); - if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setIterations(1); - config.setThreshold(10); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); - config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); ContainerBalancerTask task = mockedSCM.startBalancerTask(config); @@ -511,15 +422,7 @@ public void containerMoveSelectionShouldFollowPlacementPolicy(@Nonnull MockedSCM @ParameterizedTest(name = "MockedSCM #{index}: {0}") @MethodSource("createMockedSCMs") public void targetDatanodeShouldBeInServiceHealthy(@Nonnull MockedSCM mockedSCM) throws NodeNotFoundException { - ContainerBalancerConfiguration config = balancerConfigByOzoneConfig(new OzoneConfiguration()); - int nodeCount = mockedSCM.getCluster().getNodeCount(); - if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setIterations(1); - config.setThreshold(10); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); - config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); ContainerBalancerTask task = mockedSCM.startBalancerTask(config); @@ -535,15 +438,7 @@ public void targetDatanodeShouldBeInServiceHealthy(@Nonnull MockedSCM mockedSCM) @MethodSource("createMockedSCMs") public void selectedContainerShouldNotAlreadyHaveBeenSelected(@Nonnull MockedSCM mockedSCM) throws NodeNotFoundException, ContainerNotFoundException, TimeoutException, ContainerReplicaNotFoundException { - ContainerBalancerConfiguration config = balancerConfigByOzoneConfig(new 
OzoneConfiguration()); - int nodeCount = mockedSCM.getCluster().getNodeCount(); - if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setIterations(1); - config.setThreshold(10); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); - config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); mockedSCM.enableLegacyReplicationManager(); @@ -570,15 +465,7 @@ public void selectedContainerShouldNotAlreadyHaveBeenSelected(@Nonnull MockedSCM @ParameterizedTest(name = "MockedSCM #{index}: {0}") @MethodSource("createMockedSCMs") public void balancerShouldNotSelectConfiguredExcludeContainers(@Nonnull MockedSCM mockedSCM) { - ContainerBalancerConfiguration config = balancerConfigByOzoneConfig(new OzoneConfiguration()); - int nodeCount = mockedSCM.getCluster().getNodeCount(); - if (nodeCount < DATANODE_COUNT_LIMIT_FOR_SMALL_CLUSTER) { - config.setMaxDatanodesPercentageToInvolvePerIteration(100); - } - config.setIterations(1); - config.setThreshold(10); - config.setMaxSizeToMovePerIteration(50 * STORAGE_UNIT); - config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); config.setExcludeContainers("1, 4, 5"); ContainerBalancerTask task = mockedSCM.startBalancerTask(config); @@ -589,6 +476,155 @@ public void balancerShouldNotSelectConfiguredExcludeContainers(@Nonnull MockedSC } } + @ParameterizedTest(name = "MockedSCM #{index}: {0}") + @MethodSource("createMockedSCMs") + public void checkIterationResult(@Nonnull MockedSCM mockedSCM) + throws NodeNotFoundException, ContainerNotFoundException, TimeoutException { + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); + config.setMaxSizeEnteringTarget(10 * STORAGE_UNIT); + config.setMaxSizeToMovePerIteration(100 * STORAGE_UNIT); + + mockedSCM.enableLegacyReplicationManager(); + + ContainerBalancerTask task = mockedSCM.startBalancerTask(config); + // According to the setup and configurations, this iteration's result should be ITERATION_COMPLETED. + assertEquals(ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, task.getIterationResult()); + + // Now, limit maxSizeToMovePerIteration but fail all container moves. + // The result should still be ITERATION_COMPLETED. + when(mockedSCM.getReplicationManager() + .move(any(ContainerID.class), any(DatanodeDetails.class), any(DatanodeDetails.class))) + .thenReturn(CompletableFuture.completedFuture(MoveManager.MoveResult.REPLICATION_FAIL_NODE_UNHEALTHY)); + config.setMaxSizeToMovePerIteration(10 * STORAGE_UNIT); + + task = mockedSCM.startBalancerTask(config); + assertEquals(ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, task.getIterationResult()); + + //Try the same but use MoveManager for container move instead of legacy RM. + mockedSCM.disableLegacyReplicationManager(); + task = mockedSCM.startBalancerTask(config); + assertEquals(ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, task.getIterationResult()); + } + + /** + * Tests the situation where some container moves time out because they take longer than "move.timeout". 
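+ * Only the first mocked move completes within the configured move timeout; the later moves finish only after the timeout has expired and are expected to be counted as timed out.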
+ */ + @ParameterizedTest(name = "MockedSCM #{index}: {0}") + @MethodSource("createMockedSCMs") + public void checkIterationResultTimeout(@Nonnull MockedSCM mockedSCM) + throws NodeNotFoundException, ContainerNotFoundException, TimeoutException, ContainerReplicaNotFoundException { + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); + config.setMaxSizeEnteringTarget(50 * STORAGE_UNIT); + config.setMaxSizeToMovePerIteration(100 * STORAGE_UNIT); + config.setMaxDatanodesPercentageToInvolvePerIteration(100); + config.setMoveTimeout(Duration.ofMillis(50)); + + CompletableFuture completedFuture = + CompletableFuture.completedFuture(MoveManager.MoveResult.COMPLETED); + when(mockedSCM.getReplicationManager() + .move(any(ContainerID.class), any(DatanodeDetails.class), any(DatanodeDetails.class))) + .thenReturn(completedFuture) + .thenAnswer(invocation -> genCompletableFuture(150)); + + mockedSCM.enableLegacyReplicationManager(); + ContainerBalancerTask task = mockedSCM.startBalancerTask(config); + + // According to the setup and configurations, this iteration's result should be ITERATION_COMPLETED. + assertEquals(ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, task.getIterationResult()); + assertEquals(1, task.getMetrics().getNumContainerMovesCompletedInLatestIteration()); + assertThat(task.getMetrics().getNumContainerMovesTimeoutInLatestIteration()).isGreaterThanOrEqualTo(1); + + /* + Test the same but use MoveManager instead of LegacyReplicationManager. + The first move completes immediately and falls within the 50ms move timeout, so it should be successful. The rest take 150ms and should time out. + */ + mockedSCM.disableLegacyReplicationManager(); + when(mockedSCM.getMoveManager() + .move(any(ContainerID.class), any(DatanodeDetails.class), any(DatanodeDetails.class))) + .thenReturn(completedFuture) + .thenAnswer(invocation -> genCompletableFuture(150)); + + task = mockedSCM.startBalancerTask(config); + assertEquals(ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, task.getIterationResult()); + assertEquals(1, task.getMetrics().getNumContainerMovesCompletedInLatestIteration()); + assertThat(task.getMetrics().getNumContainerMovesTimeoutInLatestIteration()).isGreaterThanOrEqualTo(1); + } + + @ParameterizedTest(name = "MockedSCM #{index}: {0}") + @MethodSource("createMockedSCMs") + public void checkIterationResultTimeoutFromReplicationManager(@Nonnull MockedSCM mockedSCM) + throws NodeNotFoundException, ContainerNotFoundException, TimeoutException, ContainerReplicaNotFoundException { + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); + config.setMaxSizeEnteringTarget(10 * STORAGE_UNIT); + config.setMaxSizeToMovePerIteration(100 * STORAGE_UNIT); + config.setMoveTimeout(Duration.ofMillis(500)); + + CompletableFuture future = + CompletableFuture.supplyAsync(() -> MoveManager.MoveResult.REPLICATION_FAIL_TIME_OUT); + CompletableFuture future2 = + CompletableFuture.supplyAsync(() -> MoveManager.MoveResult.DELETION_FAIL_TIME_OUT); + when(mockedSCM.getReplicationManager() + .move(any(ContainerID.class), any(DatanodeDetails.class), any(DatanodeDetails.class))) + .thenReturn(future, future2); + + mockedSCM.enableLegacyReplicationManager(); + + ContainerBalancerTask task = mockedSCM.startBalancerTask(config); + assertThat(task.getMetrics().getNumContainerMovesTimeoutInLatestIteration()).isGreaterThan(0); + assertEquals(0, task.getMetrics().getNumContainerMovesCompletedInLatestIteration()); + + // Try 
the same test with MoveManager instead of LegacyReplicationManager. + when(mockedSCM.getMoveManager() + .move(any(ContainerID.class), any(DatanodeDetails.class), any(DatanodeDetails.class))) + .thenReturn(future).thenAnswer(invocation -> future2); + + mockedSCM.disableLegacyReplicationManager(); + + task = mockedSCM.startBalancerTask(config); + assertThat(task.getMetrics().getNumContainerMovesTimeoutInLatestIteration()).isGreaterThan(0); + assertEquals(0, task.getMetrics().getNumContainerMovesCompletedInLatestIteration()); + } + + @ParameterizedTest(name = "MockedSCM #{index}: {0}") + @MethodSource("createMockedSCMs") + @Flaky("HDDS-11855") + public void checkIterationResultException(@Nonnull MockedSCM mockedSCM) + throws NodeNotFoundException, ContainerNotFoundException, TimeoutException, ContainerReplicaNotFoundException { + int nodeCount = mockedSCM.getNodeCount(); + ContainerBalancerConfiguration config = new ContainerBalancerConfigBuilder(mockedSCM.getNodeCount()).build(); + config.setMaxSizeEnteringTarget(10 * STORAGE_UNIT); + config.setMaxSizeToMovePerIteration(100 * STORAGE_UNIT); + config.setMoveTimeout(Duration.ofMillis(500)); + + CompletableFuture future = new CompletableFuture<>(); + future.completeExceptionally(new RuntimeException("Runtime Exception")); + when(mockedSCM.getReplicationManager() + .move(any(ContainerID.class), any(DatanodeDetails.class), any(DatanodeDetails.class))) + .thenReturn(genCompletableFutureWithException(1)) + .thenThrow(new ContainerNotFoundException("Test Container not found")) + .thenReturn(future); + + mockedSCM.enableLegacyReplicationManager(); + + ContainerBalancerTask task = mockedSCM.startBalancerTask(config); + assertEquals(ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, task.getIterationResult()); + + int expectedMovesFailed = (nodeCount > 6) ? 3 : 1; + assertThat(task.getMetrics().getNumContainerMovesFailed()).isGreaterThanOrEqualTo(expectedMovesFailed); + + // Try the same test but with MoveManager instead of ReplicationManager. 
+ when(mockedSCM.getMoveManager() + .move(any(ContainerID.class), any(DatanodeDetails.class), any(DatanodeDetails.class))) + .thenReturn(genCompletableFutureWithException(1)) + .thenThrow(new ContainerNotFoundException("Test Container not found")) + .thenReturn(future); + + mockedSCM.disableLegacyReplicationManager(); + task = mockedSCM.startBalancerTask(config); + assertEquals(ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, task.getIterationResult()); + assertThat(task.getMetrics().getNumContainerMovesFailed()).isGreaterThanOrEqualTo(expectedMovesFailed); + } + public static List getUnBalancedNodes(@Nonnull ContainerBalancerTask task) { ArrayList result = new ArrayList<>(); result.addAll(task.getOverUtilizedNodes()); @@ -604,9 +640,24 @@ private static boolean stillHaveUnbalancedNodes(@Nonnull ContainerBalancerTask t return new MockedSCM(new TestableCluster(datanodeCount, STORAGE_UNIT)); } - private static @Nonnull ContainerBalancerConfiguration balancerConfigByOzoneConfig( - @Nonnull OzoneConfiguration ozoneConfiguration - ) { - return ozoneConfiguration.getObject(ContainerBalancerConfiguration.class); + private static CompletableFuture genCompletableFuture(int sleepMilSec) { + return CompletableFuture.supplyAsync(() -> { + try { + Thread.sleep(sleepMilSec); + } catch (InterruptedException e) { + e.printStackTrace(); + } + return MoveManager.MoveResult.COMPLETED; + }); + } + + private static CompletableFuture genCompletableFutureWithException(int sleepMilSec) { + return CompletableFuture.supplyAsync(() -> { + try { + Thread.sleep(sleepMilSec); + } catch (Exception ignored) { + } + throw new RuntimeException("Runtime Exception after doing work"); + }); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerStatusInfo.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerStatusInfo.java index 48b3ee2d0de..e2d3003af07 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerStatusInfo.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerStatusInfo.java @@ -18,16 +18,23 @@ package org.apache.hadoop.hdds.scm.container.balancer; +import org.apache.commons.math3.util.ArithmeticUtils; +import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.ozone.test.LambdaTestUtils; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import java.util.List; +import java.util.Map; +import java.util.UUID; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; /** @@ -47,15 +54,185 @@ void testGetIterationStatistics() { ContainerBalancerTask task = mockedScm.startBalancerTask(config); List iterationStatistics = task.getCurrentIterationsStatistic(); - assertEquals(3, iterationStatistics.size()); - iterationStatistics.forEach(is -> { - assertTrue(is.getContainerMovesCompleted() > 0); - assertEquals(0, is.getContainerMovesFailed()); - assertEquals(0, is.getContainerMovesTimeout()); - 
assertFalse(is.getSizeEnteringNodesGB().isEmpty()); - assertFalse(is.getSizeLeavingNodesGB().isEmpty()); + assertEquals(2, iterationStatistics.size()); + + ContainerBalancerTaskIterationStatusInfo iterationHistory1 = iterationStatistics.get(0); + verifyCompletedIteration(iterationHistory1, 1); + + ContainerBalancerTaskIterationStatusInfo iterationHistory2 = iterationStatistics.get(1); + verifyCompletedIteration(iterationHistory2, 2); + } + + @Test + void testReRequestIterationStatistics() throws Exception { + MockedSCM mockedScm = new MockedSCM(new TestableCluster(20, OzoneConsts.GB)); + + ContainerBalancerConfiguration config = new OzoneConfiguration().getObject(ContainerBalancerConfiguration.class); + + config.setIterations(2); + config.setBalancingInterval(0); + config.setMaxSizeToMovePerIteration(50 * OzoneConsts.GB); + + ContainerBalancerTask task = mockedScm.startBalancerTask(config); + List firstRequestIterationStatistics = + task.getCurrentIterationsStatistic(); + Thread.sleep(1000L); + List secondRequestIterationStatistics = + task.getCurrentIterationsStatistic(); + assertEquals(firstRequestIterationStatistics.get(0), secondRequestIterationStatistics.get(0)); + assertEquals(firstRequestIterationStatistics.get(1), secondRequestIterationStatistics.get(1)); + } + + @Test + void testGetCurrentStatisticsRequestInPeriodBetweenIterations() throws Exception { + MockedSCM mockedScm = new MockedSCM(new TestableCluster(20, OzoneConsts.GB)); + + ContainerBalancerConfiguration config = new OzoneConfiguration().getObject(ContainerBalancerConfiguration.class); + + config.setIterations(2); + config.setBalancingInterval(10000); + config.setMaxSizeToMovePerIteration(50 * OzoneConsts.GB); + + ContainerBalancerTask task = mockedScm.startBalancerTaskAsync(config, false); + LambdaTestUtils.await(5000, 10, + () -> task.getCurrentIterationsStatistic().size() == 1 && + "ITERATION_COMPLETED".equals(task.getCurrentIterationsStatistic().get(0).getIterationResult())); + List iterationsStatic = task.getCurrentIterationsStatistic(); + assertEquals(1, iterationsStatic.size()); + + ContainerBalancerTaskIterationStatusInfo firstIteration = iterationsStatic.get(0); + verifyCompletedIteration(firstIteration, 1); + } + + @Test + void testCurrentStatisticsDoesntChangeWhenReRequestInPeriodBetweenIterations() throws InterruptedException { + MockedSCM mockedScm = new MockedSCM(new TestableCluster(20, OzoneConsts.GB)); + + ContainerBalancerConfiguration config = new OzoneConfiguration().getObject(ContainerBalancerConfiguration.class); + + config.setIterations(2); + config.setBalancingInterval(10000); + config.setMaxSizeToMovePerIteration(50 * OzoneConsts.GB); + + ContainerBalancerTask task = mockedScm.startBalancerTaskAsync(config, false); + // Delay in finishing the first iteration + Thread.sleep(1000L); + List firstRequestIterationStatistics = + task.getCurrentIterationsStatistic(); + // Delay occurred for some time during the period between iterations. 
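+ // Both snapshots are taken during the period between iterations, so they are expected to be identical and to describe the already completed first iteration.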
+ Thread.sleep(1000L); + List secondRequestIterationStatistics = + task.getCurrentIterationsStatistic(); + assertEquals(1, firstRequestIterationStatistics.size()); + assertEquals(1, secondRequestIterationStatistics.size()); + assertEquals(firstRequestIterationStatistics.get(0), secondRequestIterationStatistics.get(0)); + } + + @Test + void testGetCurrentStatisticsWithDelay() throws Exception { + MockedSCM mockedScm = new MockedSCM(new TestableCluster(20, OzoneConsts.GB)); + + ContainerBalancerConfiguration config = new OzoneConfiguration().getObject(ContainerBalancerConfiguration.class); + + config.setIterations(2); + config.setBalancingInterval(0); + config.setMaxSizeToMovePerIteration(50 * OzoneConsts.GB); + OzoneConfiguration configuration = new OzoneConfiguration(); + configuration.set(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, "1"); + ContainerBalancerTask task = mockedScm.startBalancerTaskAsync(config, configuration, true); + // Delay in finishing the first iteration + LambdaTestUtils.await(1100, 1, () -> task.getCurrentIterationsStatistic().size() == 1); + List iterationsStatic = task.getCurrentIterationsStatistic(); + assertEquals(1, iterationsStatic.size()); + ContainerBalancerTaskIterationStatusInfo currentIteration = iterationsStatic.get(0); + verifyStartedEmptyIteration(currentIteration); + } + + @Test + void testGetCurrentStatisticsWhileBalancingInProgress() throws Exception { + MockedSCM mockedScm = new MockedSCM(new TestableCluster(20, OzoneConsts.GB)); + + ContainerBalancerConfiguration config = new OzoneConfiguration().getObject(ContainerBalancerConfiguration.class); + + config.setIterations(3); + config.setBalancingInterval(0); + config.setMaxSizeToMovePerIteration(50 * OzoneConsts.GB); + + ContainerBalancerTask task = mockedScm.startBalancerTaskAsync(config, false); + // Get the current iteration statistics when it has information about the containers moving. + LambdaTestUtils.await(5000, 1, + () -> task.getCurrentIterationsStatistic().size() == 2 && + task.getCurrentIterationsStatistic().get(1).getContainerMovesScheduled() > 0); + List iterationsStatic = task.getCurrentIterationsStatistic(); + assertEquals(2, iterationsStatic.size()); + ContainerBalancerTaskIterationStatusInfo currentIteration = iterationsStatic.get(1); + assertCurrentIterationStatisticWhileBalancingInProgress(currentIteration); + } + + private static void assertCurrentIterationStatisticWhileBalancingInProgress( + ContainerBalancerTaskIterationStatusInfo iterationsStatic + ) { + // No need to check the other iteration-statistic fields (e.g. '*ContainerMoves*'), because checking them can lead to flaky results. 
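+ // Only fields that are deterministic at this point are asserted: the iteration number, the not-yet-set iteration result, the zero failed and timeout counters, and the per-node entering and leaving sizes.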
+ assertEquals(2, iterationsStatic.getIterationNumber()); + assertNull(iterationsStatic.getIterationResult()); + assertEquals(0, iterationsStatic.getContainerMovesFailed()); + assertEquals(0, iterationsStatic.getContainerMovesTimeout()); + iterationsStatic.getSizeEnteringNodes().forEach((id, size) -> { + assertNotNull(id); + assertTrue(size > 0); }); + iterationsStatic.getSizeLeavingNodes().forEach((id, size) -> { + assertNotNull(id); + assertTrue(size > 0); + }); + } + + private void verifyCompletedIteration( + ContainerBalancerTaskIterationStatusInfo iteration, + Integer expectedIterationNumber + ) { + assertEquals(expectedIterationNumber, iteration.getIterationNumber()); + assertEquals("ITERATION_COMPLETED", iteration.getIterationResult()); + assertNotNull(iteration.getIterationDuration()); + assertTrue(iteration.getContainerMovesScheduled() > 0); + assertTrue(iteration.getContainerMovesCompleted() > 0); + assertEquals(0, iteration.getContainerMovesFailed()); + assertEquals(0, iteration.getContainerMovesTimeout()); + assertTrue(iteration.getSizeScheduledForMove() > 0); + assertTrue(iteration.getDataSizeMoved() > 0); + assertFalse(iteration.getSizeEnteringNodes().isEmpty()); + assertFalse(iteration.getSizeLeavingNodes().isEmpty()); + iteration.getSizeEnteringNodes().forEach((id, size) -> { + assertNotNull(id); + assertTrue(size > 0); + }); + iteration.getSizeLeavingNodes().forEach((id, size) -> { + assertNotNull(id); + assertTrue(size > 0); + }); + Long enteringDataSum = getTotalMovedData(iteration.getSizeEnteringNodes()); + Long leavingDataSum = getTotalMovedData(iteration.getSizeLeavingNodes()); + assertEquals(enteringDataSum, leavingDataSum); + } + + private void verifyStartedEmptyIteration( + ContainerBalancerTaskIterationStatusInfo iteration + ) { + assertEquals(1, iteration.getIterationNumber()); + assertNull(iteration.getIterationResult()); + assertNotNull(iteration.getIterationDuration()); + assertEquals(0, iteration.getContainerMovesScheduled()); + assertEquals(0, iteration.getContainerMovesCompleted()); + assertEquals(0, iteration.getContainerMovesFailed()); + assertEquals(0, iteration.getContainerMovesTimeout()); + assertEquals(0, iteration.getSizeScheduledForMove()); + assertEquals(0, iteration.getDataSizeMoved()); + assertTrue(iteration.getSizeEnteringNodes().isEmpty()); + assertTrue(iteration.getSizeLeavingNodes().isEmpty()); + } + private static Long getTotalMovedData(Map iteration) { + return iteration.values().stream().reduce(0L, ArithmeticUtils::addAndCheck); } /** diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java index d0e9cd53fec..e689e8d1144 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; import org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicyFactory; @@ -60,7 
+59,6 @@ import java.io.IOException; import java.time.Clock; -import java.time.Duration; import java.time.ZoneId; import java.util.ArrayList; import java.util.HashMap; @@ -73,16 +71,16 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import static org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationManagerConfiguration; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotSame; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationManagerConfiguration; -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -118,7 +116,7 @@ public class TestContainerBalancerTask { private static final ThreadLocalRandom RANDOM = ThreadLocalRandom.current(); private StatefulServiceStateManager serviceStateManager; - private static final long STORAGE_UNIT = OzoneConsts.GB; + static final long STORAGE_UNIT = OzoneConsts.GB; /** * Sets up configuration values and creates a mock cluster. @@ -337,229 +335,6 @@ public void testContainerBalancerConfiguration() { cbConf.getMoveReplicationTimeout().toMinutes()); } - @Test - public void checkIterationResult() - throws NodeNotFoundException, IOException, - IllegalContainerBalancerStateException, - InvalidContainerBalancerConfigurationException, - TimeoutException { - balancerConfiguration.setThreshold(10); - balancerConfiguration.setIterations(1); - balancerConfiguration.setMaxSizeEnteringTarget(10 * STORAGE_UNIT); - balancerConfiguration.setMaxSizeToMovePerIteration(100 * STORAGE_UNIT); - balancerConfiguration.setMaxDatanodesPercentageToInvolvePerIteration(100); - rmConf.setEnableLegacy(true); - - startBalancer(balancerConfiguration); - - /* - According to the setup and configurations, this iteration's result should - be ITERATION_COMPLETED. - */ - assertEquals( - ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, - containerBalancerTask.getIterationResult()); - stopBalancer(); - - /* - Now, limit maxSizeToMovePerIteration but fail all container moves. The - result should still be ITERATION_COMPLETED. - */ - when(replicationManager.move(any(ContainerID.class), - any(DatanodeDetails.class), - any(DatanodeDetails.class))) - .thenReturn(CompletableFuture.completedFuture( - MoveManager.MoveResult.REPLICATION_FAIL_NODE_UNHEALTHY)); - balancerConfiguration.setMaxSizeToMovePerIteration(10 * STORAGE_UNIT); - - startBalancer(balancerConfiguration); - - assertEquals( - ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, - containerBalancerTask.getIterationResult()); - stopBalancer(); - - /* - Try the same but use MoveManager for container move instead of legacy RM. 
- */ - rmConf.setEnableLegacy(false); - startBalancer(balancerConfiguration); - assertEquals( - ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, - containerBalancerTask.getIterationResult()); - stopBalancer(); - } - - /** - * Tests the situation where some container moves time out because they - * take longer than "move.timeout". - */ - @Test - public void checkIterationResultTimeout() - throws NodeNotFoundException, IOException, - IllegalContainerBalancerStateException, - InvalidContainerBalancerConfigurationException, - TimeoutException { - - CompletableFuture completedFuture = - CompletableFuture.completedFuture(MoveManager.MoveResult.COMPLETED); - when(replicationManager.move(any(ContainerID.class), - any(DatanodeDetails.class), - any(DatanodeDetails.class))) - .thenReturn(completedFuture) - .thenAnswer(invocation -> genCompletableFuture(2000)); - - balancerConfiguration.setThreshold(10); - balancerConfiguration.setIterations(1); - balancerConfiguration.setMaxSizeEnteringTarget(10 * STORAGE_UNIT); - balancerConfiguration.setMaxSizeToMovePerIteration(100 * STORAGE_UNIT); - balancerConfiguration.setMaxDatanodesPercentageToInvolvePerIteration(100); - balancerConfiguration.setMoveTimeout(Duration.ofMillis(500)); - rmConf.setEnableLegacy(true); - startBalancer(balancerConfiguration); - - /* - According to the setup and configurations, this iteration's result should - be ITERATION_COMPLETED. - */ - assertEquals(ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, - containerBalancerTask.getIterationResult()); - assertEquals(1, containerBalancerTask.getMetrics().getNumContainerMovesCompletedInLatestIteration()); - assertThat(containerBalancerTask.getMetrics() - .getNumContainerMovesTimeoutInLatestIteration()).isGreaterThan(1); - stopBalancer(); - - /* - Test the same but use MoveManager instead of LegacyReplicationManager. - The first move being 10ms falls within the timeout duration of 500ms. It - should be successful. The rest should fail. 
- */ - rmConf.setEnableLegacy(false); - when(moveManager.move(any(ContainerID.class), - any(DatanodeDetails.class), - any(DatanodeDetails.class))) - .thenReturn(completedFuture) - .thenAnswer(invocation -> genCompletableFuture(2000)); - - startBalancer(balancerConfiguration); - assertEquals(ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, - containerBalancerTask.getIterationResult()); - assertEquals(1, containerBalancerTask.getMetrics().getNumContainerMovesCompletedInLatestIteration()); - assertThat(containerBalancerTask.getMetrics() - .getNumContainerMovesTimeoutInLatestIteration()).isGreaterThan(1); - stopBalancer(); - } - - @Test - public void checkIterationResultTimeoutFromReplicationManager() - throws NodeNotFoundException, IOException, - IllegalContainerBalancerStateException, - InvalidContainerBalancerConfigurationException, TimeoutException { - CompletableFuture future - = CompletableFuture.supplyAsync(() -> - MoveManager.MoveResult.REPLICATION_FAIL_TIME_OUT); - CompletableFuture future2 - = CompletableFuture.supplyAsync(() -> - MoveManager.MoveResult.DELETION_FAIL_TIME_OUT); - when(replicationManager.move(any(ContainerID.class), - any(DatanodeDetails.class), - any(DatanodeDetails.class))) - .thenReturn(future, future2); - - balancerConfiguration.setThreshold(10); - balancerConfiguration.setIterations(1); - balancerConfiguration.setMaxSizeEnteringTarget(10 * STORAGE_UNIT); - balancerConfiguration.setMaxSizeToMovePerIteration(100 * STORAGE_UNIT); - balancerConfiguration.setMaxDatanodesPercentageToInvolvePerIteration(100); - balancerConfiguration.setMoveTimeout(Duration.ofMillis(500)); - rmConf.setEnableLegacy(true); - startBalancer(balancerConfiguration); - - assertThat(containerBalancerTask.getMetrics() - .getNumContainerMovesTimeoutInLatestIteration()).isGreaterThan(0); - assertEquals(0, containerBalancerTask.getMetrics().getNumContainerMovesCompletedInLatestIteration()); - stopBalancer(); - - /* - Try the same test with MoveManager instead of LegacyReplicationManager. 
- */ - when(moveManager.move(any(ContainerID.class), - any(DatanodeDetails.class), - any(DatanodeDetails.class))) - .thenReturn(future).thenAnswer(invocation -> future2); - - rmConf.setEnableLegacy(false); - startBalancer(balancerConfiguration); - assertThat(containerBalancerTask.getMetrics() - .getNumContainerMovesTimeoutInLatestIteration()).isGreaterThan(0); - assertEquals(0, containerBalancerTask.getMetrics().getNumContainerMovesCompletedInLatestIteration()); - stopBalancer(); - } - - @Test - public void checkIterationResultException() - throws NodeNotFoundException, IOException, - IllegalContainerBalancerStateException, - InvalidContainerBalancerConfigurationException, - TimeoutException { - - CompletableFuture future = - new CompletableFuture<>(); - future.completeExceptionally(new RuntimeException("Runtime Exception")); - when(replicationManager.move(any(ContainerID.class), - any(DatanodeDetails.class), - any(DatanodeDetails.class))) - .thenReturn(CompletableFuture.supplyAsync(() -> { - try { - Thread.sleep(1); - } catch (Exception ignored) { - } - throw new RuntimeException("Runtime Exception after doing work"); - })) - .thenThrow(new ContainerNotFoundException("Test Container not found")) - .thenReturn(future); - - balancerConfiguration.setThreshold(10); - balancerConfiguration.setIterations(1); - balancerConfiguration.setMaxSizeEnteringTarget(10 * STORAGE_UNIT); - balancerConfiguration.setMaxSizeToMovePerIteration(100 * STORAGE_UNIT); - balancerConfiguration.setMaxDatanodesPercentageToInvolvePerIteration(100); - balancerConfiguration.setMoveTimeout(Duration.ofMillis(500)); - rmConf.setEnableLegacy(true); - - startBalancer(balancerConfiguration); - - assertEquals(ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, - containerBalancerTask.getIterationResult()); - assertThat(containerBalancerTask.getMetrics().getNumContainerMovesFailed()) - .isGreaterThanOrEqualTo(3); - stopBalancer(); - - /* - Try the same test but with MoveManager instead of ReplicationManager. 
- */ - when(moveManager.move(any(ContainerID.class), - any(DatanodeDetails.class), - any(DatanodeDetails.class))) - .thenReturn(CompletableFuture.supplyAsync(() -> { - try { - Thread.sleep(1); - } catch (Exception ignored) { - } - throw new RuntimeException("Runtime Exception after doing work"); - })) - .thenThrow(new ContainerNotFoundException("Test Container not found")) - .thenReturn(future); - - rmConf.setEnableLegacy(false); - startBalancer(balancerConfiguration); - assertEquals(ContainerBalancerTask.IterationResult.ITERATION_COMPLETED, - containerBalancerTask.getIterationResult()); - assertThat(containerBalancerTask.getMetrics().getNumContainerMovesFailed()) - .isGreaterThanOrEqualTo(3); - stopBalancer(); - } - @Test public void testDelayedStart() throws InterruptedException, TimeoutException { conf.setTimeDuration("hdds.scm.wait.time.after.safemode.exit", 10, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java index 8aac64de702..7f36279ba08 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java @@ -179,8 +179,7 @@ void setup(@TempDir File testDir) throws IOException, InterruptedException, nodeManager = new SimpleMockNodeManager(); eventQueue = new EventQueue(); SCMHAManager scmhaManager = SCMHAManagerStub.getInstance(true); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); PipelineManager pipelineManager = mock(PipelineManager.class); when(pipelineManager.containsPipeline(any(PipelineID.class))) .thenReturn(true); @@ -277,8 +276,7 @@ private void createReplicationManager(ReplicationManagerConfiguration rmConf, SCMHAManager scmHAManager = SCMHAManagerStub .getInstance(true, new SCMDBTransactionBufferImpl()); - dbStore = DBStoreBuilder.createDBStore( - config, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(config, SCMDBDefinition.get()); LegacyReplicationManager legacyRM = new LegacyReplicationManager( config, containerManager, ratisContainerPlacementPolicy, eventQueue, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestReplicationAnnotation.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestReplicationAnnotation.java index a5a2054a8ae..049f38480d8 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestReplicationAnnotation.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestReplicationAnnotation.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.scm.RemoveSCMRequest; import org.apache.hadoop.hdds.scm.container.ContainerStateManager; import org.apache.ratis.grpc.GrpcTlsConfig; +import org.apache.ratis.protocol.RaftPeerId; import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.apache.ratis.server.RaftServer; import org.junit.jupiter.api.BeforeEach; @@ -31,6 +32,7 @@ import java.io.IOException; import java.lang.reflect.Proxy; import java.util.List; +import java.util.UUID; import java.util.concurrent.ExecutionException; import static org.assertj.core.api.Assertions.assertThat; @@ -111,6 +113,11 @@ public SCMStateMachine 
getSCMStateMachine() { public GrpcTlsConfig getGrpcTlsConfig() { return null; } + + @Override + public RaftPeerId getLeaderId() { + return RaftPeerId.valueOf(UUID.randomUUID().toString()); + } }; } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisServerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisServerImpl.java new file mode 100644 index 00000000000..6919ce41ed1 --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisServerImpl.java @@ -0,0 +1,108 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.ha; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.security.SecurityConfig; +import org.apache.ratis.conf.RaftProperties; +import org.apache.ratis.protocol.RaftPeer; +import org.apache.ratis.protocol.RaftPeerId; +import org.apache.ratis.server.RaftServer; +import org.junit.jupiter.api.Test; +import org.mockito.MockedConstruction; +import org.mockito.MockedStatic; + +import java.util.UUID; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockConstruction; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + +/** + * Test for SCM Ratis Server Implementation. + */ +public class TestSCMRatisServerImpl { + + @Test + public void testGetLeaderId() throws Exception { + + try ( + MockedConstruction mockedSecurityConfigConstruction = mockConstruction(SecurityConfig.class); + MockedStatic staticMockedRaftServer = mockStatic(RaftServer.class); + MockedStatic staticMockedRatisUtil = mockStatic(RatisUtil.class); + ) { + // given + ConfigurationSource conf = mock(ConfigurationSource.class); + StorageContainerManager scm = mock(StorageContainerManager.class); + String clusterId = "CID-" + UUID.randomUUID(); + when(scm.getClusterId()).thenReturn(clusterId); + SCMHADBTransactionBuffer dbTransactionBuffer = mock(SCMHADBTransactionBuffer.class); + + RaftServer.Builder raftServerBuilder = mock(RaftServer.Builder.class); + when(raftServerBuilder.setServerId(any())).thenReturn(raftServerBuilder); + when(raftServerBuilder.setProperties(any())).thenReturn(raftServerBuilder); + when(raftServerBuilder.setStateMachineRegistry(any())).thenReturn(raftServerBuilder); + when(raftServerBuilder.setOption(any())).thenReturn(raftServerBuilder); + when(raftServerBuilder.setGroup(any())).thenReturn(raftServerBuilder); + when(raftServerBuilder.setParameters(any())).thenReturn(raftServerBuilder); + + RaftServer raftServer = mock(RaftServer.class); + + RaftServer.Division division = mock(RaftServer.Division.class); + when(raftServer.getDivision(any())).thenReturn(division); + + SCMStateMachine scmStateMachine = mock(SCMStateMachine.class); + when(division.getStateMachine()).thenReturn(scmStateMachine); + + when(raftServerBuilder.build()).thenReturn(raftServer); + + staticMockedRaftServer.when(RaftServer::newBuilder).thenReturn(raftServerBuilder); + + RaftProperties raftProperties = mock(RaftProperties.class); + staticMockedRatisUtil.when(() -> RatisUtil.newRaftProperties(conf)).thenReturn(raftProperties); + + SecurityConfig sc = new SecurityConfig(conf); + when(sc.isSecurityEnabled()).thenReturn(false); + + SCMRatisServerImpl scmRatisServer = spy(new SCMRatisServerImpl(conf, scm, dbTransactionBuffer)); + doReturn(RaftPeer.newBuilder().setId(RaftPeerId.valueOf("peer1")).build()).when(scmRatisServer).getLeader(); + + // when + RaftPeerId 
leaderId = scmRatisServer.getLeaderId(); + + // then + assertEquals(RaftPeerId.valueOf("peer1"), leaderId); + + // but when + doReturn(null).when(scmRatisServer).getLeader(); + leaderId = scmRatisServer.getLeaderId(); + + // then + assertNull(leaderId); + } + } + +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java index 92509d22685..4e69f46b6e9 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java @@ -49,7 +49,7 @@ public class TestStatefulServiceStateManagerImpl { void setup(@TempDir File testDir) throws IOException { conf = SCMTestUtils.getConf(testDir); conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - dbStore = DBStoreBuilder.createDBStore(conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); statefulServiceConfig = SCMDBDefinition.STATEFUL_SERVICE_CONFIG.getTable(dbStore); scmhaManager = SCMHAManagerStub.getInstance(true, dbStore); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/OldPipelineIDCodecForTesting.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/OldPipelineIDCodecForTesting.java index fb80fbbee78..f09bb43d4cf 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/OldPipelineIDCodecForTesting.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/OldPipelineIDCodecForTesting.java @@ -30,6 +30,10 @@ * Codec to serialize / deserialize PipelineID. 
*/ public class OldPipelineIDCodecForTesting implements Codec { + @Override + public Class getTypeClass() { + return PipelineID.class; + } @Override public byte[] toPersistedFormat(PipelineID object) throws IOException { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/OldX509CertificateCodecForTesting.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/OldX509CertificateCodecForTesting.java index 67593dc7778..3a8fc9a9632 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/OldX509CertificateCodecForTesting.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/OldX509CertificateCodecForTesting.java @@ -45,6 +45,11 @@ private OldX509CertificateCodecForTesting() { // singleton } + @Override + public Class getTypeClass() { + return X509Certificate.class; + } + @Override public byte[] toPersistedFormat(X509Certificate object) throws IOException { try { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java index f3a303cad73..0862c46e838 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java @@ -100,8 +100,7 @@ public class TestContainerPlacement { public void setUp() throws Exception { conf = getConf(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); scmhaManager = SCMHAManagerStub.getInstance(true); sequenceIdGen = new SequenceIdGenerator( conf, scmhaManager, SCMDBDefinition.SEQUENCE_ID.getTable(dbStore)); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java index 4511ffea5d2..12cb37b8409 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java @@ -237,21 +237,15 @@ public void testNodesCanBeDecommissionedAndRecommissionedMixedPorts() // same IP so we have 3 registered from the same host and 2 distinct ports. 
DatanodeDetails sourceDN = dns.get(9); int ratisPort = sourceDN - .getPort(DatanodeDetails.Port.Name.RATIS).getValue(); + .getRatisPort().getValue(); DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); builder.setUuid(UUID.randomUUID()) .setHostName(sourceDN.getHostName()) .setIpAddress(sourceDN.getIpAddress()) - .addPort(DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, - sourceDN.getPort(DatanodeDetails.Port.Name.STANDALONE) - .getValue() + 1)) - .addPort(DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, - ratisPort + 1)) - .addPort(DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, - sourceDN.getPort(DatanodeDetails.Port.Name.REST).getValue() + 1)) + .addPort(DatanodeDetails.newStandalonePort(sourceDN.getStandalonePort() + .getValue() + 1)) + .addPort(DatanodeDetails.newRatisPort(ratisPort + 1)) + .addPort(DatanodeDetails.newRestPort(sourceDN.getRestPort().getValue() + 1)) .setNetworkLocation(sourceDN.getNetworkLocation()); DatanodeDetails extraDN = builder.build(); dns.add(extraDN); @@ -440,6 +434,10 @@ public void testInsufficientNodeDecommissionThrowsExceptionForRatis() throws error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), dns.get(2).getIpAddress(), dns.get(3).getIpAddress(), dns.get(4).getIpAddress()), false); assertTrue(error.get(0).getHostname().contains("AllHosts")); + String errorMsg = String.format("%d IN-SERVICE HEALTHY and %d not IN-SERVICE or not HEALTHY nodes.", 5, 0); + assertTrue(error.get(0).getError().contains(errorMsg)); + errorMsg = String.format("Cannot decommission as a minimum of %d IN-SERVICE HEALTHY nodes are required", 3); + assertTrue(error.get(0).getError().contains(errorMsg)); assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, @@ -489,6 +487,10 @@ public void testInsufficientNodeDecommissionThrowsExceptionForEc() throws error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), false); assertTrue(error.get(0).getHostname().contains("AllHosts")); + String errorMsg = String.format("%d IN-SERVICE HEALTHY and %d not IN-SERVICE or not HEALTHY nodes.", 5, 0); + assertTrue(error.get(0).getError().contains(errorMsg)); + errorMsg = String.format("Cannot decommission as a minimum of %d IN-SERVICE HEALTHY nodes are required", 5); + assertTrue(error.get(0).getError().contains(errorMsg)); assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), true); @@ -537,6 +539,10 @@ public void testInsufficientNodeDecommissionThrowsExceptionRatisAndEc() throws error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), false); assertTrue(error.get(0).getHostname().contains("AllHosts")); + String errorMsg = String.format("%d IN-SERVICE HEALTHY and %d not IN-SERVICE or not HEALTHY nodes.", 5, 0); + assertTrue(error.get(0).getError().contains(errorMsg)); + errorMsg = String.format("Cannot decommission as a minimum of %d IN-SERVICE HEALTHY nodes are required", 5); + assertTrue(error.get(0).getError().contains(errorMsg)); assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), true); @@ -637,6 +643,7 @@ public void testInsufficientNodeDecommissionChecksForNNF() throws error = 
decom.decommissionNodes(Arrays.asList(dns.get(0).getIpAddress(), dns.get(1).getIpAddress(), dns.get(2).getIpAddress()), false); assertFalse(error.get(0).getHostname().contains("AllHosts")); + assertTrue(error.get(0).getError().contains("The host was not found in SCM")); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, @@ -673,6 +680,11 @@ public void testInsufficientNodeMaintenanceThrowsExceptionForRatis() throws error = decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress(), dns.get(2).getIpAddress(), dns.get(3).getIpAddress(), dns.get(4).getIpAddress()), 100, false); assertTrue(error.get(0).getHostname().contains("AllHosts")); + String errorMsg = String.format("%d IN-SERVICE HEALTHY and %d not IN-SERVICE or not HEALTHY nodes.", 5, 0); + assertTrue(error.get(0).getError().contains(errorMsg)); + errorMsg = String.format("Cannot enter maintenance mode as a minimum of %d IN-SERVICE HEALTHY nodes are required", + 2); + assertTrue(error.get(0).getError().contains(errorMsg)); assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, @@ -768,6 +780,11 @@ public void testInsufficientNodeMaintenanceThrowsExceptionForEc() throws error = decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress(), dns.get(2).getIpAddress()), 100, false); assertTrue(error.get(0).getHostname().contains("AllHosts")); + String errorMsg = String.format("%d IN-SERVICE HEALTHY and %d not IN-SERVICE or not HEALTHY nodes.", 5, 0); + assertTrue(error.get(0).getError().contains(errorMsg)); + errorMsg = String.format("Cannot enter maintenance mode as a minimum of %d IN-SERVICE HEALTHY nodes are required", + 4); + assertTrue(error.get(0).getError().contains(errorMsg)); assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, @@ -869,6 +886,11 @@ public void testInsufficientNodeMaintenanceThrowsExceptionForRatisAndEc() throws // it should not be allowed as for EC, maintenance.remaining.redundancy is 2 => 3+2=5 DNs are required error = decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress()), 100, false); assertTrue(error.get(0).getHostname().contains("AllHosts")); + String errorMsg = String.format("%d IN-SERVICE HEALTHY and %d not IN-SERVICE or not HEALTHY nodes.", 5, 0); + assertTrue(error.get(0).getError().contains(errorMsg)); + errorMsg = String.format("Cannot enter maintenance mode as a minimum of %d IN-SERVICE HEALTHY nodes are required", + 5); + assertTrue(error.get(0).getError().contains(errorMsg)); assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); @@ -1053,12 +1075,9 @@ private List generateDatanodes() { builder.setUuid(UUID.randomUUID()) .setHostName(multiDn.getHostName()) .setIpAddress(multiDn.getIpAddress()) - .addPort(DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 3456)) - .addPort(DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 4567)) - .addPort(DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 5678)) + .addPort(DatanodeDetails.newStandalonePort(3456)) + .addPort(DatanodeDetails.newRatisPort(4567)) + .addPort(DatanodeDetails.newRestPort(5678)) .setNetworkLocation(multiDn.getNetworkLocation()); DatanodeDetails dn 
= builder.build(); @@ -1072,16 +1091,9 @@ private List generateDatanodes() { builder.setUuid(UUID.randomUUID()) .setHostName(duplicatePorts.getHostName()) .setIpAddress(duplicatePorts.getIpAddress()) - .addPort(DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, - duplicatePorts.getPort(DatanodeDetails.Port.Name.STANDALONE) - .getValue())) - .addPort(DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, - duplicatePorts.getPort(DatanodeDetails.Port.Name.RATIS).getValue())) - .addPort(DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, - duplicatePorts.getPort(DatanodeDetails.Port.Name.REST).getValue())) + .addPort(DatanodeDetails.newStandalonePort(duplicatePorts.getStandalonePort().getValue())) + .addPort(DatanodeDetails.newRatisPort(duplicatePorts.getRatisPort().getValue())) + .addPort(DatanodeDetails.newRestPort(duplicatePorts.getRestPort().getValue())) .setNetworkLocation(multiDn.getNetworkLocation()); dn = builder.build(); dns.add(dn); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java index cc9133cf684..6d11cb5fe58 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java @@ -123,6 +123,7 @@ import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.EnumSource; import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; import org.mockito.ArgumentCaptor; @@ -850,15 +851,12 @@ void testScmHandleJvmPause() throws Exception { } } - @Test - public void testProcessLayoutVersion() throws IOException { - // TODO: Refactor this class to use org.junit.jupiter so test - // parameterization can be used. - for (FinalizationCheckpoint checkpoint: FinalizationCheckpoint.values()) { - LOG.info("Testing with SCM finalization checkpoint {}", checkpoint); - testProcessLayoutVersionLowerMlv(checkpoint); - testProcessLayoutVersionReportHigherMlv(checkpoint); - } + @ParameterizedTest + @EnumSource(FinalizationCheckpoint.class) + public void testProcessLayoutVersion(FinalizationCheckpoint checkpoint) throws IOException { + LOG.info("Testing with SCM finalization checkpoint {}", checkpoint); + testProcessLayoutVersionLowerMlv(checkpoint); + testProcessLayoutVersionReportHigherMlv(checkpoint); } // Currently invoked by testProcessLayoutVersion. 
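The hunk above replaces a hand-rolled loop over FinalizationCheckpoint.values() with a JUnit 5 parameterized test. A minimal, self-contained sketch of that pattern, using a stand-in enum rather than the Ozone type:

    import org.junit.jupiter.api.Assertions;
    import org.junit.jupiter.params.ParameterizedTest;
    import org.junit.jupiter.params.provider.EnumSource;

    class EnumSourceSketchTest {
      // Stand-in enum; in the patch the parameter is FinalizationCheckpoint.
      enum Checkpoint { NOT_STARTED, IN_PROGRESS, COMPLETE }

      @ParameterizedTest
      @EnumSource(Checkpoint.class)  // one reported invocation per enum constant
      void runsOncePerCheckpoint(Checkpoint checkpoint) {
        Assertions.assertNotNull(checkpoint);
      }
    }

Each constant shows up as a separate test result, so a failure for one checkpoint no longer hides the remaining ones the way the old for-loop did.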
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java index 385e1c65316..9908210e074 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java @@ -66,8 +66,7 @@ public class TestPipelineDatanodesIntersection { public void initialize() throws IOException { conf = SCMTestUtils.getConf(testDir); end = false; - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); } @AfterEach diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java index e9407d6a941..1dfbfd32785 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java @@ -136,7 +136,7 @@ void init(@TempDir File testDir, @TempDir File dbDir) throws Exception { // placement policy (Rack Scatter), so just use the random one. conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_EC_IMPL_KEY, SCMContainerPlacementRandom.class.getName()); - dbStore = DBStoreBuilder.createDBStore(conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); nodeManager = new MockNodeManager(true, 20); maxPipelineCount = nodeManager.getNodeCount( HddsProtos.NodeOperationalState.IN_SERVICE, @@ -358,7 +358,8 @@ public void testClosePipelineShouldFailOnFollower() throws Exception { public void testPipelineReport() throws Exception { try (PipelineManagerImpl pipelineManager = createPipelineManager(true)) { SCMSafeModeManager scmSafeModeManager = - new SCMSafeModeManager(conf, new ArrayList<>(), null, pipelineManager, + new SCMSafeModeManager(conf, new ArrayList<>(), + mock(ContainerManager.class), pipelineManager, new EventQueue(), serviceManager, scmContext); Pipeline pipeline = pipelineManager .createPipeline(RatisReplicationConfig @@ -469,7 +470,7 @@ public void testPipelineOpenOnlyWhenLeaderReported() throws Exception { SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager(new OzoneConfiguration(), new ArrayList<>(), - null, pipelineManager, new EventQueue(), + mock(ContainerManager.class), pipelineManager, new EventQueue(), serviceManager, scmContext); PipelineReportHandler pipelineReportHandler = new PipelineReportHandler(scmSafeModeManager, pipelineManager, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementFactory.java index 96f62432b31..82fcc01d7ee 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementFactory.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementFactory.java @@ -135,8 +135,7 @@ private void setupRacks(int datanodeCount, int nodesPerRack, .thenReturn(dn); } - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = 
DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); scmhaManager = SCMHAManagerStub.getInstance(true); stateManager = PipelineStateManagerImpl.newBuilder() diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java index 0f9ec84f033..722bb260859 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java @@ -112,8 +112,7 @@ public void init() throws Exception { conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, 10, StorageUnit.MB); nodeManager.setNumPipelinePerDatanode(PIPELINE_LOAD_LIMIT); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); scmhaManager = SCMHAManagerStub.getInstance(true); stateManager = PipelineStateManagerImpl.newBuilder() .setPipelineStore(SCMDBDefinition.PIPELINES.getTable(dbStore)) @@ -398,9 +397,9 @@ private DatanodeDetails overwriteLocationInNode( .setUuid(datanode.getUuid()) .setHostName(datanode.getHostName()) .setIpAddress(datanode.getIpAddress()) - .addPort(datanode.getPort(DatanodeDetails.Port.Name.STANDALONE)) - .addPort(datanode.getPort(DatanodeDetails.Port.Name.RATIS)) - .addPort(datanode.getPort(DatanodeDetails.Port.Name.REST)) + .addPort(datanode.getStandalonePort()) + .addPort(datanode.getRatisPort()) + .addPort(datanode.getRestPort()) .setNetworkLocation(node.getNetworkLocation()).build(); return result; } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java index 9feb9e1f0a9..4a0baa2daca 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java @@ -68,8 +68,7 @@ public class TestPipelineStateManagerImpl { @BeforeEach public void init() throws Exception { final OzoneConfiguration conf = SCMTestUtils.getConf(testDir); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); SCMHAManager scmhaManager = SCMHAManagerStub.getInstance(true); NodeManager nodeManager = new MockNodeManager(true, 10); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java index 5350c0da86e..94c0d45276c 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java @@ -93,8 +93,7 @@ public void init(int maxPipelinePerNode, OzoneConfiguration conf) public void init(int maxPipelinePerNode, OzoneConfiguration conf, File dir) throws Exception { conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.getAbsolutePath()); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); nodeManager = new 
MockNodeManager(true, 10); nodeManager.setNumPipelinePerDatanode(maxPipelinePerNode); SCMHAManager scmhaManager = SCMHAManagerStub.getInstance(true); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java index b69ebedb04d..7fb31d2c768 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java @@ -60,8 +60,7 @@ public class TestSimplePipelineProvider { public void init() throws Exception { nodeManager = new MockNodeManager(true, 10); final OzoneConfiguration conf = SCMTestUtils.getConf(testDir); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); SCMHAManager scmhaManager = SCMHAManagerStub.getInstance(true); stateManager = PipelineStateManagerImpl.newBuilder() .setPipelineStore(SCMDBDefinition.PIPELINES.getTable(dbStore)) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java index 4f86450d03e..78aab4843cf 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java @@ -128,8 +128,7 @@ void setup(@TempDir File testDir) throws IOException { containers = new HashMap<>(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); scmhaManager = SCMHAManagerStub.getInstance(true); pipelineManager = new MockPipelineManager(dbStore, scmhaManager, nodeManager); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java index 98f16394902..13eb4be724c 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; @@ -50,6 +51,8 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; /** * This class tests HealthyPipelineSafeMode rule. 
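Several hunks above (and more below) swap new SCMDBDefinition() for SCMDBDefinition.get(). A hedged sketch of the shared-instance accessor shape this implies; the class name and field are illustrative, not the actual SCMDBDefinition internals:

    // Illustrative only: a stateless definition object can be shared by all callers.
    public final class ExampleDBDefinition {
      private static final ExampleDBDefinition INSTANCE = new ExampleDBDefinition();

      private ExampleDBDefinition() {
        // private constructor: call sites go through get() instead of new
      }

      public static ExampleDBDefinition get() {
        return INSTANCE;
      }
    }

Call sites would then read along the lines of DBStoreBuilder.createDBStore(conf, ExampleDBDefinition.get()) instead of constructing a fresh definition object per test.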
@@ -69,6 +72,8 @@ public void testHealthyPipelineSafeModeRuleWithNoPipelines() OzoneConfiguration config = new OzoneConfiguration(); MockNodeManager nodeManager = new MockNodeManager(true, 0); + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); config.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempFile.getPath()); // enable pipeline check config.setBoolean( @@ -94,7 +99,7 @@ public void testHealthyPipelineSafeModeRuleWithNoPipelines() pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS, mockRatisProvider); SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager( - config, containers, null, pipelineManager, eventQueue, + config, containers, containerManager, pipelineManager, eventQueue, serviceManager, scmContext); HealthyPipelineSafeModeRule healthyPipelineSafeModeRule = @@ -121,6 +126,8 @@ public void testHealthyPipelineSafeModeRuleWithPipelines() throws Exception { // stale and last one is dead, and this repeats. So for a 12 node, 9 // healthy, 2 stale and one dead. MockNodeManager nodeManager = new MockNodeManager(true, 12); + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); config.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempFile.getPath()); // enable pipeline check config.setBoolean( @@ -172,7 +179,7 @@ public void testHealthyPipelineSafeModeRuleWithPipelines() throws Exception { MockRatisPipelineProvider.markPipelineHealthy(pipeline3); SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager( - config, containers, null, pipelineManager, eventQueue, + config, containers, containerManager, pipelineManager, eventQueue, serviceManager, scmContext); HealthyPipelineSafeModeRule healthyPipelineSafeModeRule = @@ -215,6 +222,8 @@ public void testHealthyPipelineSafeModeRuleWithMixedPipelines() // stale and last one is dead, and this repeats. So for a 12 node, 9 // healthy, 2 stale and one dead. 
MockNodeManager nodeManager = new MockNodeManager(true, 12); + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); config.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempFile.getPath()); // enable pipeline check config.setBoolean( @@ -266,7 +275,7 @@ public void testHealthyPipelineSafeModeRuleWithMixedPipelines() MockRatisPipelineProvider.markPipelineHealthy(pipeline3); SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager( - config, containers, null, pipelineManager, eventQueue, + config, containers, containerManager, pipelineManager, eventQueue, serviceManager, scmContext); HealthyPipelineSafeModeRule healthyPipelineSafeModeRule = diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java index e070a2b6036..76bafa8b1fb 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; @@ -58,6 +59,8 @@ import org.slf4j.LoggerFactory; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; /** * This class tests OneReplicaPipelineSafeModeRule. 
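The safemode tests above now hand SCMSafeModeManager a mocked ContainerManager whose getContainers() is stubbed, instead of passing null. A minimal Mockito sketch of that stubbing pattern, with a hypothetical interface standing in for the real dependency:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import java.util.Collections;
    import java.util.List;

    class MockInsteadOfNullSketch {
      // Hypothetical stand-in for the ContainerManager dependency.
      interface ContainerSource {
        List<String> getContainers();
      }

      ContainerSource newStubbedSource() {
        ContainerSource source = mock(ContainerSource.class);
        // Stub the one call the code under test makes, so it never dereferences a null collaborator.
        when(source.getContainers()).thenReturn(Collections.emptyList());
        return source;
      }
    }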
@@ -86,7 +89,8 @@ private void setup(int nodes, int pipelineFactorThreeCount, List containers = new ArrayList<>(); containers.addAll(HddsTestUtils.getContainerInfo(1)); mockNodeManager = new MockNodeManager(true, nodes); - + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); eventQueue = new EventQueue(); serviceManager = new SCMServiceManager(); scmContext = SCMContext.emptyContext(); @@ -116,7 +120,7 @@ private void setup(int nodes, int pipelineFactorThreeCount, HddsProtos.ReplicationFactor.ONE); SCMSafeModeManager scmSafeModeManager = - new SCMSafeModeManager(ozoneConfiguration, containers, null, + new SCMSafeModeManager(ozoneConfiguration, containers, containerManager, pipelineManager, eventQueue, serviceManager, scmContext); rule = scmSafeModeManager.getOneReplicaPipelineSafeModeRule(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java index 319caabe40a..fc8ec9c1912 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java @@ -20,6 +20,7 @@ import java.io.File; import java.io.IOException; import java.time.Clock; +import java.time.ZoneId; import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Collections; @@ -28,6 +29,7 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Stream; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.HddsConfigKeys; @@ -39,7 +41,10 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.container.ContainerManagerImpl; import org.apache.hadoop.hdds.scm.container.MockNodeManager; +import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaPendingOps; import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; import org.apache.hadoop.hdds.scm.ha.SCMContext; @@ -52,6 +57,7 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider; import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerImpl; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher; +import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer; import org.apache.hadoop.hdds.server.events.EventHandler; import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.hdds.server.events.EventQueue; @@ -63,14 +69,19 @@ import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; import static 
org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.params.provider.Arguments.arguments; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; /** Test class for SCMSafeModeManager. */ @@ -96,8 +107,7 @@ public void setUp() throws IOException { config = new OzoneConfiguration(); config.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false); - config.set(HddsConfigKeys.OZONE_METADATA_DIRS, - tempDir.getAbsolutePath().toString()); + config.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempDir.getAbsolutePath()); scmMetadataStore = new SCMMetadataStoreImpl(config); } @@ -108,19 +118,10 @@ public void destroyDbStore() throws Exception { } } - @Test - public void testSafeModeState() throws Exception { - // Test 1: test for 0 containers - testSafeMode(0); - - // Test 2: test for 20 containers - testSafeMode(20); - } - - @Test - public void testSafeModeStateWithNullContainers() { - new SCMSafeModeManager(config, Collections.emptyList(), - null, null, queue, serviceManager, scmContext); + @ParameterizedTest + @ValueSource(ints = {0, 20}) + public void testSafeModeState(int numContainers) throws Exception { + testSafeMode(numContainers); } private void testSafeMode(int numContainers) throws Exception { @@ -132,14 +133,18 @@ private void testSafeMode(int numContainers) throws Exception { container.setState(HddsProtos.LifeCycleState.CLOSED); container.setNumberOfKeys(10); } + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - config, containers, null, null, queue, + config, containers, containerManager, null, queue, serviceManager, scmContext); assertTrue(scmSafeModeManager.getInSafeMode()); validateRuleStatus("DatanodeSafeModeRule", "registered datanodes 0"); - queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - HddsTestUtils.createNodeRegistrationContainerReport(containers)); + SCMDatanodeProtocolServer.NodeRegistrationContainerReport nodeRegistrationContainerReport = + HddsTestUtils.createNodeRegistrationContainerReport(containers); + queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, nodeRegistrationContainerReport); + queue.fireEvent(SCMEvents.CONTAINER_REGISTRATION_REPORT, nodeRegistrationContainerReport); long cutOff = (long) Math.ceil(numContainers * config.getDouble( HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT, @@ -167,8 +172,10 @@ public void testSafeModeExitRule() throws Exception { container.setState(HddsProtos.LifeCycleState.CLOSED); container.setNumberOfKeys(10); } + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - config, containers, null, null, queue, + config, containers, containerManager, null, queue, serviceManager, scmContext); long cutOff = (long) Math.ceil(numContainers * config.getDouble( @@ -180,7 +187,7 @@ public void testSafeModeExitRule() throws Exception { assertTrue(scmSafeModeManager.getInSafeMode()); validateRuleStatus("ContainerSafeModeRule", - "% of containers with at least one reported"); + "0.00% of [Ratis] Containers(0 / 100) with at least one reported"); testContainerThreshold(containers.subList(0, 25), 0.25); assertEquals(25, scmSafeModeManager.getSafeModeMetrics() .getCurrentContainersWithOneReplicaReportedCount().value()); @@ -215,36 +222,6 @@ private OzoneConfiguration createConf(double healthyPercent, return conf; } - @Test - 
public void testSafeModeExitRuleWithPipelineAvailabilityCheck1() - throws Exception { - testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 30, 8, 0.90, 1); - } - - @Test - public void testSafeModeExitRuleWithPipelineAvailabilityCheck2() - throws Exception { - testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 90, 22, 0.10, 0.9); - } - - @Test - public void testSafeModeExitRuleWithPipelineAvailabilityCheck3() - throws Exception { - testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 30, 8, 0, 0.9); - } - - @Test - public void testSafeModeExitRuleWithPipelineAvailabilityCheck4() - throws Exception { - testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 90, 22, 0, 0); - } - - @Test - public void testSafeModeExitRuleWithPipelineAvailabilityCheck5() - throws Exception { - testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 90, 22, 0, 0.5); - } - @ParameterizedTest @CsvSource(value = {"100,0.9,false", "0.9,200,false", "0.9,0.1,true"}) public void testHealthyPipelinePercentWithIncorrectValue(double healthyPercent, @@ -264,11 +241,26 @@ public void testHealthyPipelinePercentWithIncorrectValue(double healthyPercent, scmContext, serviceManager, Clock.system(ZoneOffset.UTC)); + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, - () -> new SCMSafeModeManager(conf, containers, null, pipelineManager, queue, serviceManager, scmContext)); + () -> new SCMSafeModeManager(conf, containers, containerManager, + pipelineManager, queue, serviceManager, scmContext)); assertThat(exception).hasMessageEndingWith("value should be >= 0.0 and <= 1.0"); } + private static Stream testCaseForSafeModeExitRuleWithPipelineAvailabilityCheck() { + return Stream.of( + Arguments.of(100, 30, 8, 0.90, 1), + Arguments.of(100, 90, 22, 0.10, 0.9), + Arguments.of(100, 30, 8, 0, 0.9), + Arguments.of(100, 90, 22, 0, 0), + Arguments.of(100, 90, 22, 0, 0.5) + ); + } + + @ParameterizedTest + @MethodSource("testCaseForSafeModeExitRuleWithPipelineAvailabilityCheck") public void testSafeModeExitRuleWithPipelineAvailabilityCheck( int containerCount, int nodeCount, int pipelineCount, double healthyPipelinePercent, double oneReplicaPercent) @@ -315,8 +307,11 @@ public void testSafeModeExitRuleWithPipelineAvailabilityCheck( container.setState(HddsProtos.LifeCycleState.CLOSED); } + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); + scmSafeModeManager = new SCMSafeModeManager( - conf, containers, null, pipelineManager, queue, serviceManager, + conf, containers, containerManager, pipelineManager, queue, serviceManager, scmContext); assertTrue(scmSafeModeManager.getInSafeMode()); @@ -449,18 +444,19 @@ public void testDisableSafeMode() { OzoneConfiguration conf = new OzoneConfiguration(config); conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, false); PipelineManager pipelineManager = mock(PipelineManager.class); + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - conf, containers, null, pipelineManager, queue, serviceManager, + conf, containers, containerManager, pipelineManager, queue, serviceManager, scmContext); assertFalse(scmSafeModeManager.getInSafeMode()); } - @Test - public void testSafeModeDataNodeExitRule() throws Exception { + @ParameterizedTest + 
@ValueSource(ints = {0, 3, 5}) + public void testSafeModeDataNodeExitRule(int numberOfDns) throws Exception { containers = new ArrayList<>(); - testSafeModeDataNodes(0); - testSafeModeDataNodes(3); - testSafeModeDataNodes(5); + testSafeModeDataNodes(numberOfDns); } /** @@ -489,8 +485,11 @@ public void testContainerSafeModeRule() throws Exception { container.setNumberOfKeys(0); } + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); + scmSafeModeManager = new SCMSafeModeManager( - config, containers, null, null, queue, serviceManager, scmContext); + config, containers, containerManager, null, queue, serviceManager, scmContext); assertTrue(scmSafeModeManager.getInSafeMode()); @@ -510,11 +509,86 @@ public void testContainerSafeModeRule() throws Exception { 100, 1000 * 5); } + // We simulate common EC types: EC-2-2-1024K, EC-3-2-1024K, EC-6-3-1024K. + static Stream processECDataParityCombination() { + Stream args = Stream.of(arguments(2, 2), + arguments(3, 2), arguments(6, 3)); + return args; + } + + @ParameterizedTest + @MethodSource("processECDataParityCombination") + public void testContainerSafeModeRuleEC(int data, int parity) throws Exception { + containers = new ArrayList<>(); + + // We generate 100 EC Containers. + containers.addAll(HddsTestUtils.getECContainerInfo(25 * 4, data, parity)); + + // Prepare the data for the container. + // We have prepared 25 containers in the CLOSED state and 75 containers in the OPEN state. + // Out of the 25 containers, only 20 containers have a NumberOfKeys greater than 0. + for (ContainerInfo container : containers.subList(0, 25)) { + container.setState(HddsProtos.LifeCycleState.CLOSED); + container.setNumberOfKeys(10); + } + + for (ContainerInfo container : containers.subList(25, 100)) { + container.setState(HddsProtos.LifeCycleState.OPEN); + container.setNumberOfKeys(10); + } + + // Set the last 5 closed containers to be empty + for (ContainerInfo container : containers.subList(20, 25)) { + container.setNumberOfKeys(0); + } + + for (ContainerInfo container : containers) { + scmMetadataStore.getContainerTable().put(container.containerID(), container); + } + + // Declare SCMSafeModeManager and confirm entry into Safe Mode. + EventQueue eventQueue = new EventQueue(); + MockNodeManager nodeManager = new MockNodeManager(true, 0); + PipelineManager pipelineManager = PipelineManagerImpl.newPipelineManager( + config, + SCMHAManagerStub.getInstance(true), + nodeManager, + scmMetadataStore.getPipelineTable(), + eventQueue, + scmContext, + serviceManager, + Clock.system(ZoneOffset.UTC)); + + ContainerManager containerManager = new ContainerManagerImpl(config, + SCMHAManagerStub.getInstance(true), null, pipelineManager, + scmMetadataStore.getContainerTable(), + new ContainerReplicaPendingOps(Clock.system(ZoneId.systemDefault()))); + + scmSafeModeManager = new SCMSafeModeManager( + config, containers, containerManager, pipelineManager, queue, serviceManager, scmContext); + assertTrue(scmSafeModeManager.getInSafeMode()); + + // Only 20 containers are involved in the calculation, + // so when 10 containers complete registration, our threshold is 50%. + testECContainerThreshold(containers.subList(0, 10), 0.5, data); + assertTrue(scmSafeModeManager.getInSafeMode()); + + // When the registration of the remaining containers is completed, + // the threshold will reach 100%. 
+ testECContainerThreshold(containers.subList(10, 20), 1.0, data); + + ContainerSafeModeRule containerSafeModeRule = + scmSafeModeManager.getContainerSafeModeRule(); + assertTrue(containerSafeModeRule.validate()); + } + private void testSafeModeDataNodes(int numOfDns) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(config); conf.setInt(HddsConfigKeys.HDDS_SCM_SAFEMODE_MIN_DATANODE, numOfDns); + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - conf, containers, null, null, queue, + conf, containers, containerManager, null, queue, serviceManager, scmContext); // Assert SCM is in Safe mode. @@ -522,8 +596,10 @@ private void testSafeModeDataNodes(int numOfDns) throws Exception { // Register all DataNodes except last one and assert SCM is in safe mode. for (int i = 0; i < numOfDns - 1; i++) { - queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - HddsTestUtils.createNodeRegistrationContainerReport(containers)); + SCMDatanodeProtocolServer.NodeRegistrationContainerReport nodeRegistrationContainerReport = + HddsTestUtils.createNodeRegistrationContainerReport(containers); + queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, nodeRegistrationContainerReport); + queue.fireEvent(SCMEvents.CONTAINER_REGISTRATION_REPORT, nodeRegistrationContainerReport); assertTrue(scmSafeModeManager.getInSafeMode()); assertEquals(1, scmSafeModeManager.getCurrentContainerThreshold()); } @@ -543,14 +619,52 @@ private void testSafeModeDataNodes(int numOfDns) throws Exception { private void testContainerThreshold(List dnContainers, double expectedThreshold) throws Exception { + SCMDatanodeProtocolServer.NodeRegistrationContainerReport nodeRegistrationContainerReport = + HddsTestUtils.createNodeRegistrationContainerReport(dnContainers); queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - HddsTestUtils.createNodeRegistrationContainerReport(dnContainers)); + nodeRegistrationContainerReport); + queue.fireEvent(SCMEvents.CONTAINER_REGISTRATION_REPORT, + nodeRegistrationContainerReport); GenericTestUtils.waitFor(() -> { double threshold = scmSafeModeManager.getCurrentContainerThreshold(); return threshold == expectedThreshold; }, 100, 2000 * 9); } + /** + * Test ECContainer reaching SafeMode threshold. + * + * @param dnContainers + * The list of containers that need to reach the threshold. + * @param expectedThreshold + * The expected threshold. + * @param dataBlockNum + * The number of data blocks. For EC-3-2-1024K, + * we need 3 registration requests to ensure the EC Container is confirmed. + * For EC-6-3-1024K, we need 6 registration requests to ensure the EC Container is confirmed. + * @throws Exception The thrown exception message. + */ + private void testECContainerThreshold(List dnContainers, + double expectedThreshold, int dataBlockNum) throws Exception { + + // Step1. We need to ensure the number of confirmed EC data blocks + // based on the quantity of dataBlockNum. + for (int i = 0; i < dataBlockNum; i++) { + SCMDatanodeProtocolServer.NodeRegistrationContainerReport nodeRegistrationContainerReport = + HddsTestUtils.createNodeRegistrationContainerReport(dnContainers); + queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, + nodeRegistrationContainerReport); + queue.fireEvent(SCMEvents.CONTAINER_REGISTRATION_REPORT, + nodeRegistrationContainerReport); + } + + // Step2. Wait for the threshold to be reached. 
+ GenericTestUtils.waitFor(() -> { + double threshold = scmSafeModeManager.getCurrentECContainerThreshold(); + return threshold == expectedThreshold; + }, 100, 2000 * 9); + } + @Test public void testSafeModePipelineExitRule() throws Exception { containers = new ArrayList<>(); @@ -584,13 +698,18 @@ public void testSafeModePipelineExitRule() throws Exception { pipeline = pipelineManager.getPipeline(pipeline.getId()); MockRatisPipelineProvider.markPipelineHealthy(pipeline); + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - config, containers, null, pipelineManager, queue, serviceManager, + config, containers, containerManager, pipelineManager, queue, serviceManager, scmContext); - queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - HddsTestUtils.createNodeRegistrationContainerReport(containers)); + SCMDatanodeProtocolServer.NodeRegistrationContainerReport nodeRegistrationContainerReport = + HddsTestUtils.createNodeRegistrationContainerReport(containers); + queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, nodeRegistrationContainerReport); + queue.fireEvent(SCMEvents.CONTAINER_REGISTRATION_REPORT, nodeRegistrationContainerReport); + assertTrue(scmSafeModeManager.getInSafeMode()); firePipelineEvent(pipelineManager, pipeline); @@ -634,8 +753,11 @@ public void testPipelinesNotCreatedUntilPreCheckPasses() throws Exception { pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS, mockRatisProvider); + ContainerManager containerManager = mock(ContainerManager.class); + when(containerManager.getContainers()).thenReturn(containers); + scmSafeModeManager = new SCMSafeModeManager( - config, containers, null, pipelineManager, queue, serviceManager, + config, containers, containerManager, pipelineManager, queue, serviceManager, scmContext); // Assert SCM is in Safe mode. @@ -647,8 +769,10 @@ public void testPipelinesNotCreatedUntilPreCheckPasses() throws Exception { // Register all DataNodes except last one and assert SCM is in safe mode. for (int i = 0; i < numOfDns - 1; i++) { - queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - HddsTestUtils.createNodeRegistrationContainerReport(containers)); + SCMDatanodeProtocolServer.NodeRegistrationContainerReport nodeRegistrationContainerReport = + HddsTestUtils.createNodeRegistrationContainerReport(containers); + queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, nodeRegistrationContainerReport); + queue.fireEvent(SCMEvents.CONTAINER_REGISTRATION_REPORT, nodeRegistrationContainerReport); assertTrue(scmSafeModeManager.getInSafeMode()); assertFalse(scmSafeModeManager.getPreCheckComplete()); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSafeModeRuleFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSafeModeRuleFactory.java new file mode 100644 index 00000000000..837012429be --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSafeModeRuleFactory.java @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.safemode; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.ha.SCMContext; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; +import org.apache.hadoop.hdds.server.events.EventQueue; +import org.junit.jupiter.api.Test; + +import java.lang.reflect.Field; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +class TestSafeModeRuleFactory { + + @Test + public void testIllegalState() { + // If the initialization is already done by different test, we have to reset it. + try { + final Field instance = SafeModeRuleFactory.class.getDeclaredField("instance"); + instance.setAccessible(true); + instance.set(null, null); + } catch (Exception e) { + throw new RuntimeException(); + } + assertThrows(IllegalStateException.class, SafeModeRuleFactory::getInstance); + } + + @Test + public void testLoadedSafeModeRules() { + initializeSafeModeRuleFactory(); + final SafeModeRuleFactory factory = SafeModeRuleFactory.getInstance(); + + // Currently we assert the total count against hardcoded value + // as the rules are hardcoded in SafeModeRuleFactory. + + // This will be fixed once we load rules using annotation. + assertEquals(4, factory.getSafeModeRules().size(), + "The total safemode rules count doesn't match"); + + } + + @Test + public void testLoadedPreCheckRules() { + initializeSafeModeRuleFactory(); + final SafeModeRuleFactory factory = SafeModeRuleFactory.getInstance(); + + // Currently we assert the total count against hardcoded value + // as the rules are hardcoded in SafeModeRuleFactory. + + // This will be fixed once we load rules using annotation. 
+ assertEquals(1, factory.getPreCheckRules().size(), + "The total safemode rules count doesn't match"); + + } + + private void initializeSafeModeRuleFactory() { + final SCMSafeModeManager safeModeManager = mock(SCMSafeModeManager.class); + when(safeModeManager.getSafeModeMetrics()).thenReturn(mock(SafeModeMetrics.class)); + SafeModeRuleFactory.initialize(new OzoneConfiguration(), + SCMContext.emptyContext(), new EventQueue(), safeModeManager, mock( + PipelineManager.class), mock(ContainerManager.class)); + } + +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java index 7c06b79a2ff..8e21eef930e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java @@ -17,12 +17,19 @@ */ package org.apache.hadoop.hdds.scm.server; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.ReconfigurationHandler; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmResponseProto; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerManagerImpl; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; +import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocolServerSideTranslatorPB; import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics; import org.apache.hadoop.ozone.container.common.SCMTestUtils; @@ -35,9 +42,13 @@ import java.io.File; import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_READONLY_ADMINISTRATORS; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -112,4 +123,47 @@ public void testReadOnlyAdmins() throws IOException { UserGroupInformation.reset(); } } + + /** + * Tests listContainer of scm. 
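testIllegalState above clears SafeModeRuleFactory's private static instance field so getInstance() can be exercised from a clean slate. A generic sketch of that reflection reset, assuming only that the target class keeps its shared state in a private static field named "instance":

    import java.lang.reflect.Field;

    final class SingletonResetSketch {
      static void resetInstance(Class<?> singletonClass) throws ReflectiveOperationException {
        // Assumption: the shared state lives in a private static field called "instance".
        Field instance = singletonClass.getDeclaredField("instance");
        instance.setAccessible(true);
        instance.set(null, null);  // null target because the field is static
      }
    }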
+ */ + @Test + public void testScmListContainer() throws Exception { + SCMClientProtocolServer scmServer = + new SCMClientProtocolServer(new OzoneConfiguration(), + mockStorageContainerManager(), mock(ReconfigurationHandler.class)); + + assertEquals(10, scmServer.listContainer(1, 10, + null, HddsProtos.ReplicationType.RATIS, null).getContainerInfoList().size()); + // Test call from a legacy client, which uses a different method of listContainer + assertEquals(10, scmServer.listContainer(1, 10, null, + HddsProtos.ReplicationFactor.THREE).getContainerInfoList().size()); + } + + private StorageContainerManager mockStorageContainerManager() { + List infos = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + infos.add(newContainerInfoForTest()); + } + ContainerManagerImpl containerManager = mock(ContainerManagerImpl.class); + when(containerManager.getContainers()).thenReturn(infos); + StorageContainerManager storageContainerManager = mock(StorageContainerManager.class); + when(storageContainerManager.getContainerManager()).thenReturn(containerManager); + + SCMNodeDetails scmNodeDetails = mock(SCMNodeDetails.class); + when(scmNodeDetails.getClientProtocolServerAddress()).thenReturn(new InetSocketAddress("localhost", 9876)); + when(scmNodeDetails.getClientProtocolServerAddressKey()).thenReturn("test"); + when(storageContainerManager.getScmNodeDetails()).thenReturn(scmNodeDetails); + return storageContainerManager; + } + + private ContainerInfo newContainerInfoForTest() { + return new ContainerInfo.Builder() + .setContainerID(1) + .setPipelineID(PipelineID.randomId()) + .setReplicationConfig( + RatisReplicationConfig + .getInstance(HddsProtos.ReplicationFactor.THREE)) + .build(); + } } diff --git a/hadoop-hdds/test-utils/pom.xml b/hadoop-hdds/test-utils/pom.xml index f720d65bdf5..6ff87083c03 100644 --- a/hadoop-hdds/test-utils/pom.xml +++ b/hadoop-hdds/test-utils/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-test-utils - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Test Utils Apache Ozone HDDS Test Utils jar diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java index 9a3a5c7a8f1..8a770424766 100644 --- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java +++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java @@ -29,6 +29,7 @@ import java.time.Instant; import java.util.List; import java.util.Map; +import java.util.concurrent.Callable; import java.util.concurrent.TimeoutException; import com.google.common.base.Preconditions; @@ -92,7 +93,10 @@ public static Instant getTestStartTime() { * Get the (created) base directory for tests. * * @return the absolute directory + * + * @deprecated use {@link org.junit.jupiter.api.io.TempDir} instead. */ + @Deprecated public static File getTestDir() { String prop = System.getProperty(SYSPROP_TEST_DATA_DIR, DEFAULT_TEST_DATA_DIR); @@ -109,7 +113,10 @@ public static File getTestDir() { * Get an uncreated directory for tests. * * @return the absolute directory for tests. Caller is expected to create it. + * + * @deprecated use {@link org.junit.jupiter.api.io.TempDir} instead. */ + @Deprecated public static File getTestDir(String subdir) { return new File(getTestDir(), subdir).getAbsoluteFile(); } @@ -119,7 +126,10 @@ public static File getTestDir(String subdir) { * name. 
This is likely to provide a unique path for tests run in parallel * * @return the absolute directory for tests. Caller is expected to create it. + * + * @deprecated use {@link org.junit.jupiter.api.io.TempDir} instead. */ + @Deprecated public static File getRandomizedTestDir() { return new File(getRandomizedTempPath()); } @@ -131,7 +141,10 @@ public static File getRandomizedTestDir() { * * @param subpath sub path, with no leading "/" character * @return a string to use in paths + * + * @deprecated use {@link org.junit.jupiter.api.io.TempDir} instead. */ + @Deprecated public static String getTempPath(String subpath) { String prop = WINDOWS ? DEFAULT_TEST_DATA_PATH : System.getProperty(SYSPROP_TEST_DATA_DIR, DEFAULT_TEST_DATA_PATH); @@ -152,8 +165,11 @@ public static String getTempPath(String subpath) { * under the relative path {@link #DEFAULT_TEST_DATA_PATH} * * @return a string to use in paths + * + * @deprecated use {@link org.junit.jupiter.api.io.TempDir} instead. */ @SuppressWarnings("java:S2245") // no need for secure random + @Deprecated public static String getRandomizedTempPath() { return getTempPath(getCallerClass(GenericTestUtils.class).getSimpleName() + "-" + randomAlphanumeric(10)); @@ -205,6 +221,20 @@ public static void waitFor(BooleanSupplier check, int checkEveryMillis, } } + public static T assertThrows( + Class expectedType, + Callable func) { + return Assertions.assertThrows(expectedType, () -> { + final AutoCloseable closeable = func.call(); + try { + if (closeable != null) { + closeable.close(); + } + } catch (Exception ignored) { + } + }); + } + /** * @deprecated use sl4fj based version */ diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java index f2c65d14961..f4651a408f7 100644 --- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java +++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java @@ -51,7 +51,8 @@ * Copied from Hadoop and migrated to AssertJ. */ public final class MetricsAsserts { - + // workaround for HADOOP-19301. 
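An aside on the GenericTestUtils changes above: they deprecate the shared test-directory helpers in favor of JUnit 5's @TempDir and add an assertThrows overload that also closes whatever AutoCloseable the callable produced. A minimal usage sketch, assuming a hypothetical factory that returns an AutoCloseable client:

  import java.io.IOException;
  import java.nio.file.Path;

  import org.apache.ozone.test.GenericTestUtils;
  import org.junit.jupiter.api.Test;
  import org.junit.jupiter.api.io.TempDir;

  class TestExample {

    // Replaces GenericTestUtils.getTestDir()/getRandomizedTestDir(); JUnit creates and cleans it up.
    @TempDir
    Path testDir;

    @Test
    void rejectsUnwritableDir() {
      // Asserts the expected exception and, if an AutoCloseable client was created anyway, closes it.
      GenericTestUtils.assertThrows(IOException.class,
          () -> HypotheticalClientFactory.open(testDir));
    }
  }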
+ private static final MutableQuantiles QUANTILES = new MutableQuantiles(); private static final Logger LOG = LoggerFactory.getLogger(MetricsAsserts.class); private static final Offset EPSILON = Offset.offset(0.00001); private static final Offset EPSILON_FLOAT = Offset.offset(0.00001f); @@ -411,7 +412,7 @@ public static void assertQuantileGauges(String prefix, public static void assertQuantileGauges(String prefix, MetricsRecordBuilder rb, String valueName) { verify(rb).addGauge(eqName(info(prefix + "NumOps", "")), geq(0L)); - for (Quantile q : MutableQuantiles.quantiles) { + for (Quantile q : QUANTILES.getQuantiles()) { String nameTemplate = prefix + "%dthPercentile" + valueName; int percentile = (int) (100 * q.quantile); verify(rb).addGauge( @@ -432,7 +433,7 @@ public static void assertQuantileGauges(String prefix, public static void assertInverseQuantileGauges(String prefix, MetricsRecordBuilder rb, String valueName) { verify(rb).addGauge(eqName(info(prefix + "NumOps", "")), geq(0L)); - for (Quantile q : MutableQuantiles.quantiles) { + for (Quantile q : QUANTILES.getQuantiles()) { String nameTemplate = prefix + "%dthInversePercentile" + valueName; int percentile = (int) (100 * q.quantile); verify(rb).addGauge( diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml index daf6f3d40f4..5b77f394c96 100644 --- a/hadoop-hdds/tools/pom.xml +++ b/hadoop-hdds/tools/pom.xml @@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-tools - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Tools Apache Ozone HDDS Tools jar @@ -179,20 +179,20 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> maven-compiler-plugin - - org.apache.ozone - hdds-config - ${hdds.version} - org.kohsuke.metainf-services metainf-services ${metainf-services.version} + + info.picocli + picocli-codegen + ${picocli.version} + - org.apache.hadoop.hdds.conf.ConfigFileGenerator org.kohsuke.metainf_services.AnnotationProcessorImpl + picocli.codegen.aot.graalvm.processor.NativeImageConfigGeneratorProcessor @@ -207,8 +207,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> Only selected annotation processors are enabled, see configuration of maven-compiler-plugin. - org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator + org.apache.hadoop.hdds.conf.Config + org.apache.hadoop.hdds.conf.ConfigGroup org.apache.hadoop.hdds.scm.metadata.Replicate + org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AdminSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AdminSubcommand.java new file mode 100644 index 00000000000..b03b75eb8a9 --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AdminSubcommand.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.cli; + +/** Marker interface for subcommands to be added to {@code OzoneAdmin}. */ +public interface AdminSubcommand { + // marker +} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/DebugSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/DebugSubcommand.java new file mode 100644 index 00000000000..3915fd86843 --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/DebugSubcommand.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.cli; + +/** Marker interface for subcommands to be added to {@code OzoneDebug}. */ +public interface DebugSubcommand { + // marker +} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java index cc496a28e77..0c182d75e83 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java @@ -17,11 +17,7 @@ */ package org.apache.hadoop.hdds.cli; -import com.google.common.annotations.VisibleForTesting; -import java.io.IOException; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.security.UserGroupInformation; import picocli.CommandLine; @@ -33,41 +29,8 @@ description = "Developer tools for Ozone Admin operations", versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) -public class OzoneAdmin extends GenericCli { +public class OzoneAdmin extends GenericCli implements ExtensibleParentCommand { - private OzoneConfiguration ozoneConf; - - private UserGroupInformation user; - - public OzoneAdmin() { - super(OzoneAdmin.class); - } - - @VisibleForTesting - public OzoneAdmin(OzoneConfiguration conf) { - super(OzoneAdmin.class); - ozoneConf = conf; - } - - public OzoneConfiguration getOzoneConf() { - if (ozoneConf == null) { - ozoneConf = createOzoneConfiguration(); - } - return ozoneConf; - } - - public UserGroupInformation getUser() throws IOException { - if (user == null) { - user = UserGroupInformation.getCurrentUser(); - } - return user; - } - - /** - * Main for the Ozone Admin shell Command handling. 
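The marker interfaces above take over from SubcommandWithParent: a command declares which CLI it plugs into by implementing the marker and registering it with @MetaInfServices, and the parent advertises which marker it accepts via ExtensibleParentCommand.subcommandType(), as OzoneAdmin now does. A sketch of a new admin subcommand under this scheme (class name and body are illustrative only):

  package org.apache.hadoop.hdds.scm.cli;

  import java.io.IOException;

  import org.apache.hadoop.hdds.cli.AdminSubcommand;
  import org.apache.hadoop.hdds.cli.HddsVersionProvider;
  import org.apache.hadoop.hdds.scm.client.ScmClient;
  import org.kohsuke.MetaInfServices;
  import picocli.CommandLine.Command;

  /** Illustrative only; mirrors the registration pattern used by the commands in this patch. */
  @Command(name = "example",
      description = "Hypothetical admin subcommand",
      mixinStandardHelpOptions = true,
      versionProvider = HddsVersionProvider.class)
  @MetaInfServices(AdminSubcommand.class)
  public class ExampleAdminSubcommand extends ScmSubcommand implements AdminSubcommand {

    @Override
    public void execute(ScmClient scmClient) throws IOException {
      // Placeholder body; a real subcommand would talk to SCM through scmClient here.
      System.out.println("example subcommand executed");
    }
  }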
- * - * @param argv - System Args Strings[] - */ public static void main(String[] argv) { new OzoneAdmin().run(argv); } @@ -79,4 +42,9 @@ public int execute(String[] argv) { return TracingUtil.executeInNewSpan(spanName, () -> super.execute(argv)); } + + @Override + public Class subcommandType() { + return AdminSubcommand.class; + } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/RepairSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/RepairSubcommand.java new file mode 100644 index 00000000000..1eb12b01253 --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/RepairSubcommand.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.cli; + +/** Marker interface for subcommands to be added to {@code OzoneRepair}. */ +public interface RepairSubcommand { + // marker +} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerCommands.java index 7f24d843b0f..2264f096a28 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerCommands.java @@ -17,10 +17,9 @@ */ package org.apache.hadoop.hdds.scm.cli; +import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.OzoneAdmin; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Model.CommandSpec; @@ -90,9 +89,8 @@ ContainerBalancerStopSubcommand.class, ContainerBalancerStatusSubcommand.class }) -@MetaInfServices(SubcommandWithParent.class) -public class ContainerBalancerCommands implements Callable, - SubcommandWithParent { +@MetaInfServices(AdminSubcommand.class) +public class ContainerBalancerCommands implements Callable, AdminSubcommand { @Spec private CommandSpec spec; @@ -102,9 +100,4 @@ public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } - - @Override - public Class getParentType() { - return OzoneAdmin.class; - } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStatusSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStatusSubcommand.java index e58074bf140..9d7c270c962 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStatusSubcommand.java +++ 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStatusSubcommand.java @@ -19,9 +19,9 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfo; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfoProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfoResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfo; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfoProto; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.ozone.OzoneConsts; import picocli.CommandLine; @@ -31,10 +31,14 @@ import java.time.Duration; import java.time.Instant; import java.time.LocalDateTime; +import java.time.OffsetDateTime; import java.time.ZoneId; import java.util.List; import java.util.stream.Collectors; +import static org.apache.hadoop.hdds.util.DurationUtil.getPrettyDuration; +import static org.apache.hadoop.util.StringUtils.byteDesc; + /** * Handler to query status of container balancer. */ @@ -58,27 +62,42 @@ public class ContainerBalancerStatusSubcommand extends ScmSubcommand { public void execute(ScmClient scmClient) throws IOException { ContainerBalancerStatusInfoResponseProto response = scmClient.getContainerBalancerStatusInfo(); boolean isRunning = response.getIsRunning(); - ContainerBalancerStatusInfo balancerStatusInfo = response.getContainerBalancerStatusInfo(); + ContainerBalancerStatusInfoProto balancerStatusInfo = response.getContainerBalancerStatusInfo(); if (isRunning) { + Instant startedAtInstant = Instant.ofEpochSecond(balancerStatusInfo.getStartedAt()); LocalDateTime dateTime = - LocalDateTime.ofInstant(Instant.ofEpochSecond(balancerStatusInfo.getStartedAt()), ZoneId.systemDefault()); + LocalDateTime.ofInstant(startedAtInstant, ZoneId.systemDefault()); System.out.println("ContainerBalancer is Running."); if (verbose) { - System.out.printf("Started at: %s %s%n%n", dateTime.toLocalDate(), dateTime.toLocalTime()); + System.out.printf("Started at: %s %s%n", dateTime.toLocalDate(), dateTime.toLocalTime()); + Duration balancingDuration = Duration.between(startedAtInstant, OffsetDateTime.now()); + System.out.printf("Balancing duration: %s%n%n", getPrettyDuration(balancingDuration)); System.out.println(getConfigurationPrettyString(balancerStatusInfo.getConfiguration())); - List iterationsStatusInfoList + List iterationsStatusInfoList = balancerStatusInfo.getIterationsStatusInfoList(); System.out.println("Current iteration info:"); - System.out.println( - getPrettyIterationStatusInfo(iterationsStatusInfoList.get(iterationsStatusInfoList.size() - 1)) - ); + ContainerBalancerTaskIterationStatusInfoProto currentIterationStatistic = iterationsStatusInfoList.stream() + .filter(it -> it.getIterationResult().isEmpty()) + .findFirst() + .orElse(null); + if (currentIterationStatistic == null) { + System.out.println("-\n"); + } else { + System.out.println( + getPrettyIterationStatusInfo(currentIterationStatistic) + ); + } + if (verboseWithHistory) { System.out.println("Iteration history list:"); System.out.println( - iterationsStatusInfoList.stream().map(this::getPrettyIterationStatusInfo) + 
iterationsStatusInfoList + .stream() + .filter(it -> !it.getIterationResult().isEmpty()) + .map(this::getPrettyIterationStatusInfo) .collect(Collectors.joining("\n")) ); } @@ -134,21 +153,28 @@ String getConfigurationPrettyString(HddsProtos.ContainerBalancerConfigurationPro configuration.getExcludeDatanodes().isEmpty() ? "None" : configuration.getExcludeDatanodes()); } - private String getPrettyIterationStatusInfo(ContainerBalancerTaskIterationStatusInfo iterationStatusInfo) { + private String getPrettyIterationStatusInfo(ContainerBalancerTaskIterationStatusInfoProto iterationStatusInfo) { int iterationNumber = iterationStatusInfo.getIterationNumber(); String iterationResult = iterationStatusInfo.getIterationResult(); - long sizeScheduledForMove = iterationStatusInfo.getSizeScheduledForMoveGB(); - long dataSizeMovedGB = iterationStatusInfo.getDataSizeMovedGB(); + long iterationDuration = iterationStatusInfo.getIterationDuration(); + long sizeScheduledForMove = iterationStatusInfo.getSizeScheduledForMove(); + long dataSizeMoved = iterationStatusInfo.getDataSizeMoved(); long containerMovesScheduled = iterationStatusInfo.getContainerMovesScheduled(); long containerMovesCompleted = iterationStatusInfo.getContainerMovesCompleted(); long containerMovesFailed = iterationStatusInfo.getContainerMovesFailed(); long containerMovesTimeout = iterationStatusInfo.getContainerMovesTimeout(); - String enteringDataNodeList = iterationStatusInfo.getSizeEnteringNodesGBList() - .stream().map(nodeInfo -> nodeInfo.getUuid() + " <- " + nodeInfo.getDataVolumeGB() + "\n") + String enteringDataNodeList = iterationStatusInfo.getSizeEnteringNodesList() + .stream().map(nodeInfo -> nodeInfo.getUuid() + " <- " + byteDesc(nodeInfo.getDataVolume()) + "\n") .collect(Collectors.joining()); - String leavingDataNodeList = iterationStatusInfo.getSizeLeavingNodesGBList() - .stream().map(nodeInfo -> nodeInfo.getUuid() + " -> " + nodeInfo.getDataVolumeGB() + "\n") + if (enteringDataNodeList.isEmpty()) { + enteringDataNodeList = " -\n"; + } + String leavingDataNodeList = iterationStatusInfo.getSizeLeavingNodesList() + .stream().map(nodeInfo -> nodeInfo.getUuid() + " -> " + byteDesc(nodeInfo.getDataVolume()) + "\n") .collect(Collectors.joining()); + if (leavingDataNodeList.isEmpty()) { + leavingDataNodeList = " -\n"; + } return String.format( "%-50s %s%n" + "%-50s %s%n" + @@ -159,14 +185,16 @@ private String getPrettyIterationStatusInfo(ContainerBalancerTaskIterationStatus "%-50s %s%n" + "%-50s %s%n" + "%-50s %s%n" + + "%-50s %s%n" + "%-50s %n%s" + "%-50s %n%s", "Key", "Value", - "Iteration number", iterationNumber, + "Iteration number", iterationNumber == 0 ? "-" : iterationNumber, + "Iteration duration", getPrettyDuration(Duration.ofSeconds(iterationDuration)), "Iteration result", - iterationResult.isEmpty() ? "IN_PROGRESS" : iterationResult, - "Size scheduled to move", sizeScheduledForMove, - "Moved data size", dataSizeMovedGB, + iterationResult.isEmpty() ? 
"-" : iterationResult, + "Size scheduled to move", byteDesc(sizeScheduledForMove), + "Moved data size", byteDesc(dataSizeMoved), "Scheduled to move containers", containerMovesScheduled, "Already moved containers", containerMovesCompleted, "Failed to move containers", containerMovesFailed, @@ -174,5 +202,6 @@ private String getPrettyIterationStatusInfo(ContainerBalancerTaskIterationStatus "Entered data to nodes", enteringDataNodeList, "Exited data from nodes", leavingDataNodeList); } + } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index c3e379a5399..cf9ce1ca5d3 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.client.ClientTrustManager; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.security.x509.certificate.client.CACertificateProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.ContainerID; @@ -83,6 +84,7 @@ public class ContainerOperationClient implements ScmClient { private final boolean containerTokenEnabled; private final OzoneConfiguration configuration; private XceiverClientManager xceiverClientManager; + private int maxCountOfContainerList; public synchronized XceiverClientManager getXceiverClientManager() throws IOException { @@ -110,6 +112,9 @@ public ContainerOperationClient(OzoneConfiguration conf) throws IOException { } containerTokenEnabled = conf.getBoolean(HDDS_CONTAINER_TOKEN_ENABLED, HDDS_CONTAINER_TOKEN_ENABLED_DEFAULT); + maxCountOfContainerList = conf + .getInt(ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT, + ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT); } private XceiverClientManager newXCeiverClientManager(ConfigurationSource conf) @@ -339,17 +344,29 @@ public void deleteContainer(long containerID, boolean force) } @Override - public List listContainer(long startContainerID, + public ContainerListResult listContainer(long startContainerID, int count) throws IOException { + if (count > maxCountOfContainerList) { + LOG.warn("Attempting to list {} containers. However, this exceeds" + + " the cluster's current limit of {}. The results will be capped at the" + + " maximum allowed count.", count, maxCountOfContainerList); + count = maxCountOfContainerList; + } return storageContainerLocationClient.listContainer( startContainerID, count); } @Override - public List listContainer(long startContainerID, + public ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType repType, ReplicationConfig replicationConfig) throws IOException { + if (count > maxCountOfContainerList) { + LOG.warn("Attempting to list {} containers. However, this exceeds" + + " the cluster's current limit of {}. 
The results will be capped at the" + + " maximum allowed count.", count, maxCountOfContainerList); + count = maxCountOfContainerList; + } return storageContainerLocationClient.listContainer( startContainerID, count, state, repType, replicationConfig); } @@ -519,6 +536,11 @@ public List getScmRatisRoles() throws IOException { return storageContainerLocationClient.getScmInfo().getRatisPeerRoles(); } + @Override + public boolean isScmRatisEnable() throws IOException { + return storageContainerLocationClient.getScmInfo().getScmRatisEnabled(); + } + @Override public boolean rotateSecretKeys(boolean force) throws IOException { return secretKeyClient.checkAndRotate(force); diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java index cd5aba3a82e..a16e5227514 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java @@ -19,10 +19,9 @@ import java.util.concurrent.Callable; +import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.OzoneAdmin; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; @@ -42,9 +41,8 @@ ReplicationManagerStopSubcommand.class, ReplicationManagerStatusSubcommand.class }) -@MetaInfServices(SubcommandWithParent.class) -public class ReplicationManagerCommands implements Callable, - SubcommandWithParent { +@MetaInfServices(AdminSubcommand.class) +public class ReplicationManagerCommands implements Callable, AdminSubcommand { @Spec private CommandSpec spec; @@ -54,9 +52,4 @@ public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } - - @Override - public Class getParentType() { - return OzoneAdmin.class; - } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java index 6ba7cf29547..49f73e6faea 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java @@ -19,10 +19,9 @@ import java.util.concurrent.Callable; +import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.OzoneAdmin; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; @@ -42,8 +41,8 @@ SafeModeExitSubcommand.class, SafeModeWaitSubcommand.class }) -@MetaInfServices(SubcommandWithParent.class) -public class SafeModeCommands implements Callable, SubcommandWithParent { +@MetaInfServices(AdminSubcommand.class) +public class SafeModeCommands implements Callable, AdminSubcommand { @Spec private CommandSpec spec; @@ -53,9 +52,4 @@ public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } - - @Override - public Class getParentType() { - return OzoneAdmin.class; - } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java index 9ac275fd5cb..72bca506939 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java @@ -33,9 +33,8 @@ import com.fasterxml.jackson.databind.JsonSerializer; import com.fasterxml.jackson.databind.SerializerProvider; import com.fasterxml.jackson.databind.annotation.JsonSerialize; +import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.OzoneAdmin; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.client.ScmClient; @@ -56,9 +55,9 @@ description = "Print a tree of the network topology as reported by SCM", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -@MetaInfServices(SubcommandWithParent.class) +@MetaInfServices(AdminSubcommand.class) public class TopologySubcommand extends ScmSubcommand - implements SubcommandWithParent { + implements AdminSubcommand { private static final List STATES = new ArrayList<>(); @@ -137,11 +136,6 @@ public void execute(ScmClient scmClient) throws IOException { } } - @Override - public Class getParentType() { - return OzoneAdmin.class; - } - // Format // Location: rack1 // ipAddress(hostName) OperationalState diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CertCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CertCommands.java index d466c9554ad..211e3bb0925 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CertCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CertCommands.java @@ -19,10 +19,9 @@ import java.util.concurrent.Callable; +import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.OzoneAdmin; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; @@ -43,8 +42,8 @@ CleanExpiredCertsSubcommand.class, }) -@MetaInfServices(SubcommandWithParent.class) -public class CertCommands implements Callable, SubcommandWithParent { +@MetaInfServices(AdminSubcommand.class) +public class CertCommands implements Callable, AdminSubcommand { @Spec private CommandSpec spec; @@ -54,9 +53,4 @@ public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } - - @Override - public Class getParentType() { - return OzoneAdmin.class; - } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java index 9f93c56f2db..89522ded68c 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java @@ -19,13 +19,14 @@ import java.util.concurrent.Callable; +import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.OzoneAdmin; 
-import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; +import picocli.CommandLine.ParentCommand; import picocli.CommandLine.Model.CommandSpec; import picocli.CommandLine.Spec; @@ -46,20 +47,22 @@ UpgradeSubcommand.class, ReconcileSubcommand.class }) -@MetaInfServices(SubcommandWithParent.class) -public class ContainerCommands implements Callable, SubcommandWithParent { +@MetaInfServices(AdminSubcommand.class) +public class ContainerCommands implements Callable, AdminSubcommand { @Spec private CommandSpec spec; + @ParentCommand + private OzoneAdmin parent; + @Override public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } - @Override - public Class getParentType() { - return OzoneAdmin.class; + public OzoneAdmin getParent() { + return parent; } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java index ecc43d04087..88ccef702b3 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdds.scm.cli.container; import java.io.IOException; -import java.util.List; import com.google.common.base.Strings; import org.apache.hadoop.hdds.cli.HddsVersionProvider; @@ -26,7 +25,9 @@ import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -37,6 +38,7 @@ import com.fasterxml.jackson.databind.SerializationFeature; import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; import picocli.CommandLine.Command; +import picocli.CommandLine.ParentCommand; import picocli.CommandLine.Help.Visibility; import picocli.CommandLine.Option; @@ -55,10 +57,15 @@ public class ListSubcommand extends ScmSubcommand { private long startId; @Option(names = {"-c", "--count"}, - description = "Maximum number of containers to list", + description = "Maximum number of containers to list.", defaultValue = "20", showDefaultValue = Visibility.ALWAYS) private int count; + @Option(names = {"-a", "--all"}, + description = "List all containers.", + defaultValue = "false") + private boolean all; + @Option(names = {"--state"}, description = "Container state(OPEN, CLOSING, QUASI_CLOSED, CLOSED, " + "DELETING, DELETED)") @@ -75,6 +82,9 @@ public class ListSubcommand extends ScmSubcommand { private static final ObjectWriter WRITER; + @ParentCommand + private ContainerCommands parent; + static { ObjectMapper mapper = new ObjectMapper() .registerModule(new JavaTimeModule()) @@ -105,12 +115,49 @@ public void execute(ScmClient scmClient) throws IOException { ReplicationType.fromProto(type), replication, new OzoneConfiguration()); } - List containerList = - scmClient.listContainer(startId, count, state, type, repConfig); - // Output data list - for (ContainerInfo container : containerList) { - outputContainerInfo(container); + int maxCountAllowed = parent.getParent().getOzoneConf() + 
.getInt(ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT, + ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT); + + ContainerListResult containerListAndTotalCount; + + if (!all) { + if (count > maxCountAllowed) { + System.err.printf("Attempting to list the first %d records of containers." + + " However it exceeds the cluster's current limit of %d. The results will be capped at the" + + " maximum allowed count.%n", count, ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT); + count = maxCountAllowed; + } + containerListAndTotalCount = scmClient.listContainer(startId, count, state, type, repConfig); + for (ContainerInfo container : containerListAndTotalCount.getContainerInfoList()) { + outputContainerInfo(container); + } + + if (containerListAndTotalCount.getTotalCount() > count) { + System.err.printf("Displaying %d out of %d containers. " + + "Container list has more containers.%n", + count, containerListAndTotalCount.getTotalCount()); + } + } else { + // Batch size is either count passed through cli or maxCountAllowed + int batchSize = (count > 0) ? count : maxCountAllowed; + long currentStartId = startId; + int fetchedCount; + + do { + // Fetch containers in batches of 'batchSize' + containerListAndTotalCount = scmClient.listContainer(currentStartId, batchSize, state, type, repConfig); + fetchedCount = containerListAndTotalCount.getContainerInfoList().size(); + + for (ContainerInfo container : containerListAndTotalCount.getContainerInfoList()) { + outputContainerInfo(container); + } + + if (fetchedCount > 0) { + currentStartId = containerListAndTotalCount.getContainerInfoList().get(fetchedCount - 1).getContainerID() + 1; + } + } while (fetchedCount > 0); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java index 8cb2114f57d..6c020e46f37 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java @@ -17,10 +17,9 @@ */ package org.apache.hadoop.hdds.scm.cli.datanode; +import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.OzoneAdmin; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.kohsuke.MetaInfServices; import picocli.CommandLine; import picocli.CommandLine.Model.CommandSpec; @@ -41,10 +40,11 @@ DecommissionSubCommand.class, MaintenanceSubCommand.class, RecommissionSubCommand.class, + StatusSubCommand.class, UsageInfoSubcommand.class }) -@MetaInfServices(SubcommandWithParent.class) -public class DatanodeCommands implements Callable, SubcommandWithParent { +@MetaInfServices(AdminSubcommand.class) +public class DatanodeCommands implements Callable, AdminSubcommand { @Spec private CommandSpec spec; @@ -54,9 +54,4 @@ public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } - - @Override - public Class getParentType() { - return OzoneAdmin.class; - } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java index 9edcd3425a0..b33a5d1ea96 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java +++ 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java @@ -19,8 +19,6 @@ import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; -import org.kohsuke.MetaInfServices; import picocli.CommandLine; import picocli.CommandLine.Command; import java.util.concurrent.Callable; @@ -37,8 +35,7 @@ DecommissionStatusSubCommand.class }) -@MetaInfServices(SubcommandWithParent.class) -public class StatusSubCommand implements Callable, SubcommandWithParent { +public class StatusSubCommand implements Callable { @CommandLine.Spec private CommandLine.Model.CommandSpec spec; @@ -48,9 +45,4 @@ public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } - - @Override - public Class getParentType() { - return DatanodeCommands.class; - } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java index ba7371e6214..9c391035560 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java @@ -19,10 +19,9 @@ import java.util.concurrent.Callable; +import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.OzoneAdmin; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; @@ -44,8 +43,8 @@ CreatePipelineSubcommand.class, ClosePipelineSubcommand.class }) -@MetaInfServices(SubcommandWithParent.class) -public class PipelineCommands implements Callable, SubcommandWithParent { +@MetaInfServices(AdminSubcommand.class) +public class PipelineCommands implements Callable, AdminSubcommand { @Spec private CommandSpec spec; @@ -55,9 +54,4 @@ public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } - - @Override - public Class getParentType() { - return OzoneAdmin.class; - } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/util/DurationUtil.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/util/DurationUtil.java new file mode 100644 index 00000000000..7b2ded9b13d --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/util/DurationUtil.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.util; + +import java.time.Duration; + +import static java.lang.String.format; + +/** + * Pretty duration string representation. + */ +public final class DurationUtil { + + private DurationUtil() { + } + + /** + * Modify duration to string view. E.x. 1h 30m 45s, 2m 30s, 30s. + * + * @param duration duration + * @return duration in string format + */ + public static String getPrettyDuration(Duration duration) { + long hours = duration.toHours(); + long minutes = duration.getSeconds() / 60 % 60; + long seconds = duration.getSeconds() % 60; + if (hours > 0) { + return format("%dh %dm %ds", hours, minutes, seconds); + } else if (minutes > 0) { + return format("%dm %ds", minutes, seconds); + } else if (seconds >= 0) { + return format("%ds", seconds); + } else { + throw new IllegalStateException("Provided duration is incorrect: " + duration); + } + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/SubcommandWithParent.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/util/package-info.java similarity index 78% rename from hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/SubcommandWithParent.java rename to hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/util/package-info.java index 8421ab19cfa..6dd25c12c53 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/SubcommandWithParent.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/util/package-info.java @@ -14,17 +14,9 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. + *
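Stepping back to the new DurationUtil above, a few worked inputs make the formatting branches concrete (expected strings are derived from the implementation shown, not from the new test file):

  import static org.junit.jupiter.api.Assertions.assertEquals;

  import java.time.Duration;

  import org.apache.hadoop.hdds.util.DurationUtil;

  // The largest non-zero unit picks the pattern: "Nh Nm Ns", "Nm Ns", or "Ns".
  assertEquals("1h 30m 5s", DurationUtil.getPrettyDuration(Duration.ofSeconds(5405)));
  assertEquals("2m 30s", DurationUtil.getPrettyDuration(Duration.ofSeconds(150)));
  assertEquals("45s", DurationUtil.getPrettyDuration(Duration.ofSeconds(45)));
  assertEquals("0s", DurationUtil.getPrettyDuration(Duration.ZERO));
  // Negative durations fall through every branch and raise IllegalStateException.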

    */ -package org.apache.hadoop.hdds.cli; - /** - * Defineds parent command for SPI based subcommand registration. + * SCM related cli utils. */ -public interface SubcommandWithParent { - - /** - * Java type of the parent command. - */ - Class getParentType(); - -} +package org.apache.hadoop.hdds.util; diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestContainerBalancerSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestContainerBalancerSubCommand.java index 41b419d2326..bdce0f5d707 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestContainerBalancerSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestContainerBalancerSubCommand.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdds.scm.cli.datanode; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfo; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfoProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfoResponseProto; import org.apache.hadoop.hdds.scm.cli.ContainerBalancerStartSubcommand; import org.apache.hadoop.hdds.scm.cli.ContainerBalancerStatusSubcommand; @@ -28,6 +28,7 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import picocli.CommandLine; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -39,6 +40,8 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import static org.apache.hadoop.ozone.OzoneConsts.GB; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -58,175 +61,393 @@ class TestContainerBalancerSubCommand { private ContainerBalancerStartSubcommand startCmd; private ContainerBalancerStatusSubcommand statusCmd; - @BeforeEach - public void setup() throws UnsupportedEncodingException { - stopCmd = new ContainerBalancerStopSubcommand(); - startCmd = new ContainerBalancerStartSubcommand(); - statusCmd = new ContainerBalancerStatusSubcommand(); - System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); - System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); - } - - @AfterEach - public void tearDown() { - System.setOut(originalOut); - System.setErr(originalErr); - } - - @Test - public void testContainerBalancerStatusInfoSubcommandRunning() - throws IOException { - ScmClient scmClient = mock(ScmClient.class); - - ContainerBalancerConfiguration config = new ContainerBalancerConfiguration(); - config.setThreshold(10); - config.setMaxDatanodesPercentageToInvolvePerIteration(20); - config.setMaxSizeToMovePerIteration(53687091200L); - config.setMaxSizeEnteringTarget(27917287424L); - config.setMaxSizeLeavingSource(27917287424L); - config.setIterations(2); - config.setExcludeNodes(""); - config.setMoveTimeout(3900000); - config.setMoveReplicationTimeout(3000000); - config.setBalancingInterval(0); - config.setIncludeNodes(""); - config.setExcludeNodes(""); - config.setNetworkTopologyEnable(false); - config.setTriggerDuEnable(false); - - StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfo 
iteration0StatusInfo = - StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfo.newBuilder() - .setIterationNumber(0) + private static ContainerBalancerStatusInfoResponseProto getContainerBalancerStatusInfoResponseProto( + ContainerBalancerConfiguration config) { + StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfoProto iteration1StatusInfo = + StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfoProto.newBuilder() + .setIterationNumber(1) .setIterationResult("ITERATION_COMPLETED") - .setSizeScheduledForMoveGB(48) - .setDataSizeMovedGB(48) + .setIterationDuration(400L) + .setSizeScheduledForMove(54 * GB) + .setDataSizeMoved(54 * GB) .setContainerMovesScheduled(11) .setContainerMovesCompleted(11) .setContainerMovesFailed(0) .setContainerMovesTimeout(0) - .addSizeEnteringNodesGB( - StorageContainerLocationProtocolProtos.NodeTransferInfo.newBuilder() + .addSizeEnteringNodes( + StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() .setUuid("80f6bc27-e6f3-493e-b1f4-25f810ad960d") - .setDataVolumeGB(27) + .setDataVolume(28 * GB) .build() ) - .addSizeEnteringNodesGB( - StorageContainerLocationProtocolProtos.NodeTransferInfo.newBuilder() + .addSizeEnteringNodes( + StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() .setUuid("701ca98e-aa1a-4b36-b817-e28ed634bba6") - .setDataVolumeGB(23L) + .setDataVolume(26 * GB) .build() ) - .addSizeLeavingNodesGB( - StorageContainerLocationProtocolProtos.NodeTransferInfo.newBuilder() + .addSizeLeavingNodes( + StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() .setUuid("b8b9c511-c30f-4933-8938-2f272e307070") - .setDataVolumeGB(24L) + .setDataVolume(25 * GB) .build() ) - .addSizeLeavingNodesGB( - StorageContainerLocationProtocolProtos.NodeTransferInfo.newBuilder() + .addSizeLeavingNodes( + StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() .setUuid("7bd99815-47e7-4015-bc61-ca6ef6dfd130") - .setDataVolumeGB(26L) + .setDataVolume(29 * GB) .build() ) .build(); - StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfo iteration1StatusInfo = - StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfo.newBuilder() - .setIterationNumber(1) + StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfoProto iteration2StatusInfo = + StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfoProto.newBuilder() + .setIterationNumber(2) .setIterationResult("ITERATION_COMPLETED") - .setSizeScheduledForMoveGB(48) - .setDataSizeMovedGB(48) - .setContainerMovesScheduled(11) - .setContainerMovesCompleted(11) + .setIterationDuration(300L) + .setSizeScheduledForMove(30 * GB) + .setDataSizeMoved(30 * GB) + .setContainerMovesScheduled(8) + .setContainerMovesCompleted(8) .setContainerMovesFailed(0) .setContainerMovesTimeout(0) - .addSizeEnteringNodesGB( - StorageContainerLocationProtocolProtos.NodeTransferInfo.newBuilder() + .addSizeEnteringNodes( + StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() .setUuid("80f6bc27-e6f3-493e-b1f4-25f810ad960d") - .setDataVolumeGB(27L) + .setDataVolume(20 * GB) .build() ) - .addSizeEnteringNodesGB( - StorageContainerLocationProtocolProtos.NodeTransferInfo.newBuilder() + .addSizeEnteringNodes( + StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() .setUuid("701ca98e-aa1a-4b36-b817-e28ed634bba6") - .setDataVolumeGB(23L) + .setDataVolume(10 * GB) .build() ) - 
.addSizeLeavingNodesGB( - StorageContainerLocationProtocolProtos.NodeTransferInfo.newBuilder() + .addSizeLeavingNodes( + StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() .setUuid("b8b9c511-c30f-4933-8938-2f272e307070") - .setDataVolumeGB(24L) + .setDataVolume(15 * GB) .build() ) - .addSizeLeavingNodesGB( - StorageContainerLocationProtocolProtos.NodeTransferInfo.newBuilder() + .addSizeLeavingNodes( + StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() .setUuid("7bd99815-47e7-4015-bc61-ca6ef6dfd130") - .setDataVolumeGB(26L) + .setDataVolume(15 * GB) .build() ) .build(); - StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfo iteration2StatusInfo = - StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfo.newBuilder() - .setIterationNumber(1) + StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfoProto iteration3StatusInfo = + StorageContainerLocationProtocolProtos.ContainerBalancerTaskIterationStatusInfoProto.newBuilder() + .setIterationNumber(3) .setIterationResult("") - .setSizeScheduledForMoveGB(48) - .setDataSizeMovedGB(48) - .setContainerMovesScheduled(11) - .setContainerMovesCompleted(11) + .setIterationDuration(370L) + .setSizeScheduledForMove(48 * GB) + .setDataSizeMoved(48 * GB) + .setContainerMovesScheduled(5) + .setContainerMovesCompleted(5) .setContainerMovesFailed(0) .setContainerMovesTimeout(0) - .addSizeEnteringNodesGB( - StorageContainerLocationProtocolProtos.NodeTransferInfo.newBuilder() + .addSizeEnteringNodes( + StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() .setUuid("80f6bc27-e6f3-493e-b1f4-25f810ad960d") - .setDataVolumeGB(27L) + .setDataVolume(20 * GB) .build() ) - .addSizeEnteringNodesGB( - StorageContainerLocationProtocolProtos.NodeTransferInfo.newBuilder() + .addSizeEnteringNodes( + StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() .setUuid("701ca98e-aa1a-4b36-b817-e28ed634bba6") - .setDataVolumeGB(23L) + .setDataVolume(28 * GB) .build() ) - .addSizeLeavingNodesGB( - StorageContainerLocationProtocolProtos.NodeTransferInfo.newBuilder() + .addSizeLeavingNodes( + StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() .setUuid("b8b9c511-c30f-4933-8938-2f272e307070") - .setDataVolumeGB(24L) + .setDataVolume(30 * GB) .build() ) - .addSizeLeavingNodesGB( - StorageContainerLocationProtocolProtos.NodeTransferInfo.newBuilder() + .addSizeLeavingNodes( + StorageContainerLocationProtocolProtos.NodeTransferInfoProto.newBuilder() .setUuid("7bd99815-47e7-4015-bc61-ca6ef6dfd130") - .setDataVolumeGB(26L) + .setDataVolume(18 * GB) .build() ) .build(); - ContainerBalancerStatusInfoResponseProto statusInfoResponseProto = - ContainerBalancerStatusInfoResponseProto.newBuilder() + return ContainerBalancerStatusInfoResponseProto.newBuilder() .setIsRunning(true) - .setContainerBalancerStatusInfo(ContainerBalancerStatusInfo.newBuilder() + .setContainerBalancerStatusInfo(ContainerBalancerStatusInfoProto.newBuilder() .setStartedAt(OffsetDateTime.now().toEpochSecond()) .setConfiguration(config.toProtobufBuilder().setShouldRun(true)) .addAllIterationsStatusInfo( - Arrays.asList(iteration0StatusInfo, iteration1StatusInfo, iteration2StatusInfo) + Arrays.asList(iteration1StatusInfo, iteration2StatusInfo, iteration3StatusInfo) ) ) .build(); + } + + private static ContainerBalancerConfiguration getContainerBalancerConfiguration() { + ContainerBalancerConfiguration config = new ContainerBalancerConfiguration(); + 
config.setThreshold(10); + config.setMaxDatanodesPercentageToInvolvePerIteration(20); + config.setMaxSizeToMovePerIteration(53687091200L); + config.setMaxSizeEnteringTarget(27917287424L); + config.setMaxSizeLeavingSource(27917287424L); + config.setIterations(3); + config.setExcludeNodes(""); + config.setMoveTimeout(3900000); + config.setMoveReplicationTimeout(3000000); + config.setBalancingInterval(0); + config.setIncludeNodes(""); + config.setExcludeNodes(""); + config.setNetworkTopologyEnable(false); + config.setTriggerDuEnable(false); + return config; + } + + @BeforeEach + public void setup() throws UnsupportedEncodingException { + stopCmd = new ContainerBalancerStopSubcommand(); + startCmd = new ContainerBalancerStartSubcommand(); + statusCmd = new ContainerBalancerStatusSubcommand(); + System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); + System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); + } + + @AfterEach + public void tearDown() { + System.setOut(originalOut); + System.setErr(originalErr); + } + + @Test + void testContainerBalancerStatusInfoSubcommandRunningWithoutFlags() + throws IOException { + ScmClient scmClient = mock(ScmClient.class); + + ContainerBalancerConfiguration config = + getContainerBalancerConfiguration(); + + ContainerBalancerStatusInfoResponseProto + statusInfoResponseProto = getContainerBalancerStatusInfoResponseProto(config); //test status is running when(scmClient.getContainerBalancerStatusInfo()).thenReturn(statusInfoResponseProto); - statusCmd.execute(scmClient); Pattern p = Pattern.compile( "^ContainerBalancer\\sis\\sRunning."); - Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + String output = outContent.toString(DEFAULT_ENCODING); + Matcher m = p.matcher(output); assertTrue(m.find()); + + String balancerConfigOutput = + "Container Balancer Configuration values:\n" + + "Key Value\n" + + "Threshold 10.0\n" + + "Max Datanodes to Involve per Iteration(percent) 20\n" + + "Max Size to Move per Iteration 0GB\n" + + "Max Size Entering Target per Iteration 26GB\n" + + "Max Size Leaving Source per Iteration 26GB\n" + + "Number of Iterations 3\n" + + "Time Limit for Single Container's Movement 65min\n" + + "Time Limit for Single Container's Replication 50min\n" + + "Interval between each Iteration 0min\n" + + "Whether to Enable Network Topology false\n" + + "Whether to Trigger Refresh Datanode Usage Info false\n" + + "Container IDs to Exclude from Balancing None\n" + + "Datanodes Specified to be Balanced None\n" + + "Datanodes Excluded from Balancing None"; + assertFalse(output.contains(balancerConfigOutput)); + + String currentIterationOutput = + "Current iteration info:\n" + + "Key Value\n" + + "Iteration number 3\n" + + "Iteration duration 1h 6m 40s\n" + + "Iteration result IN_PROGRESS\n" + + "Size scheduled to move 48 GB\n" + + "Moved data size 48 GB\n" + + "Scheduled to move containers 11\n" + + "Already moved containers 11\n" + + "Failed to move containers 0\n" + + "Failed to move containers by timeout 0\n" + + "Entered data to nodes \n" + + "80f6bc27-e6f3-493e-b1f4-25f810ad960d <- 20 GB\n" + + "701ca98e-aa1a-4b36-b817-e28ed634bba6 <- 28 GB\n" + + "Exited data from nodes \n" + + "b8b9c511-c30f-4933-8938-2f272e307070 -> 30 GB\n" + + "7bd99815-47e7-4015-bc61-ca6ef6dfd130 -> 18 GB"; + assertFalse(output.contains(currentIterationOutput)); + + assertFalse(output.contains("Iteration history list:")); } @Test - public void testContainerBalancerStatusInfoSubcommandRunningOnStoppedBalancer() + void 
testContainerBalancerStatusInfoSubcommandVerboseHistory() throws IOException { ScmClient scmClient = mock(ScmClient.class); + ContainerBalancerConfiguration config = + getContainerBalancerConfiguration(); + + ContainerBalancerStatusInfoResponseProto + statusInfoResponseProto = getContainerBalancerStatusInfoResponseProto(config); //test status is running + when(scmClient.getContainerBalancerStatusInfo()).thenReturn(statusInfoResponseProto); + CommandLine c = new CommandLine(statusCmd); + c.parseArgs("--verbose", "--history"); + statusCmd.execute(scmClient); + String output = outContent.toString(DEFAULT_ENCODING); + Pattern p = Pattern.compile( + "^ContainerBalancer\\sis\\sRunning.$", Pattern.MULTILINE); + Matcher m = p.matcher(output); + assertTrue(m.find()); + + p = Pattern.compile( + "^Started at: (\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2})$", Pattern.MULTILINE); + m = p.matcher(output); + assertTrue(m.find()); + + p = Pattern.compile( + "^Balancing duration: \\d{1}s$", Pattern.MULTILINE); + m = p.matcher(output); + assertTrue(m.find()); + + String balancerConfigOutput = + "Container Balancer Configuration values:\n" + + "Key Value\n" + + "Threshold 10.0\n" + + "Max Datanodes to Involve per Iteration(percent) 20\n" + + "Max Size to Move per Iteration 0GB\n" + + "Max Size Entering Target per Iteration 26GB\n" + + "Max Size Leaving Source per Iteration 26GB\n" + + "Number of Iterations 3\n" + + "Time Limit for Single Container's Movement 65min\n" + + "Time Limit for Single Container's Replication 50min\n" + + "Interval between each Iteration 0min\n" + + "Whether to Enable Network Topology false\n" + + "Whether to Trigger Refresh Datanode Usage Info false\n" + + "Container IDs to Exclude from Balancing None\n" + + "Datanodes Specified to be Balanced None\n" + + "Datanodes Excluded from Balancing None"; + assertTrue(output.contains(balancerConfigOutput)); + + assertTrue(output.contains("Iteration history list:")); + String firstHistoryIterationOutput = + "Key Value\n" + + "Iteration number 3\n" + + "Iteration duration 6m 10s\n" + + "Iteration result -\n" + + "Size scheduled to move 48 GB\n" + + "Moved data size 48 GB\n" + + "Scheduled to move containers 5\n" + + "Already moved containers 5\n" + + "Failed to move containers 0\n" + + "Failed to move containers by timeout 0\n" + + "Entered data to nodes \n" + + "80f6bc27-e6f3-493e-b1f4-25f810ad960d <- 20 GB\n" + + "701ca98e-aa1a-4b36-b817-e28ed634bba6 <- 28 GB\n" + + "Exited data from nodes \n" + + "b8b9c511-c30f-4933-8938-2f272e307070 -> 30 GB\n" + + "7bd99815-47e7-4015-bc61-ca6ef6dfd130 -> 18 GB"; + assertTrue(output.contains(firstHistoryIterationOutput)); + + String secondHistoryIterationOutput = + "Key Value\n" + + "Iteration number 2\n" + + "Iteration duration 5m 0s\n" + + "Iteration result ITERATION_COMPLETED\n" + + "Size scheduled to move 30 GB\n" + + "Moved data size 30 GB\n" + + "Scheduled to move containers 8\n" + + "Already moved containers 8\n" + + "Failed to move containers 0\n" + + "Failed to move containers by timeout 0\n" + + "Entered data to nodes \n" + + "80f6bc27-e6f3-493e-b1f4-25f810ad960d <- 20 GB\n" + + "701ca98e-aa1a-4b36-b817-e28ed634bba6 <- 10 GB\n" + + "Exited data from nodes \n" + + "b8b9c511-c30f-4933-8938-2f272e307070 -> 15 GB\n" + + "7bd99815-47e7-4015-bc61-ca6ef6dfd130 -> 15 GB"; + assertTrue(output.contains(secondHistoryIterationOutput)); + } + + @Test + void testContainerBalancerStatusInfoSubcommandVerbose() + throws IOException { + ScmClient scmClient = mock(ScmClient.class); + + ContainerBalancerConfiguration 
config = + getContainerBalancerConfiguration(); + + ContainerBalancerStatusInfoResponseProto + statusInfoResponseProto = getContainerBalancerStatusInfoResponseProto(config); + //test status is running + when(scmClient.getContainerBalancerStatusInfo()).thenReturn(statusInfoResponseProto); + CommandLine c = new CommandLine(statusCmd); + c.parseArgs("--verbose"); + statusCmd.execute(scmClient); + String output = outContent.toString(DEFAULT_ENCODING); + Pattern p = Pattern.compile( + "^ContainerBalancer\\sis\\sRunning.$", Pattern.MULTILINE); + Matcher m = p.matcher(output); + assertTrue(m.find()); + + p = Pattern.compile( + "^Started at: (\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2})$", Pattern.MULTILINE); + m = p.matcher(output); + assertTrue(m.find()); + + p = Pattern.compile( + "^Balancing duration: \\d{1}s$", Pattern.MULTILINE); + m = p.matcher(output); + assertTrue(m.find()); + + String balancerConfigOutput = + "Container Balancer Configuration values:\n" + + "Key Value\n" + + "Threshold 10.0\n" + + "Max Datanodes to Involve per Iteration(percent) 20\n" + + "Max Size to Move per Iteration 0GB\n" + + "Max Size Entering Target per Iteration 26GB\n" + + "Max Size Leaving Source per Iteration 26GB\n" + + "Number of Iterations 3\n" + + "Time Limit for Single Container's Movement 65min\n" + + "Time Limit for Single Container's Replication 50min\n" + + "Interval between each Iteration 0min\n" + + "Whether to Enable Network Topology false\n" + + "Whether to Trigger Refresh Datanode Usage Info false\n" + + "Container IDs to Exclude from Balancing None\n" + + "Datanodes Specified to be Balanced None\n" + + "Datanodes Excluded from Balancing None"; + assertTrue(output.contains(balancerConfigOutput)); + + String currentIterationOutput = + "Current iteration info:\n" + + "Key Value\n" + + "Iteration number 3\n" + + "Iteration duration 6m 10s\n" + + "Iteration result -\n" + + "Size scheduled to move 48 GB\n" + + "Moved data size 48 GB\n" + + "Scheduled to move containers 5\n" + + "Already moved containers 5\n" + + "Failed to move containers 0\n" + + "Failed to move containers by timeout 0\n" + + "Entered data to nodes \n" + + "80f6bc27-e6f3-493e-b1f4-25f810ad960d <- 20 GB\n" + + "701ca98e-aa1a-4b36-b817-e28ed634bba6 <- 28 GB\n" + + "Exited data from nodes \n" + + "b8b9c511-c30f-4933-8938-2f272e307070 -> 30 GB\n" + + "7bd99815-47e7-4015-bc61-ca6ef6dfd130 -> 18 GB"; + assertTrue(output.contains(currentIterationOutput)); + + assertFalse(output.contains("Iteration history list:")); + } + + @Test + void testContainerBalancerStatusInfoSubcommandRunningOnStoppedBalancer() + throws IOException { + ScmClient scmClient = mock(ScmClient.class); + + //test status is not running when(scmClient.getContainerBalancerStatusInfo()).thenReturn( ContainerBalancerStatusInfoResponseProto.newBuilder() .setIsRunning(false) diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/util/TestDurationUtil.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/util/TestDurationUtil.java new file mode 100644 index 00000000000..7b0a9548639 --- /dev/null +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/util/TestDurationUtil.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + * + *     http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.util; + +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.time.Duration; +import java.util.Arrays; +import java.util.Collection; +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.params.provider.Arguments.arguments; + +class TestDurationUtil { + + private static Stream paramsForPositiveCases() { + return Stream.of( + arguments( + "0s", + Duration.ZERO + ), + arguments( + "2562047788015215h 30m 7s", + Duration.ofSeconds(Long.MAX_VALUE) + ), + arguments( + "1s", + Duration.ofSeconds(1) + ), + arguments( + "30s", + Duration.ofSeconds(30) + ), + arguments( + "1m 0s", + Duration.ofMinutes(1) + ), + arguments( + "2m 30s", + Duration.ofMinutes(2).plusSeconds(30) + ), + arguments( + "1h 30m 45s", + Duration.ofHours(1).plusMinutes(30).plusSeconds(45) + ), + arguments( + "24h 0m 0s", + Duration.ofDays(1) + ), + arguments( + "48h 0m 0s", + Duration.ofDays(2) + ) + ); + } + + private static Collection paramsForNegativeCases() { + return Arrays.asList(Duration.ofSeconds(-1L), Duration.ofSeconds(Long.MIN_VALUE)); + } + + @ParameterizedTest + @MethodSource("paramsForPositiveCases") + void testDuration(String expected, Duration actual) { + assertEquals(expected, DurationUtil.getPrettyDuration(actual)); + } + + @ParameterizedTest + @MethodSource("paramsForNegativeCases") + void testDuration(Duration param) { + assertThrows(IllegalStateException.class, () -> DurationUtil.getPrettyDuration(param)); + } +} + diff --git a/hadoop-ozone/client/pom.xml b/hadoop-ozone/client/pom.xml index 545faba51ac..6b5a1ac0c8b 100644 --- a/hadoop-ozone/client/pom.xml +++ b/hadoop-ozone/client/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-client - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Client Apache Ozone Client jar diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index 216b51b8e86..1a40b536909 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -985,8 +985,23 @@ public OzoneDataStreamOutput createStreamFile(String keyName, long size, */ public List listStatus(String keyName, boolean recursive, String startKey, long numEntries) throws IOException { - return proxy - .listStatus(volumeName, name, keyName, recursive, startKey, numEntries); + return proxy.listStatus(volumeName, name, keyName, recursive, startKey, numEntries); + } + + /** + * List the lightweight status for a file or a directory and its contents. + * + * @param keyName Absolute path of the entry to be listed + * @param recursive For a directory if true all the descendants of a + * particular directory are listed + * @param startKey Key from which listing needs to start. 
If startKey exists + * its status is included in the final list. + * @param numEntries Number of entries to list from the start key + * @return list of file status + */ + public List listStatusLight(String keyName, boolean recursive, + String startKey, long numEntries) throws IOException { + return proxy.listStatusLight(volumeName, name, keyName, recursive, startKey, numEntries, false); } /** @@ -1046,6 +1061,37 @@ public void setTimes(String keyName, long mtime, long atime) proxy.setTimes(ozoneObj, keyName, mtime, atime); } + /** + * Gets the tags for an existing key. + * @param keyName Key name. + * @return Tags for the specified key. + * @throws IOException + */ + public Map getObjectTagging(String keyName) + throws IOException { + return proxy.getObjectTagging(volumeName, name, keyName); + } + + /** + * Sets the tags to an existing key. + * @param keyName Key name. + * @param tags Tags to set on the key. + * @throws IOException + */ + public void putObjectTagging(String keyName, Map tags) + throws IOException { + proxy.putObjectTagging(volumeName, name, keyName, tags); + } + + /** + * Removes all the tags from an existing key. + * @param keyName Key name + * @throws IOException + */ + public void deleteObjectTagging(String keyName) throws IOException { + proxy.deleteObjectTagging(volumeName, name, keyName); + } + public void setSourcePathExist(boolean b) { this.sourcePathExist = b; } @@ -1762,7 +1808,6 @@ private boolean getChildrenKeys(String keyPrefix, String startKey, // 1. Get immediate children of keyPrefix, starting with startKey List statuses = proxy.listStatusLight(volumeName, name, keyPrefix, false, startKey, listCacheSize, true); - boolean reachedLimitCacheSize = statuses.size() == listCacheSize; // 2. Special case: ListKey expects keyPrefix element should present in // the resultList, only if startKey is blank. If startKey is not blank @@ -1794,7 +1839,7 @@ private boolean getChildrenKeys(String keyPrefix, String startKey, // Return it so that the next iteration will be // started using the stacked items. return true; - } else if (reachedLimitCacheSize && indx == statuses.size() - 1) { + } else if (indx == statuses.size() - 1) { // The last element is a FILE and reaches the listCacheSize. 
// Now, sets next seek key to this element stack.push(new ImmutablePair<>(keyPrefix, keyInfo.getKeyName())); diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java index 3a63a593469..8bd648545d4 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java @@ -26,6 +26,7 @@ import java.io.IOException; import com.google.common.annotations.VisibleForTesting; +import org.apache.ratis.util.UncheckedAutoCloseable; /** * OzoneClient connects to Ozone Cluster and @@ -76,6 +77,7 @@ public class OzoneClient implements Closeable { private final ClientProtocol proxy; private final ObjectStore objectStore; private ConfigurationSource conf; + private final UncheckedAutoCloseable leakTracker = OzoneClientFactory.track(this); /** * Creates a new OzoneClient object, generally constructed @@ -119,7 +121,11 @@ public ConfigurationSource getConfiguration() { */ @Override public void close() throws IOException { - proxy.close(); + try { + proxy.close(); + } finally { + leakTracker.close(); + } } /** diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java index 2e9080a66f8..e2d87921b5c 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java @@ -17,10 +17,12 @@ */ package org.apache.hadoop.ozone.client; +import java.io.IOException; + /** * This exception is thrown by the Ozone Clients. 
*/ -public class OzoneClientException extends Exception { +public class OzoneClientException extends IOException { public OzoneClientException() { } @@ -36,8 +38,4 @@ public OzoneClientException(Throwable throwable) { super(throwable); } - public OzoneClientException(String s, Throwable throwable, boolean b, - boolean b1) { - super(s, throwable, b, b1); - } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java index 80a495a1d12..1c673618d07 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java @@ -23,9 +23,11 @@ import java.io.IOException; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.MutableConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.LeakDetector; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; @@ -34,13 +36,17 @@ import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; import org.apache.hadoop.security.token.Token; -import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; +import org.apache.ratis.util.UncheckedAutoCloseable; + +import com.google.common.base.Preconditions; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; + /** * Factory class to create OzoneClients. */ @@ -54,6 +60,21 @@ public final class OzoneClientFactory { */ private OzoneClientFactory() { } + private static final LeakDetector OZONE_CLIENT_LEAK_DETECTOR = + new LeakDetector("OzoneClientObject"); + + public static UncheckedAutoCloseable track(AutoCloseable object) { + final Class clazz = object.getClass(); + final StackTraceElement[] stackTrace = HddsUtils.getStackTrace(LOG); + return OZONE_CLIENT_LEAK_DETECTOR.track(object, + () -> HddsUtils.reportLeak(clazz, + HddsUtils.formatStackTrace(stackTrace, 4), LOG)); + } + + public static Logger getLogger() { + return LOG; + } + /** * Constructs and return an OzoneClient with default configuration. 
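The OzoneClient/OzoneClientFactory hunks above attach a LeakDetector record to every client and release it in close(). The practical consequence is that callers must close clients deterministically, otherwise the detector reports the creation stack trace once the unclosed client is garbage-collected. Below is a minimal, hedged usage sketch; OzoneClientFactory.getRpcClient and OzoneClient.getObjectStore come from the existing public client API rather than from this patch, so treat them as assumptions.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;

// Hedged sketch, not part of the patch: shows the close-or-be-reported contract
// introduced by the leak tracker above.
public final class LeakTrackedClientUsage {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // try-with-resources guarantees OzoneClient#close() runs; close() now closes
    // the RPC proxy first and then releases the leak-tracker handle, so no leak
    // report is emitted for this client.
    try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) {
      ObjectStore store = client.getObjectStore();
      // ... volume/bucket/key operations via 'store' ...
    }
  }
}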
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java index 9ab110aa2b5..e914b2db212 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java @@ -20,12 +20,13 @@ import java.io.IOException; import java.time.Instant; +import java.util.ArrayList; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.NoSuchElementException; -import org.apache.commons.collections.ListUtils; import org.apache.hadoop.hdds.client.OzoneQuota; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; @@ -124,7 +125,7 @@ protected OzoneVolume(Builder builder) { this.creationTime.getEpochSecond(), this.creationTime.getNano()); } } - this.acls = builder.acls; + this.acls = new ArrayList<>(builder.acls); if (builder.conf != null) { this.listCacheSize = HddsClientUtils.getListCacheSize(builder.conf); } @@ -203,7 +204,7 @@ public Instant getModificationTime() { * @return aclMap */ public List getAcls() { - return ListUtils.unmodifiableList(acls); + return Collections.unmodifiableList(acls); } /** diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/TenantArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/TenantArgs.java index eed7a6829c9..9a0083b0210 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/TenantArgs.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/TenantArgs.java @@ -59,14 +59,13 @@ public boolean getForceCreationWhenVolumeExists() { * * @return Builder */ - public static TenantArgs.Builder newBuilder() { - return new TenantArgs.Builder(); + public static Builder newBuilder() { + return new Builder(); } /** * Builder for TenantArgs. 
*/ - @SuppressWarnings("checkstyle:hiddenfield") public static class Builder { private String volumeName; private boolean forceCreationWhenVolumeExists; @@ -77,12 +76,12 @@ public static class Builder { public Builder() { } - public TenantArgs.Builder setVolumeName(String volumeName) { + public Builder setVolumeName(String volumeName) { this.volumeName = volumeName; return this; } - public TenantArgs.Builder setForceCreationWhenVolumeExists( + public Builder setForceCreationWhenVolumeExists( boolean forceCreationWhenVolumeExists) { this.forceCreationWhenVolumeExists = forceCreationWhenVolumeExists; return this; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java index 2d83d88ed5e..76baefd71dd 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java @@ -20,6 +20,8 @@ import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum; import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum; +import org.apache.hadoop.fs.PathIOException; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.XceiverClientFactory; @@ -38,6 +40,7 @@ import org.slf4j.LoggerFactory; import java.io.IOException; +import java.nio.ByteBuffer; import java.util.List; /** @@ -150,6 +153,90 @@ protected void setChecksumType(ContainerProtos.ChecksumType type) { checksumType = type; } + protected abstract AbstractBlockChecksumComputer getBlockChecksumComputer(List chunkInfos, + long blockLength); + + protected abstract String populateBlockChecksumBuf(ByteBuffer blockChecksumByteBuffer) throws IOException; + + protected abstract List getChunkInfos( + OmKeyLocationInfo keyLocationInfo) throws IOException; + + protected ByteBuffer getBlockChecksumFromChunkChecksums(AbstractBlockChecksumComputer blockChecksumComputer) + throws IOException { + blockChecksumComputer.compute(getCombineMode()); + return blockChecksumComputer.getOutByteBuffer(); + } + + /** + * Compute block checksums block by block and append the raw bytes of the + * block checksums into getBlockChecksumBuf(). + * + * @throws IOException + */ + protected void checksumBlocks() throws IOException { + long currentLength = 0; + for (int blockIdx = 0; + blockIdx < getKeyLocationInfoList().size() && getRemaining() >= 0; + blockIdx++) { + OmKeyLocationInfo keyLocationInfo = + getKeyLocationInfoList().get(blockIdx); + if (currentLength > getLength()) { + return; + } + + if (!checksumBlock(keyLocationInfo)) { + throw new PathIOException(getSrc(), + "Fail to get block checksum for " + keyLocationInfo + + ", checksum combine mode: " + getCombineMode()); + } + + currentLength += keyLocationInfo.getLength(); + } + } + + /** + * Return true when sounds good to continue or retry, false when severe + * condition or totally failed. 
+ */ + protected boolean checksumBlock(OmKeyLocationInfo keyLocationInfo) + throws IOException { + // for each block, send request + List chunkInfos = + getChunkInfos(keyLocationInfo); + if (chunkInfos.isEmpty()) { + return false; + } + + long blockNumBytes = keyLocationInfo.getLength(); + + if (getRemaining() < blockNumBytes) { + blockNumBytes = getRemaining(); + } + setRemaining(getRemaining() - blockNumBytes); + + ContainerProtos.ChecksumData checksumData = + chunkInfos.get(0).getChecksumData(); + setChecksumType(checksumData.getType()); + int bytesPerChecksum = checksumData.getBytesPerChecksum(); + setBytesPerCRC(bytesPerChecksum); + + AbstractBlockChecksumComputer blockChecksumComputer = getBlockChecksumComputer(chunkInfos, + keyLocationInfo.getLength()); + ByteBuffer blockChecksumByteBuffer = + getBlockChecksumFromChunkChecksums(blockChecksumComputer); + String blockChecksumForDebug = + populateBlockChecksumBuf(blockChecksumByteBuffer); + + LOG.debug("Got reply from {} {} for block {}: blockChecksum={}, " + + "blockChecksumType={}", + keyInfo.getReplicationConfig().getReplicationType() == HddsProtos.ReplicationType.EC + ? "EC pipeline" : "pipeline", + keyLocationInfo.getPipeline(), keyLocationInfo.getBlockID(), + blockChecksumForDebug, checksumData.getType()); + + return true; + } + /** * Request the blocks created in the most recent version from Ozone Manager. * @@ -219,14 +306,6 @@ public void compute() throws IOException { } } - /** - * Compute block checksums block by block and append the raw bytes of the - * block checksums into getBlockChecksumBuf(). - * - * @throws IOException - */ - protected abstract void checksumBlocks() throws IOException; - /** * Make final file checksum result given the per-block or per-block-group * checksums collected into getBlockChecksumBuf(). 
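The BaseFileChecksumHelper change above is a template-method extraction: the per-block loop, remaining-length bookkeeping and checksum-type capture now live once in the base class, while the replicated and EC helpers only override getChunkInfos, getBlockChecksumComputer and populateBlockChecksumBuf. The self-contained sketch below mirrors that shape only; every class and method name in it is a hypothetical stand-in, not an Ozone API.

import java.util.Arrays;
import java.util.List;

// Hypothetical illustration of the template-method shape used by the refactor above.
abstract class BlockChecksumTemplate {

  private final StringBuilder blockChecksumBuf = new StringBuilder();

  // Shared driver, analogous to the checksumBlocks()/checksumBlock() pair that the
  // patch hoists into the base helper.
  final String computeAll(List<String> blocks) {
    for (String block : blocks) {
      List<Integer> chunkChecksums = readChunkChecksums(block); // ~ getChunkInfos(...)
      int combined = combine(chunkChecksums);                   // ~ getBlockChecksumComputer(...).compute(...)
      blockChecksumBuf.append(render(combined));                // ~ populateBlockChecksumBuf(...)
    }
    return blockChecksumBuf.toString();
  }

  abstract List<Integer> readChunkChecksums(String block);

  abstract int combine(List<Integer> chunkChecksums);

  abstract String render(int blockChecksum);
}

// A concrete scheme now only supplies the pieces that differ per replication type.
final class XorBlockChecksum extends BlockChecksumTemplate {
  @Override
  List<Integer> readChunkChecksums(String block) {
    return Arrays.asList(block.hashCode(), block.length());
  }

  @Override
  int combine(List<Integer> chunkChecksums) {
    return chunkChecksums.stream().reduce(0, (a, b) -> a ^ b);
  }

  @Override
  String render(int blockChecksum) {
    return Integer.toHexString(blockChecksum) + ";";
  }
}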
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java index b2c30ed9e08..a4c24768cdd 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java @@ -45,12 +45,14 @@ public class ECBlockChecksumComputer extends AbstractBlockChecksumComputer { private final List chunkInfoList; private final OmKeyInfo keyInfo; + private final long blockLength; public ECBlockChecksumComputer( - List chunkInfoList, OmKeyInfo keyInfo) { + List chunkInfoList, OmKeyInfo keyInfo, long blockLength) { this.chunkInfoList = chunkInfoList; this.keyInfo = keyInfo; + this.blockLength = blockLength; } @Override @@ -72,15 +74,13 @@ public void compute(OzoneClientConfig.ChecksumCombineMode combineMode) private void computeMd5Crc() { Preconditions.checkArgument(chunkInfoList.size() > 0); - final ContainerProtos.ChunkInfo firstChunkInfo = chunkInfoList.get(0); - long chunkSize = firstChunkInfo.getLen(); - long bytesPerCrc = firstChunkInfo.getChecksumData().getBytesPerChecksum(); - // Total parity checksum bytes per stripe to remove - int parityBytes = getParityBytes(chunkSize, bytesPerCrc); - final MessageDigest digester = MD5Hash.getDigester(); for (ContainerProtos.ChunkInfo chunkInfo : chunkInfoList) { + long chunkSize = chunkInfo.getLen(); + long bytesPerCrc = chunkInfo.getChecksumData().getBytesPerChecksum(); + // Total parity checksum bytes per stripe to remove + int parityBytes = getParityBytes(chunkSize, bytesPerCrc); ByteString stripeChecksum = chunkInfo.getStripeChecksum(); Preconditions.checkNotNull(stripeChecksum); @@ -121,66 +121,40 @@ private void computeCompositeCrc() throws IOException { // Bytes required to create a CRC long bytesPerCrc = firstChunkInfo.getChecksumData().getBytesPerChecksum(); - long chunkSize = firstChunkInfo.getLen(); - - //When EC chunk size is not a multiple of ozone.client.bytes.per.checksum - // (default = 16KB) the last checksum in an EC chunk is only generated for - // offset. - long bytesPerCrcOffset = chunkSize % bytesPerCrc; - - long keySize = keyInfo.getDataSize(); - // Total parity checksum bytes per stripe to remove - int parityBytes = getParityBytes(chunkSize, bytesPerCrc); - - // Number of checksum per chunk, Eg: 2MB EC chunk will - // have 2 checksum per chunk. 
- int numChecksumPerChunk = (int) - (Math.ceil((double) chunkSize / bytesPerCrc)); + long blockSize = blockLength; CrcComposer blockCrcComposer = CrcComposer.newCrcComposer(dataChecksumType, bytesPerCrc); for (ContainerProtos.ChunkInfo chunkInfo : chunkInfoList) { ByteString stripeChecksum = chunkInfo.getStripeChecksum(); + long chunkSize = chunkInfo.getLen(); + + // Total parity checksum bytes per stripe to remove + int parityBytes = getParityBytes(chunkSize, bytesPerCrc); Preconditions.checkNotNull(stripeChecksum); final int checksumSize = stripeChecksum.size(); Preconditions.checkArgument(checksumSize % 4 == 0, "Checksum Bytes size does not match"); - CrcComposer chunkCrcComposer = - CrcComposer.newCrcComposer(dataChecksumType, bytesPerCrc); // Limit parity bytes as they do not contribute to fileChecksum final ByteBuffer byteWrap = stripeChecksum.asReadOnlyByteBuffer(); byteWrap.limit(checksumSize - parityBytes); - long chunkOffsetIndex = 1; while (byteWrap.hasRemaining()) { - - /* - When chunk size is not a multiple of bytes.per.crc we get an offset. - For eg, RS-3-2-1524k is not a multiple of 1MB. So two checksums are - generated 1st checksum for 1024k bytes and 2nd checksum for 500k bytes. - When we reach the 2nd Checksum we need to modify the bytesPerCrc as in - this case 500k is the bytes for which the checksum is generated. - */ - long currentChunkOffset = Long.MAX_VALUE; - if ((chunkOffsetIndex % numChecksumPerChunk == 0) - && (bytesPerCrcOffset > 0)) { - currentChunkOffset = bytesPerCrcOffset; + // Here Math.min is mainly required for the last stripe's last chunk. The last chunk of the last stripe can be + // less than the chunkSize; chunkSize is only calculated from each stripe's first chunk. This would be fine + // for the rest of the stripe because all the chunks are of the same size. But for the last stripe we don't know + // the exact size of the last chunk. So we calculate it with the help of blockSize. If the block size is smaller + // than the chunk size, then we know it is the last stripe's last chunk. + long remainingChunkSize = Math.min(blockSize, chunkSize); + while (byteWrap.hasRemaining() && remainingChunkSize > 0) { + final int checksumData = byteWrap.getInt(); + blockCrcComposer.update(checksumData, Math.min(bytesPerCrc, remainingChunkSize)); + remainingChunkSize -= bytesPerCrc; } - - final int checksumDataCrc = byteWrap.getInt(); - //To handle last chunk when it size is lower than 1524K in the case - // of rs-3-2-1524k. 
- long chunkSizePerChecksum = Math.min(Math.min(keySize, bytesPerCrc), - currentChunkOffset); - chunkCrcComposer.update(checksumDataCrc, chunkSizePerChecksum); - - int chunkChecksumCrc = CrcUtil.readInt(chunkCrcComposer.digest(), 0); - blockCrcComposer.update(chunkChecksumCrc, chunkSizePerChecksum); - keySize -= Math.min(bytesPerCrc, currentChunkOffset); - ++chunkOffsetIndex; + blockSize -= chunkSize; } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECFileChecksumHelper.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECFileChecksumHelper.java index 13ba5716987..db36b9837ad 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECFileChecksumHelper.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECFileChecksumHelper.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.ozone.client.checksum; -import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; @@ -46,7 +45,6 @@ * The helper class to compute file checksum for EC files. */ public class ECFileChecksumHelper extends BaseFileChecksumHelper { - private int blockIdx; public ECFileChecksumHelper(OzoneVolume volume, OzoneBucket bucket, String keyName, long length, OzoneClientConfig.ChecksumCombineMode @@ -57,63 +55,13 @@ public ECFileChecksumHelper(OzoneVolume volume, OzoneBucket bucket, } @Override - protected void checksumBlocks() throws IOException { - long currentLength = 0; - for (blockIdx = 0; - blockIdx < getKeyLocationInfoList().size() && getRemaining() >= 0; - blockIdx++) { - OmKeyLocationInfo keyLocationInfo = - getKeyLocationInfoList().get(blockIdx); - - if (currentLength > getLength()) { - return; - } - - if (!checksumBlock(keyLocationInfo)) { - throw new PathIOException(getSrc(), - "Fail to get block checksum for " + keyLocationInfo - + ", checksum combine mode: " + getCombineMode()); - } - - currentLength += keyLocationInfo.getLength(); - } - } - - private boolean checksumBlock(OmKeyLocationInfo keyLocationInfo) - throws IOException { - // for each block, send request - List chunkInfos = - getChunkInfos(keyLocationInfo); - if (chunkInfos.size() == 0) { - return false; - } - - long blockNumBytes = keyLocationInfo.getLength(); - - if (getRemaining() < blockNumBytes) { - blockNumBytes = getRemaining(); - } - setRemaining(getRemaining() - blockNumBytes); - - ContainerProtos.ChecksumData checksumData = - chunkInfos.get(0).getChecksumData(); - setChecksumType(checksumData.getType()); - int bytesPerChecksum = checksumData.getBytesPerChecksum(); - setBytesPerCRC(bytesPerChecksum); - - ByteBuffer blockChecksumByteBuffer = - getBlockChecksumFromChunkChecksums(chunkInfos); - String blockChecksumForDebug = - populateBlockChecksumBuf(blockChecksumByteBuffer); - - LOG.debug("Got reply from EC pipeline {} for block {}: blockChecksum={}, " + - "blockChecksumType={}", - keyLocationInfo.getPipeline(), keyLocationInfo.getBlockID(), - blockChecksumForDebug, checksumData.getType()); - return true; + protected AbstractBlockChecksumComputer getBlockChecksumComputer(List chunkInfos, + long blockLength) { + return new ECBlockChecksumComputer(chunkInfos, getKeyInfo(), blockLength); } - private String populateBlockChecksumBuf( + @Override + protected String populateBlockChecksumBuf( ByteBuffer blockChecksumByteBuffer) throws IOException { String 
blockChecksumForDebug = null; switch (getCombineMode()) { @@ -139,18 +87,9 @@ private String populateBlockChecksumBuf( return blockChecksumForDebug; } - private ByteBuffer getBlockChecksumFromChunkChecksums( - List chunkInfos) throws IOException { - - AbstractBlockChecksumComputer blockChecksumComputer = - new ECBlockChecksumComputer(chunkInfos, getKeyInfo()); - blockChecksumComputer.compute(getCombineMode()); - - return blockChecksumComputer.getOutByteBuffer(); - } - - private List getChunkInfos(OmKeyLocationInfo - keyLocationInfo) throws IOException { + @Override + protected List getChunkInfos(OmKeyLocationInfo + keyLocationInfo) throws IOException { // To read an EC block, we create a STANDALONE pipeline that contains the // single location for the block index we want to read. The EC blocks are // indexed from 1 to N, however the data locations are stored in the diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java index 016121ce1a9..9c2df0fdb47 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.ozone.client.checksum; -import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; @@ -44,7 +43,6 @@ * The helper class to compute file checksum for replicated files. */ public class ReplicatedFileChecksumHelper extends BaseFileChecksumHelper { - private int blockIdx; public ReplicatedFileChecksumHelper( OzoneVolume volume, OzoneBucket bucket, String keyName, long length, @@ -61,65 +59,10 @@ public ReplicatedFileChecksumHelper(OzoneVolume volume, OzoneBucket bucket, keyInfo); } - @Override - protected void checksumBlocks() throws IOException { - long currentLength = 0; - for (blockIdx = 0; - blockIdx < getKeyLocationInfoList().size() && getRemaining() >= 0; - blockIdx++) { - OmKeyLocationInfo keyLocationInfo = - getKeyLocationInfoList().get(blockIdx); - if (currentLength > getLength()) { - return; - } - - if (!checksumBlock(keyLocationInfo)) { - throw new PathIOException(getSrc(), - "Fail to get block checksum for " + keyLocationInfo - + ", checksum combine mode: " + getCombineMode()); - } - - currentLength += keyLocationInfo.getLength(); - } - } - - /** - * Return true when sounds good to continue or retry, false when severe - * condition or totally failed. 
- */ - private boolean checksumBlock(OmKeyLocationInfo keyLocationInfo) - throws IOException { - // for each block, send request - List chunkInfos = - getChunkInfos(keyLocationInfo); - if (chunkInfos.size() == 0) { - return false; - } - - long blockNumBytes = keyLocationInfo.getLength(); - - if (getRemaining() < blockNumBytes) { - blockNumBytes = getRemaining(); - } - setRemaining(getRemaining() - blockNumBytes); - - ContainerProtos.ChecksumData checksumData = - chunkInfos.get(0).getChecksumData(); - setChecksumType(checksumData.getType()); - int bytesPerChecksum = checksumData.getBytesPerChecksum(); - setBytesPerCRC(bytesPerChecksum); - - ByteBuffer blockChecksumByteBuffer = getBlockChecksumFromChunkChecksums( - keyLocationInfo, chunkInfos); - String blockChecksumForDebug = - populateBlockChecksumBuf(blockChecksumByteBuffer); - - LOG.debug("got reply from pipeline {} for block {}: blockChecksum={}, " + - "blockChecksumType={}", - keyLocationInfo.getPipeline(), keyLocationInfo.getBlockID(), - blockChecksumForDebug, checksumData.getType()); - return true; + protected AbstractBlockChecksumComputer getBlockChecksumComputer(List chunkInfos, + long blockLength) { + return new ReplicatedBlockChecksumComputer(chunkInfos); } // copied from BlockInputStream @@ -127,6 +70,7 @@ private boolean checksumBlock(OmKeyLocationInfo keyLocationInfo) * Send RPC call to get the block info from the container. * @return List of chunks in this block. */ + @Override protected List getChunkInfos( OmKeyLocationInfo keyLocationInfo) throws IOException { // irrespective of the container state, we will always read via Standalone @@ -164,18 +108,6 @@ protected List getChunkInfos( return chunks; } - // TODO: copy BlockChecksumHelper here - ByteBuffer getBlockChecksumFromChunkChecksums( - OmKeyLocationInfo keyLocationInfo, - List chunkInfoList) - throws IOException { - AbstractBlockChecksumComputer blockChecksumComputer = - new ReplicatedBlockChecksumComputer(chunkInfoList); - blockChecksumComputer.compute(getCombineMode()); - - return blockChecksumComputer.getOutByteBuffer(); - } - /** * Parses out the raw blockChecksum bytes from {@code checksumData} byte * buffer according to the blockChecksumType and populates the cumulative @@ -184,7 +116,8 @@ ByteBuffer getBlockChecksumFromChunkChecksums( * @return a debug-string representation of the parsed checksum if * debug is enabled, otherwise null. 
*/ - String populateBlockChecksumBuf(ByteBuffer checksumData) + @Override + protected String populateBlockChecksumBuf(ByteBuffer checksumData) throws IOException { String blockChecksumForDebug = null; switch (getCombineMode()) { diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java index 4f9e5db49a9..b5f8191d368 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java @@ -303,7 +303,7 @@ private int writeToOutputStream(BlockOutputStreamEntry current, if (retry) { current.writeOnRetry(len); } else { - waitForRetryHandling(current); + current.waitForRetryHandling(retryHandlingCondition); current.write(b, off, writeLen); offset += writeLen; } @@ -584,7 +584,7 @@ private void handleFlushOrClose(StreamAction op) throws IOException { blockOutputStreamEntryPool.getCurrentStreamEntry(); if (entry != null) { // If the current block is to handle retries, wait until all the retries are done. - waitForRetryHandling(entry); + doInWriteLock(() -> entry.waitForRetryHandling(retryHandlingCondition)); entry.registerCallReceived(); try { handleStreamAction(entry, op); @@ -608,10 +608,6 @@ private void handleFlushOrClose(StreamAction op) throws IOException { } } - private void waitForRetryHandling(BlockOutputStreamEntry currentEntry) throws InterruptedException { - doInWriteLock(() -> currentEntry.waitForRetryHandling(retryHandlingCondition)); - } - private void handleStreamAction(BlockOutputStreamEntry entry, StreamAction op) throws IOException { Collection failedServers = entry.getFailedServers(); diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index 8d9614b554a..c0bffaf8950 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -1311,4 +1311,38 @@ void setTimes(OzoneObj obj, String keyName, long mtime, long atime) * @throws IOException */ void recoverKey(OmKeyArgs args, long clientID) throws IOException; + + /** + * Gets the tags for an existing key. + * @param volumeName Volume name. + * @param bucketName Bucket name. + * @param keyName Key name. + * @return tags for the specified key. + * @throws IOException + */ + Map getObjectTagging(String volumeName, String bucketName, String keyName) + throws IOException; + + /** + * Sets the tags to an existing key. + * @param volumeName Volume name. + * @param bucketName Bucket name. + * @param keyName Key name. + * @param tags Tags to set on the key. + * @throws IOException + */ + void putObjectTagging(String volumeName, String bucketName, String keyName, + Map tags) throws IOException; + + + /** + * Removes all the tags from the specified key. + * @param volumeName Volume name. + * @param bucketName Bucket name. + * @param keyName Key name. 
+ * @throws IOException + */ + void deleteObjectTagging(String volumeName, String bucketName, String keyName) + throws IOException; + } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index fe986640176..93c675d9b90 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -121,7 +121,6 @@ import org.apache.hadoop.ozone.om.helpers.OmTenantArgs; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; @@ -141,9 +140,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRoleInfo; import org.apache.hadoop.ozone.security.GDPRSymmetricKey; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; -import org.apache.hadoop.ozone.security.acl.OzoneAclConfig; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.snapshot.CancelSnapshotDiffResponse; import org.apache.hadoop.ozone.snapshot.ListSnapshotResponse; @@ -161,7 +157,6 @@ import java.security.InvalidKeyException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; @@ -176,7 +171,7 @@ import java.util.function.Function; import java.util.stream.Collectors; -import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; +import static org.apache.hadoop.ozone.OzoneAcl.LINK_BUCKET_DEFAULT_ACL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_KEY_PROVIDER_CACHE_EXPIRY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_REQUIRED_OM_VERSION_MIN_KEY; @@ -185,8 +180,6 @@ import static org.apache.hadoop.ozone.OzoneConsts.MAXIMUM_NUMBER_OF_PARTS_PER_UPLOAD; import static org.apache.hadoop.ozone.OzoneConsts.OLD_QUOTA_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_MAXIMUM_ACCESS_ID_LENGTH; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; /** * Ozone RPC Client Implementation, it connects to OM, SCM and DataNode @@ -210,8 +203,6 @@ public class RpcClient implements ClientProtocol { private final XceiverClientFactory xceiverClientManager; private final UserGroupInformation ugi; private UserGroupInformation s3gUgi; - private final ACLType userRights; - private final ACLType groupRights; private final ClientId clientId = ClientId.randomId(); private final boolean unsafeByteBufferConversion; private Text dtService; @@ -244,12 +235,8 @@ public RpcClient(ConfigurationSource conf, String omServiceId) Preconditions.checkNotNull(conf); this.conf = conf; this.ugi = UserGroupInformation.getCurrentUser(); - // Get default acl rights for user and group. 
- OzoneAclConfig aclConfig = this.conf.getObject(OzoneAclConfig.class); replicationConfigValidator = this.conf.getObject(ReplicationConfigValidator.class); - this.userRights = aclConfig.getUserDefaultRights(); - this.groupRights = aclConfig.getGroupDefaultRights(); this.clientConfig = conf.getObject(OzoneClientConfig.class); this.ecReconstructExecutor = MemoizedSupplier.valueOf(() -> createThreadPoolExecutor( @@ -450,20 +437,6 @@ public void createVolume(String volumeName, VolumeArgs volArgs) ugi.getShortUserName() : volArgs.getOwner(); long quotaInNamespace = volArgs.getQuotaInNamespace(); long quotaInBytes = volArgs.getQuotaInBytes(); - List listOfAcls = new ArrayList<>(); - //User ACL - listOfAcls.add(new OzoneAcl(ACLIdentityType.USER, - owner, ACCESS, userRights)); - //Group ACLs of the User - List userGroups = Arrays.asList(UserGroupInformation - .createRemoteUser(owner).getGroupNames()); - userGroups.stream().forEach((group) -> listOfAcls.add( - new OzoneAcl(ACLIdentityType.GROUP, group, ACCESS, groupRights))); - //ACLs from VolumeArgs - List volumeAcls = volArgs.getAcls(); - if (volumeAcls != null) { - listOfAcls.addAll(volumeAcls); - } OmVolumeArgs.Builder builder = OmVolumeArgs.newBuilder(); builder.setVolume(volumeName); @@ -473,11 +446,14 @@ public void createVolume(String volumeName, VolumeArgs volArgs) builder.setQuotaInNamespace(quotaInNamespace); builder.setUsedNamespace(0L); builder.addAllMetadata(volArgs.getMetadata()); - - //Remove duplicates and add ACLs - for (OzoneAcl ozoneAcl : - listOfAcls.stream().distinct().collect(Collectors.toList())) { - builder.addOzoneAcls(ozoneAcl); + //ACLs from VolumeArgs + List volumeAcls = volArgs.getAcls(); + if (volumeAcls != null) { + //Remove duplicates and add ACLs + for (OzoneAcl ozoneAcl : + volumeAcls.stream().distinct().collect(Collectors.toList())) { + builder.addOzoneAcls(ozoneAcl); + } } if (volArgs.getQuotaInBytes() == 0) { @@ -667,17 +643,6 @@ public void createBucket( .setKeyName(bucketArgs.getEncryptionKey()).build(); } - List listOfAcls = getAclList(); - //ACLs from BucketArgs - if (bucketArgs.getAcls() != null) { - listOfAcls.addAll(bucketArgs.getAcls()); - } - // Link bucket default acl - if (bucketArgs.getSourceVolume() != null - && bucketArgs.getSourceBucket() != null) { - listOfAcls.add(linkBucketDefaultAcl()); - } - OmBucketInfo.Builder builder = OmBucketInfo.newBuilder(); builder.setVolumeName(volumeName) .setBucketName(bucketName) @@ -688,10 +653,19 @@ public void createBucket( .setSourceBucket(bucketArgs.getSourceBucket()) .setQuotaInBytes(bucketArgs.getQuotaInBytes()) .setQuotaInNamespace(bucketArgs.getQuotaInNamespace()) - .setAcls(listOfAcls.stream().distinct().collect(Collectors.toList())) .setBucketLayout(bucketLayout) .setOwner(owner); + if (bucketArgs.getAcls() != null) { + builder.setAcls(bucketArgs.getAcls()); + } + + // Link bucket default acl + if (bucketArgs.getSourceVolume() != null + && bucketArgs.getSourceBucket() != null) { + builder.addAcl(LINK_BUCKET_DEFAULT_ACL); + } + if (bek != null) { builder.setBucketEncryptionKey(bek); } @@ -752,17 +726,6 @@ private static void verifySpaceQuota(long quota) throws OMException { } } - /** - * Helper function to get default acl list for current user. - * - * @return listOfAcls - * */ - private List getAclList() { - UserGroupInformation realUserInfo = getRealUserInfo(); - return OzoneAclUtil.getAclList(realUserInfo.getUserName(), - realUserInfo.getGroupNames(), userRights, groupRights); - } - /** * Helper function to get the actual operating user. 
* @@ -778,16 +741,6 @@ private UserGroupInformation getRealUserInfo() { return ugi; } - /** - * Link bucket default acl defined [world::rw] - * which is similar to Linux POSIX symbolic. - * - * @return OzoneAcl - */ - private OzoneAcl linkBucketDefaultAcl() { - return new OzoneAcl(ACLIdentityType.WORLD, "", ACCESS, READ, WRITE); - } - /** * Get a valid Delegation Token. * @@ -1427,7 +1380,6 @@ public OzoneOutputStream createKey( .setReplicationConfig(replicationConfig) .addAllMetadataGdpr(metadata) .addAllTags(tags) - .setAcls(getAclList()) .setLatestVersionLocation(getLatestVersionLocation) .setOwnerName(ownerName); @@ -1536,7 +1488,6 @@ public OzoneDataStreamOutput createStreamKey( .addAllMetadataGdpr(metadata) .addAllTags(tags) .setSortDatanodesInPipeline(true) - .setAcls(getAclList()) .setOwnerName(ownerName); OpenKeySession openKey = ozoneManagerClient.openKey(builder.build()); @@ -1955,7 +1906,6 @@ public OmMultipartInfo initiateMultipartUpload(String volumeName, .setBucketName(bucketName) .setKeyName(keyName) .setReplicationConfig(replicationConfig) - .setAcls(getAclList()) .addAllMetadataGdpr(metadata) .setOwnerName(ownerName) .addAllTags(tags) @@ -1992,7 +1942,6 @@ private OpenKeySession newMultipartOpenKey( .setMultipartUploadID(uploadID) .setMultipartUploadPartNumber(partNumber) .setSortDatanodesInPipeline(sortDatanodesInPipeline) - .setAcls(getAclList()) .setOwnerName(ownerName) .build(); return ozoneManagerClient.openKey(keyArgs); @@ -2064,7 +2013,6 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload( .setBucketName(bucketName) .setKeyName(keyName) .setMultipartUploadID(uploadID) - .setAcls(getAclList()) .setOwnerName(ownerName) .build(); @@ -2169,7 +2117,6 @@ public void createDirectory(String volumeName, String bucketName, OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) - .setAcls(getAclList()) .setOwnerName(ownerName) .build(); ozoneManagerClient.createDirectory(keyArgs); @@ -2250,7 +2197,6 @@ public OzoneOutputStream createFile(String volumeName, String bucketName, .setKeyName(keyName) .setDataSize(size) .setReplicationConfig(replicationConfig) - .setAcls(getAclList()) .setLatestVersionLocation(getLatestVersionLocation) .setOwnerName(ownerName) .build(); @@ -2282,7 +2228,6 @@ public OzoneDataStreamOutput createStreamFile(String volumeName, .setKeyName(keyName) .setDataSize(size) .setReplicationConfig(replicationConfig) - .setAcls(getAclList()) .setLatestVersionLocation(getLatestVersionLocation) .setSortDatanodesInPipeline(true) .setOwnerName(ownerName) @@ -2727,6 +2672,61 @@ public void recoverKey(OmKeyArgs args, long clientID) throws IOException { ozoneManagerClient.recoverKey(args, clientID); } + @Override + public Map getObjectTagging(String volumeName, String bucketName, String keyName) + throws IOException { + if (omVersion.compareTo(OzoneManagerVersion.S3_OBJECT_TAGGING_API) < 0) { + throw new IOException("OzoneManager does not support S3 object tagging API"); + } + + verifyVolumeName(volumeName); + verifyBucketName(bucketName); + Preconditions.checkNotNull(keyName); + OmKeyArgs keyArgs = new OmKeyArgs.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .build(); + return ozoneManagerClient.getObjectTagging(keyArgs); + } + + @Override + public void putObjectTagging(String volumeName, String bucketName, + String keyName, Map tags) throws IOException { + if (omVersion.compareTo(OzoneManagerVersion.S3_OBJECT_TAGGING_API) < 0) { + throw new 
IOException("OzoneManager does not support S3 object tagging API"); + } + + verifyVolumeName(volumeName); + verifyBucketName(bucketName); + Preconditions.checkNotNull(keyName); + OmKeyArgs keyArgs = new OmKeyArgs.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .addAllTags(tags) + .build(); + ozoneManagerClient.putObjectTagging(keyArgs); + } + + @Override + public void deleteObjectTagging(String volumeName, String bucketName, + String keyName) throws IOException { + if (omVersion.compareTo(OzoneManagerVersion.S3_OBJECT_TAGGING_API) < 0) { + throw new IOException("OzoneManager does not support S3 object tagging API"); + } + + verifyVolumeName(volumeName); + verifyBucketName(bucketName); + Preconditions.checkNotNull(keyName); + OmKeyArgs keyArgs = new OmKeyArgs.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .build(); + ozoneManagerClient.deleteObjectTagging(keyArgs); + } + private static ExecutorService createThreadPoolExecutor( int corePoolSize, int maximumPoolSize, String threadNameFormat) { return new ThreadPoolExecutor(corePoolSize, maximumPoolSize, diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java index 361dcb1fd0a..0db67441fb5 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java @@ -73,8 +73,12 @@ private void init(boolean incrementalChunkList) throws IOException { ((InMemoryConfiguration) config).setBoolean( OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); + ((InMemoryConfiguration) config).setBoolean( + "ozone.client.hbase.enhancements.allowed", true); ((InMemoryConfiguration) config).setBoolean( OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); + ((InMemoryConfiguration) config).setInt( + "ozone.client.bytes.per.checksum", 8192); RpcClient rpcClient = new RpcClient(config, null) { diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java index 09a6c0a5c0e..e03fa461cc6 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java @@ -117,7 +117,7 @@ public void testDeleteVolume() @Test public void testCreateVolumeWithMetadata() - throws IOException, OzoneClientException { + throws IOException { String volumeName = UUID.randomUUID().toString(); VolumeArgs volumeArgs = VolumeArgs.newBuilder() .addMetadata("key1", "val1") diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java index 25a3ad2d9c8..1b67f024bbe 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java @@ -153,7 +153,7 @@ public void testPutECKeyAndCheckDNStoredData() throws IOException { Map storages = factoryStub.getStorages(); DatanodeDetails[] dnDetails = - storages.keySet().toArray(new DatanodeDetails[storages.size()]); + 
storages.keySet().toArray(new DatanodeDetails[0]); Arrays.sort(dnDetails); for (int i = 0; i < inputChunks.length; i++) { MockDatanodeStorage datanodeStorage = storages.get(dnDetails[i]); @@ -182,7 +182,7 @@ public void testPutECKeyAndCheckParityData() throws IOException { Map storages = factoryStub.getStorages(); DatanodeDetails[] dnDetails = - storages.keySet().toArray(new DatanodeDetails[storages.size()]); + storages.keySet().toArray(new DatanodeDetails[0]); Arrays.sort(dnDetails); for (int i = dataBlocks; i < parityBlocks + dataBlocks; i++) { diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestFileChecksumHelper.java similarity index 74% rename from hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java rename to hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestFileChecksumHelper.java index 702a450ee75..83feb378c56 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestFileChecksumHelper.java @@ -21,7 +21,9 @@ import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum; import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.InMemoryConfiguration; @@ -56,10 +58,11 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -77,9 +80,10 @@ import static org.mockito.Mockito.mock; /** - * Unit tests for ReplicatedFileChecksumHelper class. + * Unit tests for Replicated and EC FileChecksumHelper class. */ -public class TestReplicatedFileChecksumHelper { +public class TestFileChecksumHelper { + private final FileChecksum noCachedChecksum = null; private OzoneClient client; private ObjectStore store; private OzoneVolume volume; @@ -119,128 +123,126 @@ public void close() throws IOException { client.close(); } + private OmKeyInfo omKeyInfo(ReplicationType type, FileChecksum cachedChecksum, List locationInfo) { + ReplicationConfig config = type == ReplicationType.EC ? new ECReplicationConfig(6, 3) + : RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE); - @Test - public void testEmptyBlock() throws IOException { - // test the file checksum of a file with an empty block. 
- RpcClient mockRpcClient = mock(RpcClient.class); - - OzoneManagerProtocol om = mock(OzoneManagerProtocol.class); - when(mockRpcClient.getOzoneManagerClient()).thenReturn(om); - - OmKeyInfo omKeyInfo = new OmKeyInfo.Builder() + return new OmKeyInfo.Builder() .setVolumeName(null) .setBucketName(null) .setKeyName(null) .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, new ArrayList<>()))) + new OmKeyLocationInfoGroup(0, locationInfo))) .setCreationTime(Time.now()) .setModificationTime(Time.now()) .setDataSize(0) - .setReplicationConfig(RatisReplicationConfig.getInstance( - HddsProtos.ReplicationFactor.ONE)) + .setReplicationConfig(config) .setFileEncryptionInfo(null) + .setFileChecksum(cachedChecksum) .setAcls(null) .build(); + } - when(om.lookupKey(any())).thenReturn(omKeyInfo); + private BaseFileChecksumHelper checksumHelper(ReplicationType type, OzoneVolume mockVolume, OzoneBucket mockBucket, + int length, OzoneClientConfig.ChecksumCombineMode combineMode, RpcClient mockRpcClient, OmKeyInfo keyInfo) + throws IOException { + return type == ReplicationType.RATIS ? new ReplicatedFileChecksumHelper( + mockVolume, mockBucket, "dummy", length, combineMode, mockRpcClient) + : new ECFileChecksumHelper( + mockVolume, mockBucket, "dummy", length, combineMode, mockRpcClient, keyInfo); + } - OzoneVolume mockVolume = mock(OzoneVolume.class); - when(mockVolume.getName()).thenReturn("vol1"); - OzoneBucket bucket = mock(OzoneBucket.class); - when(bucket.getName()).thenReturn("bucket1"); + private Pipeline pipeline(ReplicationType type, List datanodeDetails) { + ReplicationConfig config = type == ReplicationType.RATIS ? RatisReplicationConfig + .getInstance(HddsProtos.ReplicationFactor.THREE) + : new ECReplicationConfig(6, 3); + + return Pipeline.newBuilder() + .setId(PipelineID.randomId()) + .setReplicationConfig(config) + .setState(Pipeline.PipelineState.CLOSED) + .setNodes(datanodeDetails) + .build(); + } + @ParameterizedTest + @EnumSource(names = {"EC", "RATIS"}) + public void testEmptyBlock(ReplicationType helperType) throws IOException { + // test the file checksum of a file with an empty block. 
+ RpcClient mockRpcClient = mock(RpcClient.class); + OmKeyInfo omKeyInfo = omKeyInfo(helperType, noCachedChecksum, new ArrayList<>()); + OzoneVolume mockVolume = mock(OzoneVolume.class); + OzoneBucket mockBucket = mock(OzoneBucket.class); OzoneClientConfig.ChecksumCombineMode combineMode = OzoneClientConfig.ChecksumCombineMode.MD5MD5CRC; - ReplicatedFileChecksumHelper helper = new ReplicatedFileChecksumHelper( - mockVolume, bucket, "dummy", 10, combineMode, mockRpcClient); + OzoneManagerProtocol om = mock(OzoneManagerProtocol.class); + when(mockRpcClient.getOzoneManagerClient()).thenReturn(om); + when(om.lookupKey(any())).thenReturn(omKeyInfo); + when(mockVolume.getName()).thenReturn("vol1"); + when(mockBucket.getName()).thenReturn("bucket1"); + + + BaseFileChecksumHelper helper = + checksumHelper(helperType, mockVolume, mockBucket, 10, combineMode, mockRpcClient, omKeyInfo); helper.compute(); FileChecksum fileChecksum = helper.getFileChecksum(); assertInstanceOf(MD5MD5CRC32GzipFileChecksum.class, fileChecksum); assertEquals(DataChecksum.Type.CRC32, - ((MD5MD5CRC32GzipFileChecksum)fileChecksum).getCrcType()); + ((MD5MD5CRC32GzipFileChecksum) fileChecksum).getCrcType()); // test negative length - helper = new ReplicatedFileChecksumHelper( - mockVolume, bucket, "dummy", -1, combineMode, mockRpcClient); + helper = + checksumHelper(helperType, mockVolume, mockBucket, -1, combineMode, mockRpcClient, omKeyInfo); helper.compute(); assertNull(helper.getKeyLocationInfoList()); } - @Test - public void testOneBlock() throws IOException { + @ParameterizedTest + @EnumSource(names = {"EC", "RATIS"}) + public void testOneBlock(ReplicationType helperType) throws IOException { // test the file checksum of a file with one block. OzoneConfiguration conf = new OzoneConfiguration(); - RpcClient mockRpcClient = mock(RpcClient.class); - - List dns = Arrays.asList( + List dns = Collections.singletonList( DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()).build()); - Pipeline pipeline; - pipeline = Pipeline.newBuilder() - .setId(PipelineID.randomId()) - .setReplicationConfig( - RatisReplicationConfig - .getInstance(HddsProtos.ReplicationFactor.THREE)) - .setState(Pipeline.PipelineState.CLOSED) - .setNodes(dns) - .build(); - + Pipeline pipeline = pipeline(helperType, dns); + BlockID blockID = new BlockID(1, 1); + OmKeyLocationInfo omKeyLocationInfo = + new OmKeyLocationInfo.Builder() + .setPipeline(pipeline) + .setBlockID(blockID) + .build(); + List omKeyLocationInfoList = + Collections.singletonList(omKeyLocationInfo); + OmKeyInfo omKeyInfo = omKeyInfo(helperType, noCachedChecksum, omKeyLocationInfoList); XceiverClientGrpc xceiverClientGrpc = new XceiverClientGrpc(pipeline, conf) { @Override public XceiverClientReply sendCommandAsync( ContainerProtos.ContainerCommandRequestProto request, DatanodeDetails dn) { - return buildValidResponse(); + return buildValidResponse(helperType); } }; XceiverClientFactory factory = mock(XceiverClientFactory.class); + OzoneManagerProtocol om = mock(OzoneManagerProtocol.class); when(factory.acquireClientForReadData(any())). 
thenReturn(xceiverClientGrpc); - when(mockRpcClient.getXceiverClientManager()).thenReturn(factory); - - OzoneManagerProtocol om = mock(OzoneManagerProtocol.class); when(mockRpcClient.getOzoneManagerClient()).thenReturn(om); - - BlockID blockID = new BlockID(1, 1); - OmKeyLocationInfo omKeyLocationInfo = - new OmKeyLocationInfo.Builder().setPipeline(pipeline) - .setBlockID(blockID) - .build(); - - List omKeyLocationInfoList = - Arrays.asList(omKeyLocationInfo); - - OmKeyInfo omKeyInfo = new OmKeyInfo.Builder() - .setVolumeName(null) - .setBucketName(null) - .setKeyName(null) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, omKeyLocationInfoList))) - .setCreationTime(Time.now()) - .setModificationTime(Time.now()) - .setDataSize(0) - .setReplicationConfig(RatisReplicationConfig.getInstance( - HddsProtos.ReplicationFactor.ONE)) - .setFileEncryptionInfo(null) - .setAcls(null) - .build(); - when(om.lookupKey(any())).thenReturn(omKeyInfo); OzoneVolume mockVolume = mock(OzoneVolume.class); when(mockVolume.getName()).thenReturn("vol1"); - OzoneBucket bucket = mock(OzoneBucket.class); - when(bucket.getName()).thenReturn("bucket1"); + OzoneBucket mockBucket = mock(OzoneBucket.class); + when(mockBucket.getName()).thenReturn("bucket1"); OzoneClientConfig.ChecksumCombineMode combineMode = OzoneClientConfig.ChecksumCombineMode.MD5MD5CRC; - ReplicatedFileChecksumHelper helper = new ReplicatedFileChecksumHelper( - mockVolume, bucket, "dummy", 10, combineMode, mockRpcClient); + BaseFileChecksumHelper helper = checksumHelper( + helperType, mockVolume, mockBucket, 10, combineMode, mockRpcClient, omKeyInfo); helper.compute(); FileChecksum fileChecksum = helper.getFileChecksum(); @@ -249,28 +251,12 @@ public XceiverClientReply sendCommandAsync( FileChecksum cachedChecksum = new MD5MD5CRC32GzipFileChecksum(); /// test cached checksum - OmKeyInfo omKeyInfoWithChecksum = new OmKeyInfo.Builder() - .setVolumeName(null) - .setBucketName(null) - .setKeyName(null) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, omKeyLocationInfoList))) - .setCreationTime(Time.now()) - .setModificationTime(Time.now()) - .setDataSize(0) - .setReplicationConfig( - RatisReplicationConfig.getInstance( - HddsProtos.ReplicationFactor.ONE)) - .setFileEncryptionInfo(null) - .setAcls(null) - .setFileChecksum(cachedChecksum) - .build(); + OmKeyInfo omKeyInfoWithChecksum = omKeyInfo(helperType, cachedChecksum, omKeyLocationInfoList); when(om.lookupKey(any())). thenReturn(omKeyInfoWithChecksum); - helper = new ReplicatedFileChecksumHelper( - mockVolume, bucket, "dummy", 10, combineMode, - mockRpcClient); + helper = checksumHelper( + helperType, mockVolume, mockBucket, 10, combineMode, mockRpcClient, omKeyInfo); helper.compute(); fileChecksum = helper.getFileChecksum(); @@ -278,7 +264,7 @@ public XceiverClientReply sendCommandAsync( assertEquals(1, helper.getKeyLocationInfoList().size()); } - private XceiverClientReply buildValidResponse() { + private XceiverClientReply buildValidResponse(ReplicationType type) { // return a GetBlockResponse message of a block and its chunk checksums. 
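
The test refactor above collapses the separate Replicated and EC checksum tests into a single class driven by JUnit 5's @ParameterizedTest with @EnumSource. A minimal, self-contained sketch of that pattern follows; the class and method names here are invented purely for illustration.

import org.apache.hadoop.hdds.client.ReplicationType;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;

class ReplicationTypeParameterizationSketch {

  // The enum class is inferred from the parameter type; "names" limits the run
  // to the EC and RATIS constants, mirroring the refactored checksum tests.
  @ParameterizedTest
  @EnumSource(names = {"EC", "RATIS"})
  void runsOncePerSelectedConstant(ReplicationType type) {
    // Each invocation receives one of the selected constants, so a single test
    // body can exercise both the replicated and the EC code paths.
    System.out.println("Running with replication type: " + type);
  }
}
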
ContainerProtos.DatanodeBlockID blockID = ContainerProtos.DatanodeBlockID.newBuilder() @@ -286,7 +272,7 @@ private XceiverClientReply buildValidResponse() { .setLocalID(1) .setBlockCommitSequenceId(1).build(); - byte[] byteArray = new byte[10]; + byte[] byteArray = new byte[12]; ByteString byteString = ByteString.copyFrom(byteArray); ContainerProtos.ChecksumData checksumData = @@ -296,13 +282,17 @@ private XceiverClientReply buildValidResponse() { .addChecksums(byteString) .build(); - ContainerProtos.ChunkInfo chunkInfo = - ContainerProtos.ChunkInfo.newBuilder() + ContainerProtos.ChunkInfo.Builder chunkInfoBuilder = ContainerProtos.ChunkInfo.newBuilder() .setChunkName("dummy_chunk") .setOffset(1) .setLen(10) - .setChecksumData(checksumData) - .build(); + .setChecksumData(checksumData); + + if (type == ReplicationType.EC) { + chunkInfoBuilder.setStripeChecksum(byteString); + } + + ContainerProtos.ChunkInfo chunkInfo = chunkInfoBuilder.build(); ContainerProtos.BlockData blockData = ContainerProtos.BlockData.newBuilder() @@ -337,6 +327,7 @@ private OzoneBucket getOzoneBucket() throws IOException { /** * Write a real key and compute file checksum of it. + * * @throws IOException */ @Test diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockOutputStreamEntry.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockOutputStreamEntry.java index 718e724e585..10f90544de0 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockOutputStreamEntry.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockOutputStreamEntry.java @@ -126,7 +126,7 @@ private DatanodeDetails aNode(String ip, String hostName, int port) { .setIpAddress(ip) .setHostName(hostName) .addPort( - DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, port)) + DatanodeDetails.newStandalonePort(port)) .build(); } } diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml index bd16a0a5dfe..f7f60dcd1d1 100644 --- a/hadoop-ozone/common/pom.xml +++ b/hadoop-ozone/common/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-common - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Common Apache Ozone Common jar diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/fs/ozone/OzoneTrashPolicy.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/fs/ozone/OzoneTrashPolicy.java new file mode 100644 index 00000000000..a250832215b --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/fs/ozone/OzoneTrashPolicy.java @@ -0,0 +1,208 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs.ozone; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.InvalidPathException; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.TrashPolicyDefault; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.OFSPath; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; +import org.apache.hadoop.util.Time; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; + +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; + + +/** + * TrashPolicy for Ozone Specific Trash Operations. + */ +public class OzoneTrashPolicy extends TrashPolicyDefault { + + private static final Logger LOG = + LoggerFactory.getLogger(OzoneTrashPolicy.class); + + protected static final Path CURRENT = new Path("Current"); + + protected static final int MSECS_PER_MINUTE = 60 * 1000; + + private static final FsPermission PERMISSION = + new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE); + private OzoneConfiguration ozoneConfiguration; + + public OzoneConfiguration getOzoneConfiguration() { + return ozoneConfiguration; + } + + @Override + public void initialize(Configuration conf, FileSystem fs) { + this.fs = fs; + ozoneConfiguration = OzoneConfiguration.of(conf); + float hadoopTrashInterval = conf.getFloat( + FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT); + // check whether user has configured ozone specific trash-interval + // if not fall back to hadoop configuration + this.deletionInterval = (long)(conf.getFloat( + OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, hadoopTrashInterval) + * MSECS_PER_MINUTE); + } + + @Override + public boolean moveToTrash(Path path) throws IOException { + if (validatePath(path)) { + if (!isEnabled()) { + return false; + } + + if (!path.isAbsolute()) { // make path absolute + path = new Path(fs.getWorkingDirectory(), path); + } + + // check that path exists + fs.getFileStatus(path); + String qpath = fs.makeQualified(path).toString(); + + Path trashRoot = fs.getTrashRoot(path); + Path trashCurrent = new Path(trashRoot, CURRENT); + if (qpath.startsWith(trashRoot.toString())) { + return false; // already in trash + } + + if (trashRoot.getParent().toString().startsWith(qpath)) { + throw new IOException("Cannot move \"" + path + + "\" to the trash, as it contains the trash"); + } + + Path trashPath; + Path baseTrashPath; + if (fs.getUri().getScheme().equals(OzoneConsts.OZONE_OFS_URI_SCHEME)) { + OFSPath ofsPath = new OFSPath(path, ozoneConfiguration); + // trimming volume and bucket in order to be compatible with o3fs + // Also including volume and bucket name in the path is redundant as + // the key is already in a particular volume and bucket. 
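// Worked example (illustrative only, not part of the patch): for an OFS path
// such as ofs://om/vol1/bucket1/dir1/key1, OFSPath#getKeyName() returns
// "dir1/key1", so the entry is renamed to <bucket trash root>/Current/dir1/key1
// rather than <bucket trash root>/Current/vol1/bucket1/dir1/key1, keeping the
// trash layout compatible with o3fs and avoiding a redundant volume/bucket prefix.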
+ Path trimmedVolumeAndBucket = + new Path(OzoneConsts.OZONE_URI_DELIMITER + + ofsPath.getKeyName()); + trashPath = makeTrashRelativePath(trashCurrent, trimmedVolumeAndBucket); + baseTrashPath = makeTrashRelativePath(trashCurrent, + trimmedVolumeAndBucket.getParent()); + } else { + trashPath = makeTrashRelativePath(trashCurrent, path); + baseTrashPath = makeTrashRelativePath(trashCurrent, path.getParent()); + } + + IOException cause = null; + + // try twice, in case checkpoint between the mkdirs() & rename() + for (int i = 0; i < 2; i++) { + try { + if (!fs.mkdirs(baseTrashPath, PERMISSION)) { // create current + LOG.warn("Can't create(mkdir) trash directory: " + baseTrashPath); + return false; + } + } catch (FileAlreadyExistsException e) { + // find the path which is not a directory, and modify baseTrashPath + // & trashPath, then mkdirs + Path existsFilePath = baseTrashPath; + while (!fs.exists(existsFilePath)) { + existsFilePath = existsFilePath.getParent(); + } + baseTrashPath = new Path(baseTrashPath.toString() + .replace(existsFilePath.toString(), + existsFilePath.toString() + Time.now())); + trashPath = new Path(baseTrashPath, trashPath.getName()); + // retry, ignore current failure + --i; + continue; + } catch (IOException e) { + LOG.warn("Can't create trash directory: " + baseTrashPath, e); + cause = e; + break; + } + try { + // if the target path in Trash already exists, then append with + // a current time in millisecs. + String orig = trashPath.toString(); + + while (fs.exists(trashPath)) { + trashPath = new Path(orig + Time.now()); + } + + // move to current trash + boolean renamed = fs.rename(path, trashPath); + if (!renamed) { + LOG.error("Failed to move to trash: {}", path); + throw new IOException("Failed to move to trash: " + path); + } + LOG.info("Moved: '" + path + "' to trash at: " + trashPath); + return true; + } catch (IOException e) { + cause = e; + } + } + throw (IOException) new IOException("Failed to move to trash: " + path) + .initCause(cause); + } + return false; + } + + private boolean validatePath(Path path) throws IOException { + String key = path.toUri().getPath(); + // Check to see if bucket is path item to be deleted. 
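
OzoneTrashPolicy only takes effect once Hadoop's Trash machinery is pointed at it. Below is a hedged sketch of how a client might wire it up: the "fs.trash.classname" key is the standard Hadoop TrashPolicy selector, the ofs URI, key path and 60-minute interval are arbitrary examples, and the Ozone-specific interval key is referenced through the same OMConfigKeys constant the patch uses.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;
import org.apache.hadoop.ozone.om.OMConfigKeys;

public final class TrashPolicyUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Ask TrashPolicy.getInstance() to return the Ozone-specific policy.
    conf.set("fs.trash.classname", "org.apache.hadoop.fs.ozone.OzoneTrashPolicy");
    // Ozone-specific deletion interval (minutes); falls back to fs.trash.interval if unset.
    conf.setFloat(OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, 60);

    FileSystem fs = FileSystem.get(URI.create("ofs://om-service/"), conf);
    // Moves the key into <bucket trash root>/Current instead of deleting it outright.
    Trash.moveToAppropriateTrash(fs, new Path("/vol1/bucket1/dir1/key1"), conf);
  }
}
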
+ // Cannot moveToTrash if bucket is deleted, + // return error for this condition + OFSPath ofsPath = new OFSPath(key.substring(1), ozoneConfiguration); + if (path.isRoot() || ofsPath.isBucket()) { + throw new IOException("Recursive rm of bucket " + + path + " not permitted"); + } + + Path trashRoot = this.fs.getTrashRoot(path); + + LOG.debug("Key path to moveToTrash: {}", key); + String trashRootKey = trashRoot.toUri().getPath(); + LOG.debug("TrashrootKey for moveToTrash: {}", trashRootKey); + + if (!OzoneFSUtils.isValidName(key)) { + throw new InvalidPathException("Invalid path Name " + key); + } + // first condition tests when length key is <= length trash + // and second when length key > length trash + if ((key.contains(this.fs.TRASH_PREFIX)) && (trashRootKey.startsWith(key)) + || key.startsWith(trashRootKey)) { + return false; + } + return true; + } + + private Path makeTrashRelativePath(Path basePath, Path rmFilePath) { + return Path.mergePaths(basePath, rmFilePath); + } + +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/fs/ozone/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/fs/ozone/package-info.java new file mode 100644 index 00000000000..17803f7af06 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/fs/ozone/package-info.java @@ -0,0 +1,20 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.fs.ozone; +/** + * Ozone trash policy implementation. + */ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java index c5985f82093..b58e1021d98 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java @@ -301,6 +301,19 @@ public boolean isSnapshotPath() { return false; } + /** + * If the path is a snapshot path get the snapshot name from the key name. + */ + public String getSnapshotName() { + if (keyName.startsWith(OM_SNAPSHOT_INDICATOR)) { + if (!bucketName.isEmpty() && !volumeName.isEmpty()) { + String[] keyNames = keyName.split(OZONE_URI_DELIMITER); + return keyNames.length > 1 ? keyNames[1] : null; + } + } + return null; + } + /** * If key name is not empty, the given path is a key. * e.g. /volume1/bucket2/key3 is a key. diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index d6320061253..8d24f2de155 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -278,6 +278,7 @@ public static boolean isReadOnly( case SetSafeMode: case PrintCompactionLogDag: case GetSnapshotInfo: + case GetObjectTagging: case GetQuotaRepairStatus: case StartQuotaRepair: return true; @@ -339,6 +340,8 @@ public static boolean isReadOnly( case AbortExpiredMultiPartUploads: case SetSnapshotProperty: case QuotaRepair: + case PutObjectTagging: + case DeleteObjectTagging: case UnknownCommand: return false; case EchoRPC: @@ -767,7 +770,7 @@ public static String normalizeKey(String keyName, normalizedKeyName = new Path(OM_KEY_PREFIX + keyName) .toUri().getPath(); } - if (!keyName.equals(normalizedKeyName) && LOG.isDebugEnabled()) { + if (LOG.isDebugEnabled() && !keyName.equals(normalizedKeyName)) { LOG.debug("Normalized key {} to {} ", keyName, normalizedKeyName.substring(1)); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java index 26693d19c64..e2b2f61a368 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java @@ -27,6 +27,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclScope; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclType; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import org.apache.ratis.util.MemoizedSupplier; @@ -41,8 +42,11 @@ import java.util.function.IntFunction; import java.util.function.Supplier; +import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static 
org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.NONE; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; /** * OzoneACL classes define bucket ACLs used in OZONE. @@ -58,6 +62,13 @@ public class OzoneAcl { private static final String ACL_SCOPE_REGEX = ".*\\[(ACCESS|DEFAULT)\\]"; + /** + * Link bucket default acl defined [world::rw] + * which is similar to Linux POSIX symbolic. + */ + public static final OzoneAcl LINK_BUCKET_DEFAULT_ACL = + new OzoneAcl(IAccessAuthorizer.ACLIdentityType.WORLD, "", ACCESS, READ, WRITE); + private final ACLIdentityType type; private final String name; @JsonIgnore diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/IOmMetadataReader.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/IOmMetadataReader.java index 7d4e769365f..99e2759117e 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/IOmMetadataReader.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/IOmMetadataReader.java @@ -30,6 +30,7 @@ import java.io.IOException; import java.util.List; +import java.util.Map; /** * Protocol for OmMetadataReader's. @@ -165,4 +166,11 @@ ListKeysLightResult listKeysLight(String volumeName, String bucketName, * @throws IOException if there is error. */ List getAcl(OzoneObj obj) throws IOException; + + /** + * Gets the tags for the specified key. + * @param args Key args + * @return Tags associated with the key. + */ + Map getObjectTagging(OmKeyArgs args) throws IOException; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index a77bc4f5304..880fe8614b2 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -421,6 +421,11 @@ private OMConfigKeys() { // resulting 24MB public static final int OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT = 6000; + public static final String OZONE_THREAD_NUMBER_DIR_DELETION = + "ozone.thread.number.dir.deletion"; + + public static final int OZONE_THREAD_NUMBER_DIR_DELETION_DEFAULT = 10; + public static final String SNAPSHOT_SST_DELETING_LIMIT_PER_TASK = "ozone.snapshot.filtering.limit.per.task"; public static final int SNAPSHOT_SST_DELETING_LIMIT_PER_TASK_DEFAULT = 2; @@ -625,4 +630,9 @@ private OMConfigKeys() { public static final String OZONE_OM_MAX_BUCKET = "ozone.om.max.buckets"; public static final int OZONE_OM_MAX_BUCKET_DEFAULT = 100000; + /** + * Configuration property to configure the max server side response size for list calls. 
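
The two OM settings introduced above, ozone.thread.number.dir.deletion and ozone.om.server.list.max.size, are plain integer properties. A minimal sketch of tuning them programmatically is shown below; the chosen values are arbitrary examples, not recommendations.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.om.OMConfigKeys;

public class OmTuningSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Use 4 directory-deletion worker threads instead of the default of 10.
    conf.setInt(OMConfigKeys.OZONE_THREAD_NUMBER_DIR_DELETION, 4);
    // Cap server-side list responses at 500 entries instead of the default 1000.
    conf.setInt(OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE, 500);
    System.out.println(conf.getInt(OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE, 1000));
  }
}
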
+ */ + public static final String OZONE_OM_SERVER_LIST_MAX_SIZE = "ozone.om.server.list.max.size"; + public static final int OZONE_OM_SERVER_LIST_MAX_SIZE_DEFAULT = 1000; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/GrpcOMFailoverProxyProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/GrpcOMFailoverProxyProvider.java index 65d9e559005..744ada797e7 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/GrpcOMFailoverProxyProvider.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/GrpcOMFailoverProxyProvider.java @@ -18,12 +18,9 @@ package org.apache.hadoop.ozone.om.ha; import io.grpc.Status; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.conf.ConfigurationException; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; -import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; @@ -41,6 +38,7 @@ import java.util.Optional; import java.util.OptionalInt; import io.grpc.StatusRuntimeException; +import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -60,9 +58,10 @@ public class GrpcOMFailoverProxyProvider extends LoggerFactory.getLogger(GrpcOMFailoverProxyProvider.class); public GrpcOMFailoverProxyProvider(ConfigurationSource configuration, + UserGroupInformation ugi, String omServiceId, Class protocol) throws IOException { - super(configuration, omServiceId, protocol); + super(configuration, ugi, omServiceId, protocol); } @Override @@ -116,9 +115,7 @@ protected void loadOMClientConfigs(ConfigurationSource config, String omSvcId) private T createOMProxy() throws IOException { InetSocketAddress addr = new InetSocketAddress(0); - Configuration hadoopConf = - LegacyHadoopConfigurationSource.asHadoopConfiguration(getConf()); - return (T) RPC.getProxy(getInterface(), 0, addr, hadoopConf); + return createOMProxy(addr); } /** diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/HadoopRpcOMFailoverProxyProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/HadoopRpcOMFailoverProxyProvider.java index 543d2e4aed3..4447a72ab13 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/HadoopRpcOMFailoverProxyProvider.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/HadoopRpcOMFailoverProxyProvider.java @@ -29,15 +29,9 @@ import java.util.List; import java.util.Map; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import org.apache.hadoop.io.Text; -import org.apache.hadoop.io.retry.RetryPolicies; -import org.apache.hadoop.io.retry.RetryPolicy; -import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.ha.ConfUtils; @@ -59,9 +53,7 @@ public class HadoopRpcOMFailoverProxyProvider extends public static final Logger LOG = LoggerFactory.getLogger(HadoopRpcOMFailoverProxyProvider.class); - private final long omVersion; private final Text delegationTokenService; - private final UserGroupInformation ugi; private Map 
omProxyInfos; private List retryExceptions = new ArrayList<>(); @@ -75,9 +67,7 @@ public HadoopRpcOMFailoverProxyProvider(ConfigurationSource configuration, UserGroupInformation ugi, String omServiceId, Class protocol) throws IOException { - super(configuration, omServiceId, protocol); - this.ugi = ugi; - this.omVersion = RPC.getProtocolVersion(protocol); + super(configuration, ugi, omServiceId, protocol); this.delegationTokenService = computeDelegationTokenService(); } @@ -130,24 +120,6 @@ protected void loadOMClientConfigs(ConfigurationSource config, String omSvcId) setOmNodeAddressMap(omNodeAddressMap); } - private T createOMProxy(InetSocketAddress omAddress) throws IOException { - Configuration hadoopConf = - LegacyHadoopConfigurationSource.asHadoopConfiguration(getConf()); - RPC.setProtocolEngine(hadoopConf, getInterface(), ProtobufRpcEngine.class); - - // FailoverOnNetworkException ensures that the IPC layer does not attempt - // retries on the same OM in case of connection exception. This retry - // policy essentially results in TRY_ONCE_THEN_FAIL. - RetryPolicy connectionRetryPolicy = RetryPolicies - .failoverOnNetworkException(0); - - return (T) RPC.getProtocolProxy(getInterface(), omVersion, - omAddress, ugi, hadoopConf, NetUtils.getDefaultSocketFactory( - hadoopConf), (int) OmUtils.getOMClientRpcTimeOut(getConf()), - connectionRetryPolicy).getProxy(); - - } - /** * Get the proxy object which should be used until the next failover event * occurs. RPC proxy object is intialized lazily. diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProviderBase.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProviderBase.java index 1a738b2ac84..5045a32bdcd 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProviderBase.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProviderBase.java @@ -21,17 +21,25 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.protobuf.ServiceException; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import org.apache.hadoop.io.retry.FailoverProxyProvider; +import org.apache.hadoop.io.retry.RetryPolicies; import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.io.retry.RetryPolicy.RetryAction.RetryDecision; +import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMLeaderNotReadyException; import org.apache.hadoop.ozone.om.exceptions.OMNotLeaderException; import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager; import org.apache.ratis.protocol.exceptions.StateMachineException; import org.slf4j.Logger; @@ -85,13 +93,17 @@ public abstract class OMFailoverProxyProviderBase implements private Set accessControlExceptionOMs = new HashSet<>(); private boolean performFailoverDone; + private final UserGroupInformation ugi; + public 
OMFailoverProxyProviderBase(ConfigurationSource configuration, + UserGroupInformation ugi, String omServiceId, Class protocol) throws IOException { this.conf = configuration; this.protocolClass = protocol; this.performFailoverDone = true; this.omServiceId = omServiceId; + this.ugi = ugi; waitBetweenRetries = conf.getLong( OzoneConfigKeys.OZONE_CLIENT_WAIT_BETWEEN_RETRIES_MILLIS_KEY, @@ -112,6 +124,35 @@ protected abstract void loadOMClientConfigs(ConfigurationSource config, String omSvcId) throws IOException; + /** + * Get the protocol proxy for provided address. + * @param omAddress An instance of {@link InetSocketAddress} which contains the address to connect + * @return the proxy connection to the address and the set of methods supported by the server at the address + * @throws IOException if any error occurs while trying to get the proxy + */ + protected T createOMProxy(InetSocketAddress omAddress) throws IOException { + Configuration hadoopConf = + LegacyHadoopConfigurationSource.asHadoopConfiguration(getConf()); + + // TODO: Post upgrade to Protobuf 3.x we need to use ProtobufRpcEngine2 + RPC.setProtocolEngine(hadoopConf, getInterface(), ProtobufRpcEngine.class); + + // Ensure we do not attempt retry on the same OM in case of exceptions + RetryPolicy connectionRetryPolicy = RetryPolicies.failoverOnNetworkException(0); + + return (T) RPC.getProtocolProxy( + getInterface(), + RPC.getProtocolVersion(protocolClass), + omAddress, + ugi, + hadoopConf, + NetUtils.getDefaultSocketFactory(hadoopConf), + (int) OmUtils.getOMClientRpcTimeOut(getConf()), + connectionRetryPolicy + ).getProxy(); + } + + protected synchronized boolean shouldFailover(Exception ex) { Throwable unwrappedException = HddsUtils.getUnwrappedException(ex); if (unwrappedException instanceof AccessControlException || diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java index a9fa742a108..82b9d8cccfb 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java @@ -110,6 +110,10 @@ public String getOwnerName() { return ownerName; } + public long getReplicatedSize() { + return QuotaUtil.getReplicatedSize(getDataSize(), replicationConfig); + } + /** * Builder of BasicOmKeyInfo. */ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/DeleteTenantState.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/DeleteTenantState.java index 5fe53ee1ea3..1ffae273f0f 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/DeleteTenantState.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/DeleteTenantState.java @@ -66,7 +66,6 @@ public static DeleteTenantState.Builder newBuilder() { /** * Builder for TenantDeleted. 
*/ - @SuppressWarnings("checkstyle:hiddenfield") public static final class Builder { private String volumeName; private long volRefCount; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyValueUtil.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyValueUtil.java index db07a19b211..0b9b4b38a51 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyValueUtil.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyValueUtil.java @@ -34,8 +34,8 @@ private KeyValueUtil() { /** * Parse Key,Value map data from protobuf representation. */ - public static Map getFromProtobuf(List metadata) { - return metadata.stream() + public static Map getFromProtobuf(List keyValueList) { + return keyValueList.stream() .collect(Collectors.toMap(KeyValue::getKey, KeyValue::getValue)); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java index 5a83f6dbba6..42c97211c97 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java @@ -48,7 +48,8 @@ public final class OmBucketInfo extends WithObjectID implements Auditable, CopyO private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(BucketInfo.getDefaultInstance()), OmBucketInfo::getFromProtobuf, - OmBucketInfo::getProtobuf); + OmBucketInfo::getProtobuf, + OmBucketInfo.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBAccessIdInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBAccessIdInfo.java index 8ca0054b347..258aa1ace98 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBAccessIdInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBAccessIdInfo.java @@ -34,6 +34,7 @@ public final class OmDBAccessIdInfo { Proto2Codec.get(ExtendedUserAccessIdInfo.getDefaultInstance()), OmDBAccessIdInfo::getFromProtobuf, OmDBAccessIdInfo::getProtobuf, + OmDBAccessIdInfo.class, DelegatedCodec.CopyType.SHALLOW); public static Codec getCodec() { @@ -111,7 +112,6 @@ public boolean getIsDelegatedAdmin() { /** * Builder for OmDBAccessIdInfo. */ - @SuppressWarnings("checkstyle:hiddenfield") public static final class Builder { private String tenantId; private String userPrincipal; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBTenantState.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBTenantState.java index bb356eafdd9..9aaf04f640b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBTenantState.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBTenantState.java @@ -34,6 +34,7 @@ public final class OmDBTenantState implements Comparable { Proto2Codec.get(TenantState.getDefaultInstance()), OmDBTenantState::getFromProtobuf, OmDBTenantState::getProtobuf, + OmDBTenantState.class, DelegatedCodec.CopyType.SHALLOW); public static Codec getCodec() { @@ -167,7 +168,6 @@ public static OmDBTenantState getFromProtobuf(TenantState proto) { /** * Builder for OmDBTenantState. 
*/ - @SuppressWarnings("checkstyle:hiddenfield") public static final class Builder { private String tenantId; private String bucketNamespaceName; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBUserPrincipalInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBUserPrincipalInfo.java index 75b01a04171..a511e2cb047 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBUserPrincipalInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBUserPrincipalInfo.java @@ -33,11 +33,11 @@ * principal. */ public final class OmDBUserPrincipalInfo { - private static final Codec CODEC - = new DelegatedCodec<>( - Proto2Codec.get(TenantUserPrincipalInfo.getDefaultInstance()), - OmDBUserPrincipalInfo::getFromProtobuf, - OmDBUserPrincipalInfo::getProtobuf); + private static final Codec CODEC = new DelegatedCodec<>( + Proto2Codec.get(TenantUserPrincipalInfo.getDefaultInstance()), + OmDBUserPrincipalInfo::getFromProtobuf, + OmDBUserPrincipalInfo::getProtobuf, + OmDBUserPrincipalInfo.class); public static Codec getCodec() { return CODEC; @@ -90,7 +90,6 @@ public static OmDBUserPrincipalInfo getFromProtobuf( /** * Builder for OmDBUserPrincipalInfo. */ - @SuppressWarnings("checkstyle:hiddenfield") public static final class Builder { private Set accessIds; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java index 55e138dbd10..69ed1b613bd 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java @@ -40,7 +40,8 @@ public class OmDirectoryInfo extends WithParentObjectId private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(DirectoryInfo.getDefaultInstance()), OmDirectoryInfo::getFromProtobuf, - OmDirectoryInfo::getProtobuf); + OmDirectoryInfo::getProtobuf, + OmDirectoryInfo.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java index 0507a27de61..5c480860d2b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java @@ -65,7 +65,8 @@ private static Codec newCodec(boolean ignorePipeline) { return new DelegatedCodec<>( Proto2Codec.get(KeyInfo.getDefaultInstance()), OmKeyInfo::getFromProtobuf, - k -> k.getProtobuf(ignorePipeline, ClientVersion.CURRENT_VERSION)); + k -> k.getProtobuf(ignorePipeline, ClientVersion.CURRENT_VERSION), + OmKeyInfo.class); } public static Codec getCodec(boolean ignorePipeline) { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java index 61402ee28e6..7c1e01d2ae5 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java @@ -42,7 +42,8 @@ public final class OmMultipartKeyInfo extends WithObjectID implements CopyObject private static final Codec CODEC = new DelegatedCodec<>( 
Proto2Codec.get(MultipartKeyInfo.getDefaultInstance()), OmMultipartKeyInfo::getFromProto, - OmMultipartKeyInfo::getProto); + OmMultipartKeyInfo::getProto, + OmMultipartKeyInfo.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmRangerSyncArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmRangerSyncArgs.java index fff6f38a37f..a09d5ef0902 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmRangerSyncArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmRangerSyncArgs.java @@ -45,7 +45,6 @@ public static OmRangerSyncArgs.Builder newBuilder() { /** * Builder for OmRangerSyncArgs. */ - @SuppressWarnings("checkstyle:hiddenfield") public static class Builder { private long newServiceVersion; /** diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmTenantArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmTenantArgs.java index bf331c48a14..bd1997641a7 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmTenantArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmTenantArgs.java @@ -77,7 +77,6 @@ public static OmTenantArgs.Builder newBuilder() { /** * Builder for OmTenantArgs. */ - @SuppressWarnings("checkstyle:hiddenfield") public static class Builder { private String tenantId; private String volumeName; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java index 499b4878362..65182a860d9 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java @@ -46,7 +46,8 @@ public final class OmVolumeArgs extends WithObjectID private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(VolumeInfo.getDefaultInstance()), OmVolumeArgs::getFromProtobuf, - OmVolumeArgs::getProtobuf); + OmVolumeArgs::getProtobuf, + OmVolumeArgs.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java index 517f0c14ce0..083b1329db6 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java @@ -18,21 +18,25 @@ package org.apache.hadoop.ozone.om.helpers; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; +import org.apache.hadoop.ozone.security.acl.OzoneAclConfig; import org.apache.hadoop.ozone.security.acl.RequestContext; +import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; import java.util.Objects; import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.hadoop.security.UserGroupInformation; +import org.slf4j.Logger; +import 
org.slf4j.LoggerFactory; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT; @@ -43,28 +47,51 @@ * Helper class for ozone acls operations. */ public final class OzoneAclUtil { + static final Logger LOG = LoggerFactory.getLogger(OzoneAclUtil.class); private OzoneAclUtil() { } + private static ACLType[] userRights; + private static ACLType[] groupRights; + /** - * Helper function to get access acl list for current user. + * Helper function to get default access acl list for current user. * - * @param userName - * @param userGroups + * @param ugi current login user + * @param conf current configuration * @return list of OzoneAcls * */ - public static List getAclList(String userName, - String[] userGroups, ACLType userRights, ACLType groupRights) { - + public static List getDefaultAclList(UserGroupInformation ugi, OzoneConfiguration conf) { + // Get default acl rights for user and group. + if (userRights == null || groupRights == null) { + OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class); + userRights = aclConfig.getUserDefaultRights(); + groupRights = aclConfig.getGroupDefaultRights(); + } List listOfAcls = new ArrayList<>(); + // User ACL. + listOfAcls.add(new OzoneAcl(USER, ugi.getShortUserName(), ACCESS, userRights)); + try { + String groupName = ugi.getPrimaryGroupName(); + listOfAcls.add(new OzoneAcl(GROUP, groupName, ACCESS, groupRights)); + } catch (IOException e) { + // do nothing, since user has the permission, user can add ACL for selected groups later. + LOG.warn("Failed to get primary group from user {}", ugi); + } + return listOfAcls; + } + public static List getAclList(UserGroupInformation ugi, ACLType userPrivilege, ACLType groupPrivilege) { + List listOfAcls = new ArrayList<>(); // User ACL. - listOfAcls.add(new OzoneAcl(USER, userName, ACCESS, userRights)); - if (userGroups != null) { - // Group ACLs of the User. - Arrays.asList(userGroups).forEach((group) -> listOfAcls.add( - new OzoneAcl(GROUP, group, ACCESS, groupRights))); + listOfAcls.add(new OzoneAcl(USER, ugi.getShortUserName(), ACCESS, userPrivilege)); + try { + String groupName = ugi.getPrimaryGroupName(); + listOfAcls.add(new OzoneAcl(GROUP, groupName, ACCESS, groupPrivilege)); + } catch (IOException e) { + // do nothing, since user has the permission, user can add ACL for selected groups later. 
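
OzoneAclUtil#getDefaultAclList now derives the ACL set from the caller's UserGroupInformation and the cluster configuration instead of taking explicit user and group names. A hedged usage sketch, assuming a logged-in Hadoop user and default Ozone settings:

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
import org.apache.hadoop.security.UserGroupInformation;

public class DefaultAclSketch {
  public static void main(String[] args) throws IOException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    OzoneConfiguration conf = new OzoneConfiguration();
    // Yields a USER acl for the short user name plus a GROUP acl for the
    // primary group (when it can be resolved), with rights taken from the
    // OzoneAclConfig defaults.
    List<OzoneAcl> acls = OzoneAclUtil.getDefaultAclList(ugi, conf);
    acls.forEach(acl -> System.out.println(acl));
  }
}
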
+ LOG.warn("Failed to get primary group from user {}", ugi); } return listOfAcls; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java index f1dd1e9eeba..2d0f92a1f0c 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java @@ -47,7 +47,8 @@ private static Codec newCodec(boolean ignorePipeline) { return new DelegatedCodec<>( Proto2Codec.get(RepeatedKeyInfo.getDefaultInstance()), RepeatedOmKeyInfo::getFromProto, - k -> k.getProto(ignorePipeline, ClientVersion.CURRENT_VERSION)); + k -> k.getProto(ignorePipeline, ClientVersion.CURRENT_VERSION), + RepeatedOmKeyInfo.class); } public static Codec getCodec(boolean ignorePipeline) { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java index 20c145bd0c0..7ea932c5716 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java @@ -31,7 +31,8 @@ public final class S3SecretValue { private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(S3Secret.getDefaultInstance()), S3SecretValue::fromProtobuf, - S3SecretValue::getProtobuf); + S3SecretValue::getProtobuf, + S3SecretValue.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3VolumeContext.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3VolumeContext.java index dbbc3544765..4763c411934 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3VolumeContext.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3VolumeContext.java @@ -69,7 +69,6 @@ public static S3VolumeContext.Builder newBuilder() { /** * Builder for S3VolumeContext. 
*/ - @SuppressWarnings("checkstyle:hiddenfield") public static final class Builder { private OmVolumeArgs omVolumeArgs; private String userPrincipal; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotDiffJob.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotDiffJob.java index 0d221dc1cd4..c3c8efc11ad 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotDiffJob.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotDiffJob.java @@ -261,6 +261,11 @@ private static final class SnapshotDiffJobCodec .setSerializationInclusion(JsonInclude.Include.NON_NULL) .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + @Override + public Class getTypeClass() { + return SnapshotDiffJob.class; + } + @Override public byte[] toPersistedFormat(SnapshotDiffJob object) throws IOException { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java index 7feefdb0b22..cf0a60dd353 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java @@ -56,10 +56,10 @@ */ public final class SnapshotInfo implements Auditable, CopyObject { private static final Codec CODEC = new DelegatedCodec<>( - Proto2Codec.get( - OzoneManagerProtocolProtos.SnapshotInfo.getDefaultInstance()), + Proto2Codec.get(OzoneManagerProtocolProtos.SnapshotInfo.getDefaultInstance()), SnapshotInfo::getFromProtobuf, - SnapshotInfo::getProtobuf); + SnapshotInfo::getProtobuf, + SnapshotInfo.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java index 94822630f8e..7f633d7ea73 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java @@ -1145,6 +1145,31 @@ void setTimes(OmKeyArgs keyArgs, long mtime, long atime) boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOException; + /** + * Gets the tags for the specified key. + * @param args Key args + * @return Tags associated with the key. + */ + Map getObjectTagging(OmKeyArgs args) throws IOException; + + /** + * Sets the tags to an existing key. + * @param args Key args + */ + default void putObjectTagging(OmKeyArgs args) throws IOException { + throw new UnsupportedOperationException("OzoneManager does not require " + + "this to be implemented, as write requests use a new approach."); + } + + /** + * Removes all the tags from the specified key. + * @param args Key args + */ + default void deleteObjectTagging(OmKeyArgs args) throws IOException { + throw new UnsupportedOperationException("OzoneManager does not require " + + "this to be implemented, as write requests use a new approach."); + } + /** * Get status of last triggered quota repair in OM. 
* @return String diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java index ac2e85da84d..c9eb9cbb44f 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java @@ -121,6 +121,7 @@ public GrpcOmTransport(ConfigurationSource conf, omFailoverProxyProvider = new GrpcOMFailoverProxyProvider( conf, + ugi, omServiceId, OzoneManagerProtocolPB.class); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index b140cf95e69..6b23b0f2682 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -108,6 +108,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteObjectTaggingRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteSnapshotRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteTenantRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteTenantResponse; @@ -125,6 +126,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetKeyInfoRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetKeyInfoResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetObjectTaggingRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetObjectTaggingResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3SecretRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3SecretResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3VolumeContextRequest; @@ -174,6 +177,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareStatusRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareStatusResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrintCompactionLogDagRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RangerBGSyncRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RangerBGSyncResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RecoverLeaseRequest; @@ -260,8 +264,8 @@ public final class OzoneManagerProtocolClientSideTranslatorPB = new ThreadLocal<>(); private boolean s3AuthCheck; - public static final int BLOCK_ALLOCATION_RETRY_COUNT = 5; - public static 
final int BLOCK_ALLOCATION_RETRY_WAIT_TIME_MS = 3000; + public static final int BLOCK_ALLOCATION_RETRY_COUNT = 90; + public static final int BLOCK_ALLOCATION_RETRY_WAIT_TIME_MS = 1000; public OzoneManagerProtocolClientSideTranslatorPB(OmTransport omTransport, String clientId) { @@ -1648,10 +1652,13 @@ public OmMultipartInfo initiateMultipartUpload(OmKeyArgs omKeyArgs) throws .setKeyName(omKeyArgs.getKeyName()) .addAllMetadata(KeyValueUtil.toProtobuf(omKeyArgs.getMetadata())) .setOwnerName(omKeyArgs.getOwner()) - .addAllAcls(omKeyArgs.getAcls().stream().map(a -> - OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) .addAllTags(KeyValueUtil.toProtobuf(omKeyArgs.getTags())); + if (omKeyArgs.getAcls() != null) { + keyArgs.addAllAcls(omKeyArgs.getAcls().stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())); + } + setReplicationConfig(omKeyArgs.getReplicationConfig(), keyArgs); multipartInfoInitiateRequest.setKeyArgs(keyArgs.build()); @@ -1722,10 +1729,12 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload( .setVolumeName(omKeyArgs.getVolumeName()) .setBucketName(omKeyArgs.getBucketName()) .setKeyName(omKeyArgs.getKeyName()) - .addAllAcls(omKeyArgs.getAcls().stream().map(a -> - OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) .setOwnerName(omKeyArgs.getOwner()) .setMultipartUploadID(omKeyArgs.getMultipartUploadID()); + if (omKeyArgs.getAcls() != null) { + keyArgs.addAllAcls(omKeyArgs.getAcls().stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())); + } multipartUploadCompleteRequest.setKeyArgs(keyArgs.build()); multipartUploadCompleteRequest.addAllPartsList(multipartUploadList @@ -2121,16 +2130,17 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { @Override public void createDirectory(OmKeyArgs args) throws IOException { - KeyArgs keyArgs = KeyArgs.newBuilder() + KeyArgs.Builder keyArgsBuilder = KeyArgs.newBuilder() .setVolumeName(args.getVolumeName()) .setBucketName(args.getBucketName()) .setKeyName(args.getKeyName()) - .addAllAcls(args.getAcls().stream().map(a -> - OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) - .setOwnerName(args.getOwner()) - .build(); + .setOwnerName(args.getOwner()); + if (args.getAcls() != null) { + keyArgsBuilder.addAllAcls(args.getAcls().stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())); + } CreateDirectoryRequest request = CreateDirectoryRequest.newBuilder() - .setKeyArgs(keyArgs) + .setKeyArgs(keyArgsBuilder.build()) .build(); OMRequest omRequest = createOMRequest(Type.CreateDirectory) @@ -2292,9 +2302,11 @@ public OpenKeySession createFile(OmKeyArgs args, .setBucketName(args.getBucketName()) .setKeyName(args.getKeyName()) .setDataSize(args.getDataSize()) - .addAllAcls(args.getAcls().stream().map(a -> - OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) .setOwnerName(args.getOwner()); + if (args.getAcls() != null) { + keyArgsBuilder.addAllAcls(args.getAcls().stream().map(a -> + OzoneAcl.toProtobuf(a)).collect(Collectors.toList())); + } if (args.getReplicationConfig() != null) { if (args.getReplicationConfig() instanceof ECReplicationConfig) { keyArgsBuilder.setEcReplicationConfig( @@ -2580,6 +2592,72 @@ public void startQuotaRepair(List buckets) throws IOException { handleError(submitRequest(omRequest)); } + @Override + public Map getObjectTagging(OmKeyArgs args) throws IOException { + KeyArgs keyArgs = KeyArgs.newBuilder() + .setVolumeName(args.getVolumeName()) + .setBucketName(args.getBucketName()) + .setKeyName(args.getKeyName()) + .build(); + + 
GetObjectTaggingRequest req = + GetObjectTaggingRequest.newBuilder() + .setKeyArgs(keyArgs) + .build(); + + OMRequest omRequest = createOMRequest(Type.GetObjectTagging) + .setGetObjectTaggingRequest(req) + .build(); + + GetObjectTaggingResponse resp = + handleError(submitRequest(omRequest)).getGetObjectTaggingResponse(); + + return KeyValueUtil.getFromProtobuf(resp.getTagsList()); + } + + @Override + public void putObjectTagging(OmKeyArgs args) throws IOException { + KeyArgs keyArgs = KeyArgs.newBuilder() + .setVolumeName(args.getVolumeName()) + .setBucketName(args.getBucketName()) + .setKeyName(args.getKeyName()) + .addAllTags(KeyValueUtil.toProtobuf(args.getTags())) + .build(); + + PutObjectTaggingRequest req = + PutObjectTaggingRequest.newBuilder() + .setKeyArgs(keyArgs) + .build(); + + OMRequest omRequest = createOMRequest(Type.PutObjectTagging) + .setPutObjectTaggingRequest(req) + .build(); + + OMResponse omResponse = submitRequest(omRequest); + handleError(omResponse); + } + + @Override + public void deleteObjectTagging(OmKeyArgs args) throws IOException { + KeyArgs keyArgs = KeyArgs.newBuilder() + .setVolumeName(args.getVolumeName()) + .setBucketName(args.getBucketName()) + .setKeyName(args.getKeyName()) + .build(); + + DeleteObjectTaggingRequest req = + DeleteObjectTaggingRequest.newBuilder() + .setKeyArgs(keyArgs) + .build(); + + OMRequest omRequest = createOMRequest(Type.DeleteObjectTagging) + .setDeleteObjectTaggingRequest(req) + .build(); + + OMResponse omResponse = submitRequest(omRequest); + handleError(omResponse); + } + private SafeMode toProtoBuf(SafeModeAction action) { switch (action) { case ENTER: diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneTokenIdentifier.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneTokenIdentifier.java index 19f3e7c4a25..d4db2689612 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneTokenIdentifier.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneTokenIdentifier.java @@ -23,12 +23,14 @@ import java.io.IOException; import java.time.Instant; import java.util.Arrays; +import java.util.UUID; +import com.google.common.base.Preconditions; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.io.DataInputBuffer; -import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMTokenProto; @@ -47,7 +49,11 @@ public class OzoneTokenIdentifier extends AbstractDelegationTokenIdentifier { public static final Text KIND_NAME = new Text("OzoneToken"); + @Deprecated + // the certificate id of this OM, deprecated since HDDS-8829 private String omCertSerialId; + // shared secret key id generated by SCM. + private String secretKeyId; private Type tokenType; private String awsAccessId; private String signature; @@ -82,31 +88,6 @@ public Text getKind() { return KIND_NAME; } - /** Instead of relying on proto serialization, this - * provides explicit serialization for OzoneTokenIdentifier. 
- * @return byte[] - */ - public byte[] toUniqueSerializedKey() { - DataOutputBuffer buf = new DataOutputBuffer(); - try { - super.write(buf); - WritableUtils.writeVInt(buf, getTokenType().getNumber()); - // Set s3 specific fields. - if (getTokenType().equals(S3AUTHINFO)) { - WritableUtils.writeString(buf, getAwsAccessId()); - WritableUtils.writeString(buf, getSignature()); - WritableUtils.writeString(buf, getStrToSign()); - } else { - WritableUtils.writeString(buf, getOmCertSerialId()); - WritableUtils.writeString(buf, getOmServiceId()); - } - } catch (java.io.IOException e) { - throw new IllegalArgumentException( - "Can't encode the the raw data ", e); - } - return buf.getData(); - } - /** Instead of relying on proto deserialization, this * provides explicit deserialization for OzoneTokenIdentifier. * @return byte[] @@ -125,20 +106,19 @@ public OzoneTokenIdentifier fromUniqueSerializedKey(byte[] rawData) setStrToSign(WritableUtils.readString(in)); } else { this.tokenType = Type.DELEGATION_TOKEN; - setOmCertSerialId(WritableUtils.readString(in)); + String value = WritableUtils.readString(in); + try { + UUID.fromString(value); + setSecretKeyId(value); + } catch (IllegalArgumentException e) { + setOmCertSerialId(value); + } setOmServiceId(WritableUtils.readString(in)); } return this; } - /** - * Overrides default implementation to write using Protobuf. - * - * @param out output stream - * @throws IOException - */ - @Override - public void write(DataOutput out) throws IOException { + public OMTokenProto toProtoBuf() throws IOException { OMTokenProto.Builder builder = OMTokenProto.newBuilder() .setMaxDate(getMaxDate()) .setType(getTokenType()) @@ -155,14 +135,28 @@ public void write(DataOutput out) throws IOException { .setSignature(getSignature()) .setStrToSign(getStrToSign()); } else { - builder.setOmCertSerialId(getOmCertSerialId()); + if (StringUtils.isNotEmpty(getOmCertSerialId())) { + builder.setOmCertSerialId(getOmCertSerialId()); + } + if (StringUtils.isNotEmpty(getSecretKeyId())) { + builder.setSecretKeyId(getSecretKeyId()); + } if (getOmServiceId() != null) { builder.setOmServiceId(getOmServiceId()); } } + return builder.build(); + } - OMTokenProto token = builder.build(); - out.write(token.toByteArray()); + /** + * Overrides default implementation to write using Protobuf. + * + * @param out output stream + * @throws IOException + */ + @Override + public void write(DataOutput out) throws IOException { + out.write(toProtoBuf().toByteArray()); } /** @@ -183,7 +177,12 @@ public void readFields(DataInput in) throws IOException { setMaxDate(token.getMaxDate()); setSequenceNumber(token.getSequenceNumber()); setMasterKeyId(token.getMasterKeyId()); - setOmCertSerialId(token.getOmCertSerialId()); + if (token.hasOmCertSerialId()) { + setOmCertSerialId(token.getOmCertSerialId()); + } + if (token.hasSecretKeyId()) { + setSecretKeyId(token.getSecretKeyId()); + } // Set s3 specific fields. 
if (getTokenType().equals(S3AUTHINFO)) { @@ -221,7 +220,12 @@ public static OzoneTokenIdentifier readProtoBuf(DataInput in) identifier.setSequenceNumber(token.getSequenceNumber()); identifier.setMasterKeyId(token.getMasterKeyId()); } - identifier.setOmCertSerialId(token.getOmCertSerialId()); + if (token.hasOmCertSerialId()) { + identifier.setOmCertSerialId(token.getOmCertSerialId()); + } + if (token.hasSecretKeyId()) { + identifier.setSecretKeyId(token.getSecretKeyId()); + } identifier.setOmServiceId(token.getOmServiceId()); return identifier; } @@ -264,6 +268,7 @@ public boolean equals(Object obj) { } OzoneTokenIdentifier that = (OzoneTokenIdentifier) obj; return new EqualsBuilder() + .append(getSecretKeyId(), that.getSecretKeyId()) .append(getOmCertSerialId(), that.getOmCertSerialId()) .append(getMaxDate(), that.getMaxDate()) .append(getIssueDate(), that.getIssueDate()) @@ -326,6 +331,18 @@ public String getOmCertSerialId() { public void setOmCertSerialId(String omCertSerialId) { this.omCertSerialId = omCertSerialId; + Preconditions.checkArgument(this.omCertSerialId == null || this.secretKeyId == null, + "omCertSerialId and secretKeyId cannot both be valid"); + } + + public String getSecretKeyId() { + return secretKeyId; + } + + public void setSecretKeyId(String id) { + this.secretKeyId = id; + Preconditions.checkArgument(this.omCertSerialId == null || this.secretKeyId == null, + "omCertSerialId and secretKeyId cannot both be valid"); } public String getOmServiceId() { @@ -383,7 +400,8 @@ public String toString() { .append(", signature=").append(getSignature()) .append(", awsAccessKeyId=").append(getAwsAccessId()) .append(", omServiceId=").append(getOmServiceId()) - .append(", omCertSerialId=").append(getOmCertSerialId()); + .append(", omCertSerialId=").append(getOmCertSerialId()) + .append(", secretKeyId=").append(getSecretKeyId()); return buffer.toString(); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java index 75dee0b8a45..7fbf5a92065 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java @@ -23,6 +23,10 @@ import org.apache.hadoop.hdds.conf.ConfigType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + /** * Ozone ACL config pojo. 
* */ @@ -40,7 +44,7 @@ public class OzoneAclConfig { private String userDefaultRights; @Config(key = "group.rights", - defaultValue = "ALL", + defaultValue = "READ, LIST", type = ConfigType.STRING, tags = {ConfigTag.OM, ConfigTag.SECURITY}, description = "Default group permissions set for an object in " + @@ -48,18 +52,26 @@ public class OzoneAclConfig { ) private String groupDefaultRights; - public ACLType getUserDefaultRights() { + public ACLType[] getUserDefaultRights() { + List types = new ArrayList(); if (userDefaultRights == null) { - return ACLType.ALL; + types.add(ACLType.ALL); + } else { + String[] array = userDefaultRights.trim().split(","); + Arrays.stream(array).forEach(t -> types.add(ACLType.valueOf(t.trim()))); } - return ACLType.valueOf(userDefaultRights); + return types.toArray(new ACLType[0]); } - public ACLType getGroupDefaultRights() { + public ACLType[] getGroupDefaultRights() { + List types = new ArrayList(); if (groupDefaultRights == null) { - return ACLType.ALL; + types.add(ACLType.READ); + types.add(ACLType.LIST); + } else { + String[] array = groupDefaultRights.trim().split(","); + Arrays.stream(array).forEach(t -> types.add(ACLType.valueOf(t.trim()))); } - return ACLType.valueOf(groupDefaultRights); + return types.toArray(new ACLType[0]); } - } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffReportOzone.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffReportOzone.java index a9e89033129..83300d5689a 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffReportOzone.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffReportOzone.java @@ -47,6 +47,7 @@ public class SnapshotDiffReportOzone Proto2Codec.get(DiffReportEntryProto.getDefaultInstance()), SnapshotDiffReportOzone::fromProtobufDiffReportEntry, SnapshotDiffReportOzone::toProtobufDiffReportEntry, + DiffReportEntry.class, DelegatedCodec.CopyType.SHALLOW); public static Codec getDiffReportEntryCodec() { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java index b5a15db39cd..289fc42b4ed 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java @@ -69,8 +69,6 @@ public static void main(String[] args) { System.out.println( "Source code repository " + OZONE_VERSION_INFO.getUrl() + " -r " + OZONE_VERSION_INFO.getRevision()); - System.out.println("Compiled by " + OZONE_VERSION_INFO.getUser() + " on " - + OZONE_VERSION_INFO.getDate()); System.out.println( "Compiled with protoc " + OZONE_VERSION_INFO.getHadoopProtoc2Version() + ", " + OZONE_VERSION_INFO.getGrpcProtocVersion() + diff --git a/hadoop-ozone/common/src/main/resources/ozone-version-info.properties b/hadoop-ozone/common/src/main/resources/ozone-version-info.properties index 1a6e3b61519..73f02760d6f 100644 --- a/hadoop-ozone/common/src/main/resources/ozone-version-info.properties +++ b/hadoop-ozone/common/src/main/resources/ozone-version-info.properties @@ -19,9 +19,6 @@ version=${declared.ozone.version} release=${ozone.release} revision=${version-info.scm.commit} -branch=${version-info.scm.branch} -user=${user.name} -date=${version-info.build.time} url=${version-info.scm.uri} srcChecksum=${version-info.source.md5} 
hadoopProtoc2Version=${proto2.hadooprpc.protobuf.version} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java index 35a8a95d8d0..a6b5d9c0196 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java @@ -186,8 +186,8 @@ private static List getDefaultAcls() { } OzoneAclConfig aclConfig = newInstanceOf(OzoneAclConfig.class); - IAccessAuthorizer.ACLType userRights = aclConfig.getUserDefaultRights(); - IAccessAuthorizer.ACLType groupRights = aclConfig.getGroupDefaultRights(); + IAccessAuthorizer.ACLType[] userRights = aclConfig.getUserDefaultRights(); + IAccessAuthorizer.ACLType[] groupRights = aclConfig.getGroupDefaultRights(); OzoneAclUtil.addAcl(ozoneAcls, new OzoneAcl(USER, ugi.getUserName(), ACCESS, userRights)); diff --git a/hadoop-ozone/csi/pom.xml b/hadoop-ozone/csi/pom.xml index a0565d7e890..ba66c5d5272 100644 --- a/hadoop-ozone/csi/pom.xml +++ b/hadoop-ozone/csi/pom.xml @@ -20,15 +20,16 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-csi - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone CSI service Apache Ozone CSI service jar + false true diff --git a/hadoop-ozone/datanode/pom.xml b/hadoop-ozone/datanode/pom.xml index 29c10671d9d..733f0837fda 100644 --- a/hadoop-ozone/datanode/pom.xml +++ b/hadoop-ozone/datanode/pom.xml @@ -19,14 +19,15 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-datanode Apache Ozone Datanode jar - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT + false true true diff --git a/hadoop-ozone/dev-support/checks/_build.sh b/hadoop-ozone/dev-support/checks/_build.sh new file mode 100755 index 00000000000..b1f23a9ba8a --- /dev/null +++ b/hadoop-ozone/dev-support/checks/_build.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +cd "$DIR/../../.." || exit 1 + +: ${OZONE_WITH_COVERAGE:="false"} + +MAVEN_OPTIONS='-V -B -DskipTests -DskipDocs --no-transfer-progress' + +if [[ "${OZONE_WITH_COVERAGE}" == "true" ]]; then + MAVEN_OPTIONS="${MAVEN_OPTIONS} -Pcoverage" +else + MAVEN_OPTIONS="${MAVEN_OPTIONS} -Djacoco.skip" +fi + +export MAVEN_OPTS="-Xmx4096m ${MAVEN_OPTS:-}" +mvn ${MAVEN_OPTIONS} clean "$@" +rc=$? 
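For context on the new _build.sh helper above: it centralizes the Maven options shared by the build-related checks (batch mode, skipped tests and docs, a 4 GB heap, and the jacoco/coverage toggle driven by OZONE_WITH_COVERAGE) and runs "mvn clean" plus whatever goals the caller appends, leaving the Maven exit code in rc. A minimal sketch of a caller in the same style, assuming the usual dev-support/checks layout (the script itself is hypothetical), could look like this:

  #!/usr/bin/env bash
  # Hypothetical check script that delegates to the shared _build.sh helper,
  # passing only the Maven goals; all common flags come from the helper.
  set -u -o pipefail

  DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

  # Build without coverage instrumentation (jacoco is skipped by default);
  # exporting OZONE_WITH_COVERAGE=true before the call enables the coverage profile instead.
  source "${DIR}"/_build.sh install

  # _build.sh stores the Maven exit code in rc.
  exit ${rc}
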
diff --git a/hadoop-ozone/dev-support/checks/_diffoscope.sh b/hadoop-ozone/dev-support/checks/_diffoscope.sh new file mode 100755 index 00000000000..cc7cc700c82 --- /dev/null +++ b/hadoop-ozone/dev-support/checks/_diffoscope.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Helper script to compare jars reported by maven-artifact-plugin + +set -e -u -o pipefail + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +cd "$DIR/../../.." || exit 1 + +BASE_DIR="$(pwd -P)" +: ${OUTPUT_LOG:="${BASE_DIR}/target/repro/output.log"} + +for jar in $(grep -o "investigate with diffoscope [^ ]*\.jar [^ ]*\.jar" "${OUTPUT_LOG}" | awk '{ print $NF }'); do + jarname=$(basename "$jar") + if [[ ! -e "$jar" ]]; then + echo "$jar does not exist" + continue + fi + + ref=$(find target/reference -name "$jarname") + if [[ -z "$ref" ]]; then + ref=$(find ~/.m2/repository -name "$jarname") + fi + + if [[ ! -e "$ref" ]]; then + echo "Reference not found for: $jarname" + continue + fi + + diffoscope "$ref" "$jar" +done diff --git a/hadoop-ozone/dev-support/checks/_lib.sh b/hadoop-ozone/dev-support/checks/_lib.sh index 134c8f53c6e..632aecb8296 100644 --- a/hadoop-ozone/dev-support/checks/_lib.sh +++ b/hadoop-ozone/dev-support/checks/_lib.sh @@ -160,7 +160,11 @@ download_hadoop_aws() { if [[ ! -e "${dir}" ]] || [[ ! -d "${dir}"/src/test/resources ]]; then mkdir -p "${dir}" - [[ -f "${dir}.tar.gz" ]] || curl -LSs -o "${dir}.tar.gz" https://archive.apache.org/dist/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}-src.tar.gz + if [[ ! -f "${dir}.tar.gz" ]]; then + local url="https://www.apache.org/dyn/closer.lua?action=download&filename=hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}-src.tar.gz" + echo "Downloading Hadoop from ${url}" + curl -LSs --fail -o "${dir}.tar.gz" "$url" || return 1 + fi tar -x -z -C "${dir}" --strip-components=3 -f "${dir}.tar.gz" --wildcards 'hadoop-*-src/hadoop-tools/hadoop-aws' || return 1 fi } diff --git a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh index 36205c69bb6..0249c7a498d 100755 --- a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh +++ b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh @@ -29,16 +29,20 @@ _realpath() { tempfile="${REPORT_DIR}/summary.tmp" ## generate summary txt file +failures=${REPORT_DIR}/failures.txt find "." 
-not -path '*/iteration*' -name 'TEST*.xml' -print0 \ | xargs -n1 -0 "grep" -l -E " "${tempfile}" + > "${failures}" +cat ${failures} > "${tempfile}" +leaks=${REPORT_DIR}/leaks.txt if [[ "${CHECK:-unit}" == "integration" ]]; then find hadoop-ozone/integration-test -not -path '*/iteration*' -name '*-output.txt' -print0 \ | xargs -n1 -0 "grep" -l -E "not closed properly|was not shutdown properly" \ | awk -F/ '{sub("-output.txt",""); print $NF}' \ - >> "${tempfile}" + > "${leaks}" + cat ${leaks} >> "${tempfile}" fi #Copy heap dump and dump leftovers @@ -50,11 +54,13 @@ find "." -not -path '*/iteration*' \ -exec mv {} "$REPORT_DIR/" \; ## Add the tests where the JVM is crashed +crashes=${REPORT_DIR}/crashes.txt grep -A1 'Crashed tests' "${REPORT_DIR}/output.log" \ | grep -v -e 'Crashed tests' -e '--' \ | cut -f2- -d' ' \ | sort -u \ - >> "${tempfile}" + > "${crashes}" +cat "${crashes}" >> "${tempfile}" # Check for tests that started but were not finished if grep -q 'There was a timeout.*in the fork' "${REPORT_DIR}/output.log"; then @@ -93,20 +99,24 @@ fi ## generate summary markdown file export SUMMARY_FILE="$REPORT_DIR/summary.md" -for TEST_RESULT_FILE in $(find "$REPORT_DIR" -name "*.txt" | grep -v output); do - - FAILURES=$(grep FAILURE "$TEST_RESULT_FILE" | grep "Tests run" | awk '{print $18}' | sort | uniq) +echo -n > "$SUMMARY_FILE" +if [ -s "${failures}" ]; then + printf "# Failed Tests\n\n" >> "$SUMMARY_FILE" + cat "${failures}" | sed 's/^/ * /' >> "$SUMMARY_FILE" +fi +rm -f "${failures}" - for FAILURE in $FAILURES; do - TEST_RESULT_LOCATION="$(_realpath --relative-to="$REPORT_DIR" "$TEST_RESULT_FILE")" - TEST_OUTPUT_LOCATION="${TEST_RESULT_LOCATION//.txt/-output.txt}" - printf " * [%s](%s) ([output](%s))\n" "$FAILURE" "$TEST_RESULT_LOCATION" "$TEST_OUTPUT_LOCATION" >> "$SUMMARY_FILE" - done -done +if [[ -s "${leaks}" ]]; then + printf "# Leaks Detected\n\n" >> "$SUMMARY_FILE" + cat "${leaks}" | sed 's/^/ * /' >> "$SUMMARY_FILE" +fi +rm -f "${leaks}" -if [ -s "$SUMMARY_FILE" ]; then - printf "# Failing tests: \n\n" | cat - "$SUMMARY_FILE" > temp && mv temp "$SUMMARY_FILE" +if [[ -s "${crashes}" ]]; then + printf "# Crashed Tests\n\n" >> "$SUMMARY_FILE" + cat "${crashes}" | sed 's/^/ * /' >> "$SUMMARY_FILE" fi +rm -f "${crashes}" ## generate counter wc -l "$REPORT_DIR/summary.txt" | awk '{print $1}'> "$REPORT_DIR/failures" diff --git a/hadoop-ozone/dev-support/checks/_post_process.sh b/hadoop-ozone/dev-support/checks/_post_process.sh new file mode 100644 index 00000000000..555a281445a --- /dev/null +++ b/hadoop-ozone/dev-support/checks/_post_process.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script does common processing after Maven-based checks. 
+# +# - ensures Maven error is reported as failure +# - writes number of failures into file +# - exits with the correct code + +# Prerequisites: +# - $rc should be set to Maven exit code +# - $REPORT_DIR should be defined +# - $REPORT_FILE should be defined +# - Maven output should be saved in $REPORT_DIR/output.log + +# script failed, but report file is empty (does not reflect failure) +if [[ ${rc} -ne 0 ]] && [[ ! -s "${REPORT_FILE}" ]]; then + # do we know what to look for? + if [[ -n "${ERROR_PATTERN:-}" ]]; then + grep -m25 "${ERROR_PATTERN}" "${REPORT_DIR}/output.log" > "${REPORT_FILE}" + fi + if [[ ! -s "${REPORT_FILE}" ]]; then + echo "Unknown failure, check output.log" > "${REPORT_FILE}" + fi +fi + +# number of failures = number of lines in report, unless file already created with custom count +if [[ ! -s "${REPORT_DIR}/failures" ]]; then + wc -l "$REPORT_FILE" | awk '{ print $1 }' > "$REPORT_DIR/failures" +fi + +# exit with failure if report is not empty +if [[ -s "${REPORT_FILE}" ]]; then + rc=1 +fi + +exit ${rc} diff --git a/hadoop-ozone/dev-support/checks/acceptance.sh b/hadoop-ozone/dev-support/checks/acceptance.sh index 3425f66605e..ea9fa819ec3 100755 --- a/hadoop-ozone/dev-support/checks/acceptance.sh +++ b/hadoop-ozone/dev-support/checks/acceptance.sh @@ -30,6 +30,7 @@ OZONE_ROOT=$(pwd -P) source "${DIR}/_lib.sh" REPORT_DIR=${OUTPUT_DIR:-"${OZONE_ROOT}/target/acceptance"} +REPORT_FILE="$REPORT_DIR/summary.txt" OZONE_VERSION=$(mvn help:evaluate -Dexpression=ozone.version -q -DforceStdout -Dscan=false) DIST_DIR="${OZONE_ROOT}/hadoop-ozone/dist/target/ozone-$OZONE_VERSION" @@ -49,14 +50,17 @@ if [[ "${OZONE_ACCEPTANCE_SUITE}" == "s3a" ]]; then export HADOOP_AWS_DIR=${OZONE_ROOT}/target/hadoop-src fi - download_hadoop_aws "${HADOOP_AWS_DIR}" + if ! download_hadoop_aws "${HADOOP_AWS_DIR}"; then + echo "Failed to download Hadoop ${HADOOP_VERSION}" > "${REPORT_FILE}" + exit 1 + fi fi export OZONE_ACCEPTANCE_SUITE OZONE_ACCEPTANCE_TEST_TYPE cd "$DIST_DIR/compose" || exit 1 ./test-all.sh 2>&1 | tee "${REPORT_DIR}/output.log" -RES=$? +rc=$? if [[ "${OZONE_ACCEPTANCE_TEST_TYPE}" == "maven" ]]; then pushd result @@ -64,14 +68,13 @@ if [[ "${OZONE_ACCEPTANCE_TEST_TYPE}" == "maven" ]]; then find . -name junit -print0 | xargs -r -0 rm -frv cp -rv * "${REPORT_DIR}"/ popd + ERROR_PATTERN="\[ERROR\]" else cp -rv result/* "$REPORT_DIR/" - if [[ -f "${REPORT_DIR}/log.html" ]]; then - cp "$REPORT_DIR/log.html" "$REPORT_DIR/summary.html" - fi - grep -A1 FAIL "${REPORT_DIR}/output.log" | grep -v '^Output' > "${REPORT_DIR}/summary.txt" + grep -A1 FAIL "${REPORT_DIR}/output.log" | grep -v '^Output' > "${REPORT_FILE}" + ERROR_PATTERN="FAIL" fi find "$REPORT_DIR" -type f -empty -not -name summary.txt -print0 | xargs -0 rm -v -exit $RES +source "${DIR}/_post_process.sh" diff --git a/hadoop-ozone/dev-support/checks/author.sh b/hadoop-ozone/dev-support/checks/author.sh index 7c95b5d775b..2764edb2928 100755 --- a/hadoop-ozone/dev-support/checks/author.sh +++ b/hadoop-ozone/dev-support/checks/author.sh @@ -16,6 +16,8 @@ #checks:basic +set -u -o pipefail + DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" cd "$DIR/../../.." || exit 1 @@ -23,10 +25,10 @@ REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/author"} mkdir -p "$REPORT_DIR" REPORT_FILE="$REPORT_DIR/summary.txt" -grep -r --include="*.java" "@author" . 
| tee "$REPORT_FILE" - -wc -l "$REPORT_FILE" | awk '{print $1}'> "$REPORT_DIR/failures" - -if [[ -s "${REPORT_FILE}" ]]; then - exit 1 +rc=0 +if grep -r --include="*.java" "@author" . | tee "$REPORT_FILE"; then + rc=1 fi + +ERROR_PATTERN="" +source "${DIR}/_post_process.sh" diff --git a/hadoop-ozone/dev-support/checks/bats.sh b/hadoop-ozone/dev-support/checks/bats.sh index e4e18957a62..f94ba583ee2 100755 --- a/hadoop-ozone/dev-support/checks/bats.sh +++ b/hadoop-ozone/dev-support/checks/bats.sh @@ -16,6 +16,8 @@ #checks:basic +set -u -o pipefail + DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" cd "${DIR}/../../.." || exit 1 @@ -39,11 +41,11 @@ find * \( \ \) -print0 \ | xargs -0 -n1 bats --formatter tap \ | tee -a "${REPORT_DIR}/output.log" +rc=$? grep '^\(not ok\|#\)' "${REPORT_DIR}/output.log" > "${REPORT_FILE}" grep -c '^not ok' "${REPORT_FILE}" > "${REPORT_DIR}/failures" -if [[ -s "${REPORT_FILE}" ]]; then - exit 1 -fi +ERROR_PATTERN="" +source "${DIR}/_post_process.sh" diff --git a/hadoop-ozone/dev-support/checks/build.sh b/hadoop-ozone/dev-support/checks/build.sh index f9938e70387..ac524f755e3 100755 --- a/hadoop-ozone/dev-support/checks/build.sh +++ b/hadoop-ozone/dev-support/checks/build.sh @@ -13,20 +13,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -cd "$DIR/../../.." || exit 1 - -: ${OZONE_WITH_COVERAGE:="false"} -MAVEN_OPTIONS='-V -B -DskipTests -DskipDocs --no-transfer-progress' +set -eu -o pipefail -if [[ "${OZONE_WITH_COVERAGE}" == "true" ]]; then - MAVEN_OPTIONS="${MAVEN_OPTIONS} -Pcoverage" -else - MAVEN_OPTIONS="${MAVEN_OPTIONS} -Djacoco.skip" -fi - -export MAVEN_OPTS="-Xmx4096m $MAVEN_OPTS" -echo "${MAVEN_OPTIONS}" -mvn ${MAVEN_OPTIONS} clean install "$@" -exit $? +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +source "${DIR}"/_build.sh install "$@" diff --git a/hadoop-ozone/dev-support/checks/checkstyle.sh b/hadoop-ozone/dev-support/checks/checkstyle.sh index 18ae3905975..09b24b93a8b 100755 --- a/hadoop-ozone/dev-support/checks/checkstyle.sh +++ b/hadoop-ozone/dev-support/checks/checkstyle.sh @@ -24,7 +24,7 @@ REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/checkstyle"} mkdir -p "$REPORT_DIR" REPORT_FILE="$REPORT_DIR/summary.txt" -MAVEN_OPTIONS='-B -fae -Dskip.npx -Dskip.installnpx -Dcheckstyle.failOnViolation=false --no-transfer-progress' +MAVEN_OPTIONS='-B -fae -DskipRecon -Dcheckstyle.failOnViolation=false --no-transfer-progress' declare -i rc mvn ${MAVEN_OPTIONS} checkstyle:check > "${REPORT_DIR}/output.log" @@ -53,15 +53,8 @@ find "." -name checkstyle-errors.xml -print0 \ -e "s/>/>/g" \ | tee "$REPORT_FILE" -# check if Maven failed due to some error other than checkstyle violation -if [[ ${rc} -ne 0 ]] && [[ ! 
-s "${REPORT_FILE}" ]]; then - grep -m1 -F '[ERROR]' "${REPORT_DIR}/output.log" > "${REPORT_FILE}" -fi - ## generate counter grep -c ':' "$REPORT_FILE" > "$REPORT_DIR/failures" -if [[ -s "${REPORT_FILE}" ]]; then - exit 1 -fi -exit ${rc} +ERROR_PATTERN="\[ERROR\]" +source "${DIR}/_post_process.sh" diff --git a/hadoop-ozone/dev-support/checks/coverage.sh b/hadoop-ozone/dev-support/checks/coverage.sh index 04961921d96..67161d01a05 100755 --- a/hadoop-ozone/dev-support/checks/coverage.sh +++ b/hadoop-ozone/dev-support/checks/coverage.sh @@ -53,4 +53,5 @@ find target/coverage-classes -type d \( -name proto -or -name proto3 -or -name g | xargs rm -rf #generate the reports -jacoco report "$REPORT_DIR/jacoco-all.exec" --classfiles target/coverage-classes --html "$REPORT_DIR/all" --xml "$REPORT_DIR/all.xml" +src=$(find hadoop-* -path '*/src/main/java' | sed 's/^/--sourcefiles /g' | xargs echo) +jacoco report "$REPORT_DIR/jacoco-all.exec" $src --classfiles target/coverage-classes --html "$REPORT_DIR/all" --xml "$REPORT_DIR/all.xml" diff --git a/hadoop-ozone/dev-support/checks/dependency.sh b/hadoop-ozone/dev-support/checks/dependency.sh index 116664df81b..dc95a25e47c 100755 --- a/hadoop-ozone/dev-support/checks/dependency.sh +++ b/hadoop-ozone/dev-support/checks/dependency.sh @@ -32,8 +32,8 @@ cp ${src_dir}/current.txt "$REPORT_DIR"/ #implementation of sort cli is not exactly the same everywhere. It's better to sort with the same command locally (diff -uw \ - <(sort ${src_dir}/jar-report.txt) \ - <(sort ${src_dir}/current.txt) \ + <(sort -u ${src_dir}/jar-report.txt) \ + <(sort -u ${src_dir}/current.txt) \ || true) \ > "$REPORT_FILE" diff --git a/hadoop-ozone/dev-support/checks/docs.sh b/hadoop-ozone/dev-support/checks/docs.sh index ce80c3f3e5c..7ebf64ef190 100755 --- a/hadoop-ozone/dev-support/checks/docs.sh +++ b/hadoop-ozone/dev-support/checks/docs.sh @@ -33,10 +33,5 @@ rc=$? grep -o 'ERROR.*' "${REPORT_DIR}/output.log" > "${REPORT_FILE}" -wc -l "${REPORT_FILE}" | awk '{ print $1 }' > "${REPORT_DIR}/failures" - -if [[ -s "${REPORT_FILE}" ]]; then - exit 1 -fi - -exit ${rc} +ERROR_PATTERN="" +source "${DIR}/_post_process.sh" diff --git a/hadoop-ozone/dev-support/checks/findbugs.sh b/hadoop-ozone/dev-support/checks/findbugs.sh index 8c61c524a9e..7d1565a0195 100755 --- a/hadoop-ozone/dev-support/checks/findbugs.sh +++ b/hadoop-ozone/dev-support/checks/findbugs.sh @@ -16,6 +16,8 @@ #checks:basic +set -u -o pipefail + DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" cd "$DIR/../../.." || exit 1 @@ -25,30 +27,25 @@ source "${DIR}/_lib.sh" install_spotbugs -MAVEN_OPTIONS='-B -fae -Dskip.npx -Dskip.installnpx --no-transfer-progress' +REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/findbugs"} +mkdir -p "$REPORT_DIR" +REPORT_FILE="$REPORT_DIR/summary.txt" + +MAVEN_OPTIONS='-B -fae -DskipRecon --no-transfer-progress' if [[ "${OZONE_WITH_COVERAGE}" != "true" ]]; then MAVEN_OPTIONS="${MAVEN_OPTIONS} -Djacoco.skip" fi #shellcheck disable=SC2086 -mvn ${MAVEN_OPTIONS} test-compile spotbugs:spotbugs "$@" +mvn ${MAVEN_OPTIONS} test-compile spotbugs:spotbugs "$@" | tee "${REPORT_DIR}/output.log" rc=$? 
-REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/findbugs"} -mkdir -p "$REPORT_DIR" -REPORT_FILE="$REPORT_DIR/summary.txt" - touch "$REPORT_FILE" find hadoop-hdds hadoop-ozone -name spotbugsXml.xml -print0 | xargs -0 unionBugs -output "${REPORT_DIR}"/summary.xml convertXmlToText "${REPORT_DIR}"/summary.xml | tee -a "${REPORT_FILE}" convertXmlToText -html:fancy-hist.xsl "${REPORT_DIR}"/summary.xml "${REPORT_DIR}"/summary.html -wc -l "$REPORT_FILE" | awk '{print $1}'> "$REPORT_DIR/failures" - -if [[ -s "${REPORT_FILE}" ]]; then - exit 1 -fi - -exit ${rc} +ERROR_PATTERN="\[ERROR\]" +source "${DIR}/_post_process.sh" diff --git a/hadoop-ozone/dev-support/checks/junit.sh b/hadoop-ozone/dev-support/checks/junit.sh index bb7088f0cd5..46f1fd77f2b 100755 --- a/hadoop-ozone/dev-support/checks/junit.sh +++ b/hadoop-ozone/dev-support/checks/junit.sh @@ -30,8 +30,8 @@ if [[ ${ITERATIONS} -le 0 ]]; then ITERATIONS=1 fi -export MAVEN_OPTS="-Xmx4096m $MAVEN_OPTS" -MAVEN_OPTIONS="-B -V -Dskip.npx -Dskip.installnpx -Dnative.lib.tmp.dir=/tmp --no-transfer-progress" +export MAVEN_OPTS="-Xmx4096m ${MAVEN_OPTS:-}" +MAVEN_OPTIONS="-B -V -DskipRecon -Dnative.lib.tmp.dir=/tmp --no-transfer-progress" if [[ "${OZONE_WITH_COVERAGE}" != "true" ]]; then MAVEN_OPTIONS="${MAVEN_OPTIONS} -Djacoco.skip" @@ -100,14 +100,10 @@ for i in $(seq 1 ${ITERATIONS}); do fi done -# check if Maven failed due to some error other than test failure -if [[ ${rc} -ne 0 ]] && [[ ! -s "${REPORT_FILE}" ]]; then - grep -m1 -F '[ERROR]' "${REPORT_DIR}/output.log" > "${REPORT_FILE}" -fi - if [[ "${OZONE_WITH_COVERAGE}" == "true" ]]; then #Archive combined jacoco records mvn -B -N jacoco:merge -Djacoco.destFile=$REPORT_DIR/jacoco-combined.exec -Dscan=false fi -exit ${rc} +ERROR_PATTERN="\[ERROR\]" +source "${DIR}/_post_process.sh" diff --git a/hadoop-ozone/dev-support/checks/kubernetes.sh b/hadoop-ozone/dev-support/checks/kubernetes.sh index 4f4f78e6ef1..e9ecfdf5f2a 100755 --- a/hadoop-ozone/dev-support/checks/kubernetes.sh +++ b/hadoop-ozone/dev-support/checks/kubernetes.sh @@ -35,6 +35,7 @@ else fi REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/kubernetes"} +REPORT_FILE="$REPORT_DIR/summary.txt" OZONE_VERSION=$(mvn help:evaluate -Dexpression=ozone.version -q -DforceStdout -Dscan=false) DIST_DIR="$DIR/../../dist/target/ozone-$OZONE_VERSION" @@ -48,10 +49,10 @@ mkdir -p "$REPORT_DIR" cd "$DIST_DIR/kubernetes/examples" || exit 1 ./test-all.sh 2>&1 | tee "${REPORT_DIR}/output.log" -RES=$? +rc=$? cp -r result/* "$REPORT_DIR/" -cp "$REPORT_DIR/log.html" "$REPORT_DIR/summary.html" -grep -A1 FAIL "${REPORT_DIR}/output.log" > "${REPORT_DIR}/summary.txt" +grep -A1 FAIL "${REPORT_DIR}/output.log" > "${REPORT_FILE}" -exit $RES +ERROR_PATTERN="FAIL" +source "${DIR}/_post_process.sh" diff --git a/hadoop-ozone/dev-support/checks/license.sh b/hadoop-ozone/dev-support/checks/license.sh index 90accc85662..673a77e6d4f 100755 --- a/hadoop-ozone/dev-support/checks/license.sh +++ b/hadoop-ozone/dev-support/checks/license.sh @@ -42,7 +42,7 @@ DEFAULT_SRC="target/generated-sources/license/THIRD-PARTY.txt" src="${1:-${DEFAULT_SRC}}" if [[ ! -e ${src} ]]; then - MAVEN_OPTIONS="-B -fae -Dskip.npx -Dskip.installnpx --no-transfer-progress ${MAVEN_OPTIONS:-}" + MAVEN_OPTIONS="-B -fae -DskipRecon --no-transfer-progress ${MAVEN_OPTIONS:-}" mvn ${MAVEN_OPTIONS} license:aggregate-add-third-party | tee "${REPORT_DIR}/output.log" src="${DEFAULT_SRC}" fi @@ -67,9 +67,7 @@ grep '(' ${src} \ || true ) \ | sort -u \ | tee "${REPORT_FILE}" +rc=$? 
-wc -l "${REPORT_FILE}" | awk '{ print $1 }' > "${REPORT_DIR}/failures" - -if [[ -s "${REPORT_FILE}" ]]; then - exit 1 -fi +ERROR_PATTERN="" +source "${DIR}/_post_process.sh" diff --git a/hadoop-ozone/dev-support/checks/rat.sh b/hadoop-ozone/dev-support/checks/rat.sh index 2bdb66ba119..3582587f8da 100755 --- a/hadoop-ozone/dev-support/checks/rat.sh +++ b/hadoop-ozone/dev-support/checks/rat.sh @@ -28,9 +28,5 @@ mvn -B --no-transfer-progress -fn org.apache.rat:apache-rat-plugin:check "$@" grep -r --include=rat.txt "!????" $dirs | tee "$REPORT_FILE" -wc -l "$REPORT_FILE" | awk '{print $1}'> "$REPORT_DIR/failures" - -if [[ -s "${REPORT_FILE}" ]]; then - exit 1 -fi - +ERROR_PATTERN="\[ERROR\]" +source "${DIR}/_post_process.sh" diff --git a/hadoop-ozone/dev-support/checks/repro.sh b/hadoop-ozone/dev-support/checks/repro.sh new file mode 100755 index 00000000000..8d3db0fa7e9 --- /dev/null +++ b/hadoop-ozone/dev-support/checks/repro.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This check verifies build reproducibility. + +set -u -o pipefail + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +cd "$DIR/../../.." || exit 1 + +BASE_DIR="$(pwd -P)" +REPORT_DIR=${OUTPUT_DIR:-"${BASE_DIR}/target/repro"} + +rc=0 +source "${DIR}"/_build.sh verify artifact:compare "$@" | tee output.log + +mkdir -p "$REPORT_DIR" +mv output.log "$REPORT_DIR"/ + +REPORT_FILE="$REPORT_DIR/summary.txt" +grep 'ERROR.*mismatch' "${REPORT_DIR}/output.log" > "${REPORT_FILE}" + +ERROR_PATTERN="\[ERROR\]" +source "${DIR}/_post_process.sh" diff --git a/hadoop-ozone/dev-support/checks/sonar.sh b/hadoop-ozone/dev-support/checks/sonar.sh index 27a971f691c..b9948a31d4e 100755 --- a/hadoop-ozone/dev-support/checks/sonar.sh +++ b/hadoop-ozone/dev-support/checks/sonar.sh @@ -24,7 +24,7 @@ if [ ! "$SONAR_TOKEN" ]; then fi -mvn -V -B -DskipShade -DskipTests -Dskip.npx -Dskip.installnpx --no-transfer-progress \ +mvn -V -B -DskipShade -DskipTests -DskipRecon --no-transfer-progress \ -Dsonar.coverage.jacoco.xmlReportPaths="$(pwd)/target/coverage/all.xml" \ -Dsonar.host.url=https://sonarcloud.io -Dsonar.organization=apache -Dsonar.projectKey=hadoop-ozone \ verify org.sonarsource.scanner.maven:sonar-maven-plugin:3.6.0.1398:sonar diff --git a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching index 0ec066aca56..a902eab5a97 100755 --- a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching +++ b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching @@ -71,6 +71,8 @@ run cp -p "${ROOT}/HISTORY.md" . run cp -p "${ROOT}/SECURITY.md" . run cp -p "${ROOT}/CONTRIBUTING.md" . 
+run mkdir -p ./share/ozone/classpath +run mkdir -p ./share/ozone/lib run mkdir -p ./share/ozone/web run mkdir -p ./bin run mkdir -p ./sbin @@ -126,8 +128,19 @@ run cp -p -r "${ROOT}/hadoop-ozone/dist/src/main/smoketest" . run cp -p -r "${ROOT}/hadoop-ozone/dist/target/k8s" kubernetes run cp -p -r "${ROOT}/hadoop-ozone/dist/target/Dockerfile" . -#Copy pre-generated keytabs -run cp -p -R "${ROOT}/hadoop-ozone/dist/src/main/keytabs" compose/_keytabs +run mkdir compose/_keytabs + +for file in $(find "${ROOT}" -path '*/target/classes/*.classpath' | sort); do + # We need to add the artifact manually as it's not part the generated classpath desciptor + module=$(basename "${file%.classpath}") + sed -i -e "s;$;:\$HDDS_LIB_JARS_DIR/${module}-${HDDS_VERSION}.jar;" "$file" + + cp -n -p -v "$file" share/ozone/classpath/ +done + +for file in $(find "${ROOT}" -path '*/share/ozone/lib/*jar' | sort); do + cp -n -p -v "$file" share/ozone/lib/ +done #workaround for https://issues.apache.org/jira/browse/MRESOURCES-236 find ./compose -name "*.sh" -exec chmod 755 {} \; diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml index f2c2ba365c0..9540a5195b7 100644 --- a/hadoop-ozone/dist/pom.xml +++ b/hadoop-ozone/dist/pom.xml @@ -19,17 +19,19 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-dist Apache Ozone Distribution jar - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT UTF-8 true - 20240729-jdk17-1 - apache/ozone-testkrb5:20230318-1 + apache/ozone + -rocky + 20241212-1-jdk21 + ghcr.io/apache/ozone-testkrb5:20241129-1 true @@ -71,24 +73,7 @@ maven-dependency-plugin - copy-classpath-files - prepare-package - - unpack-dependencies - - - - target/ozone-${ozone.version}/share/ozone/classpath - - *.classpath - - hdds-server-scm,ozone-common,ozone-csi,ozone-datanode,ozone-httpfsgateway, - ozone-insight,ozone-manager,ozone-recon,ozone-s3gateway,ozone-tools,hdds-rocks-native,ozone-s3-secret-store - - - - - copy-jars + copy-omitted-jars prepare-package copy-dependencies @@ -98,24 +83,6 @@ runtime - - copy-omitted-jars - prepare-package - - copy - - - target/ozone-${ozone.version}/share/ozone/lib - - - - com.google.protobuf - protobuf-java - ${grpc.protobuf-compile.version} - - - - @@ -199,6 +166,10 @@ org.apache.ozone hdds-container-service + + org.apache.ozone + ozone-recon + org.apache.ozone ozone-s3gateway @@ -267,21 +238,6 @@ - - build-with-recon - - - !skipRecon - - - - - - org.apache.ozone - ozone-recon - - - diff --git a/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml b/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml index 3450b387393..517d03926fb 100644 --- a/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml +++ b/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml @@ -32,28 +32,30 @@ NOTICE.txt / - - hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-angular-nvd3.txt - /licenses - - - hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-angular.txt - /licenses - - - hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-d3.txt - /licenses - - - hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-nvd3.txt - /licenses - - - hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-jquery.txt - /licenses - + + hadoop-ozone/dist/src/main/license/src/licenses + /licenses + + LICENSE-*.txt + + + + tools + /tools + + **/* + + true + + **/.classpath + **/.project + **/.settings + **/*.iml + **/target/** + + . 
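To make the classpath stitching added to dist-layout-stitching above concrete: for each generated <module>.classpath descriptor, the sed call appends the module's own artifact (which, per the script's comment, is not part of the generated descriptor) before the file is copied into share/ozone/classpath. Below is a standalone sketch of the same transformation; the descriptor name and its single-line "classpath=..." contents are invented for illustration, and GNU sed is assumed.

  #!/usr/bin/env bash
  # Illustration only: replays the append that dist-layout-stitching performs
  # on each generated classpath descriptor. File name and contents are made up.
  set -eu

  HDDS_VERSION=2.0.0-SNAPSHOT
  file=ozone-manager.classpath

  # Pretend this is what the dependency plugin wrote for the module.
  echo 'classpath=$HDDS_LIB_JARS_DIR/guava.jar:$HDDS_LIB_JARS_DIR/ratis-common.jar' > "$file"

  # Derive the module name from the file name and append its own jar to the line.
  module=$(basename "${file%.classpath}")
  sed -i -e "s;$;:\$HDDS_LIB_JARS_DIR/${module}-${HDDS_VERSION}.jar;" "$file"

  cat "$file"
  # classpath=$HDDS_LIB_JARS_DIR/guava.jar:$HDDS_LIB_JARS_DIR/ratis-common.jar:$HDDS_LIB_JARS_DIR/ozone-manager-2.0.0-SNAPSHOT.jar
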
diff --git a/hadoop-ozone/dist/src/main/compose/common/hadoop-secure.yaml b/hadoop-ozone/dist/src/main/compose/common/hadoop-secure.yaml index 8600659b786..ee97525fbb0 100644 --- a/hadoop-ozone/dist/src/main/compose/common/hadoop-secure.yaml +++ b/hadoop-ozone/dist/src/main/compose/common/hadoop-secure.yaml @@ -16,7 +16,7 @@ services: rm: - image: ${HADOOP_IMAGE}:${HADOOP_VERSION} + image: ${HADOOP_TEST_IMAGE} hostname: rm dns_search: . volumes: @@ -35,7 +35,7 @@ services: profiles: - hadoop nm: - image: ${HADOOP_IMAGE}:${HADOOP_VERSION} + image: ${HADOOP_TEST_IMAGE} hostname: nm dns_search: . volumes: @@ -54,7 +54,7 @@ services: profiles: - hadoop jhs: - image: ${HADOOP_IMAGE}:${HADOOP_VERSION} + image: ${HADOOP_TEST_IMAGE} container_name: jhs hostname: jhs dns_search: . diff --git a/hadoop-ozone/dist/src/main/compose/common/hadoop-test.sh b/hadoop-ozone/dist/src/main/compose/common/hadoop-test.sh index 4b66baca422..c16f6fe22f5 100755 --- a/hadoop-ozone/dist/src/main/compose/common/hadoop-test.sh +++ b/hadoop-ozone/dist/src/main/compose/common/hadoop-test.sh @@ -21,8 +21,18 @@ if [[ ${SECURITY_ENABLED} == "true" ]]; then fi export COMPOSE_FILE="${COMPOSE_FILE:-docker-compose.yaml}":../common/${extra_compose_file} +: ${HADOOP_IMAGE:="apache/hadoop"} +: ${HADOOP_TEST_IMAGES:=""} + +if [[ -z "${HADOOP_TEST_IMAGES}" ]]; then + # hadoop2 and flokkr images are only available from Docker Hub + HADOOP_TEST_IMAGES="${HADOOP_TEST_IMAGES} apache/hadoop:${hadoop2.version}" + HADOOP_TEST_IMAGES="${HADOOP_TEST_IMAGES} flokkr/hadoop:3.1.2" + HADOOP_TEST_IMAGES="${HADOOP_TEST_IMAGES} ${HADOOP_IMAGE}:${hadoop.version}" +fi + export HADOOP_MAJOR_VERSION=3 -export HADOOP_VERSION=unused # will be set for each test version below +export HADOOP_TEST_IMAGE="${HADOOP_IMAGE}:${hadoop.version}" export OZONE_REPLICATION_FACTOR=3 # shellcheck source=/dev/null @@ -42,14 +52,10 @@ export OZONE_DIR=/opt/ozone # shellcheck source=/dev/null source "$COMPOSE_DIR/../testlib.sh" -for HADOOP_VERSION in ${hadoop2.version} 3.1.2 ${hadoop.version}; do - export HADOOP_VERSION - export HADOOP_MAJOR_VERSION=${HADOOP_VERSION%%.*} - if [[ "${HADOOP_VERSION}" == "${hadoop2.version}" ]] || [[ "${HADOOP_VERSION}" == "${hadoop.version}" ]]; then - export HADOOP_IMAGE=apache/hadoop - else - export HADOOP_IMAGE=flokkr/hadoop - fi +for HADOOP_TEST_IMAGE in $HADOOP_TEST_IMAGES; do + export HADOOP_TEST_IMAGE + hadoop_version="${HADOOP_TEST_IMAGE##*:}" + export HADOOP_MAJOR_VERSION=${hadoop_version%%.*} docker-compose --ansi never --profile hadoop up -d nm rm @@ -60,10 +66,10 @@ for HADOOP_VERSION in ${hadoop2.version} 3.1.2 ${hadoop.version}; do fi for scheme in o3fs ofs; do - execute_robot_test rm -v "SCHEME:${scheme}" -N "hadoop-${HADOOP_VERSION}-hadoopfs-${scheme}" ozonefs/hadoopo3fs.robot + execute_robot_test rm -v "SCHEME:${scheme}" -N "hadoop-${hadoop_version}-hadoopfs-${scheme}" ozonefs/hadoopo3fs.robot # TODO secure MapReduce test is failing with 2.7 due to some token problem if [[ ${SECURITY_ENABLED} != "true" ]] || [[ ${HADOOP_MAJOR_VERSION} == "3" ]]; then - execute_robot_test rm -v "SCHEME:${scheme}" -N "hadoop-${HADOOP_VERSION}-mapreduce-${scheme}" mapreduce.robot + execute_robot_test rm -v "SCHEME:${scheme}" -N "hadoop-${hadoop_version}-mapreduce-${scheme}" mapreduce.robot fi done diff --git a/hadoop-ozone/dist/src/main/compose/common/hadoop.conf b/hadoop-ozone/dist/src/main/compose/common/hadoop.conf index 2a82c0bfa2f..ad2d4cb42be 100644 --- a/hadoop-ozone/dist/src/main/compose/common/hadoop.conf +++ 
b/hadoop-ozone/dist/src/main/compose/common/hadoop.conf @@ -18,9 +18,6 @@ CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs CORE-SITE.xml_fs.AbstractFileSystem.ofs.impl=org.apache.hadoop.fs.ozone.RootedOzFs MAPRED-SITE.XML_mapreduce.framework.name=yarn -MAPRED-SITE.XML_yarn.app.mapreduce.am.env=HADOOP_MAPRED_HOME=$HADOOP_HOME -MAPRED-SITE.XML_mapreduce.map.env=HADOOP_MAPRED_HOME=$HADOOP_HOME -MAPRED-SITE.XML_mapreduce.reduce.env=HADOOP_MAPRED_HOME=$HADOOP_HOME MAPRED-SITE.XML_mapreduce.map.memory.mb=4096 MAPRED-SITE.XML_mapreduce.reduce.memory.mb=4096 MAPRED-SITE.XML_mapred.child.java.opts=-Xmx2g diff --git a/hadoop-ozone/dist/src/main/compose/common/hadoop.yaml b/hadoop-ozone/dist/src/main/compose/common/hadoop.yaml index 4fb56e5aa98..c5899eb6b4f 100644 --- a/hadoop-ozone/dist/src/main/compose/common/hadoop.yaml +++ b/hadoop-ozone/dist/src/main/compose/common/hadoop.yaml @@ -16,7 +16,7 @@ services: rm: - image: ${HADOOP_IMAGE}:${HADOOP_VERSION} + image: ${HADOOP_TEST_IMAGE} hostname: rm volumes: - ../..:/opt/ozone @@ -31,7 +31,7 @@ services: profiles: - hadoop nm: - image: ${HADOOP_IMAGE}:${HADOOP_VERSION} + image: ${HADOOP_TEST_IMAGE} hostname: nm volumes: - ../..:/opt/ozone diff --git a/hadoop-ozone/dist/src/main/compose/common/init-kdc.sh b/hadoop-ozone/dist/src/main/compose/common/init-kdc.sh new file mode 100755 index 00000000000..97f532cfed7 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/common/init-kdc.sh @@ -0,0 +1,87 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eux -o pipefail + +# This script exports keytabs and starts KDC server. 
+ +export_keytab() { + kadmin.local -q "addprinc -randkey $1@EXAMPLE.COM" + kadmin.local -q "ktadd -norandkey -k /etc/security/keytabs/$2.keytab $1@EXAMPLE.COM" +} + +rm -f /etc/security/keytabs/*.keytab + +export_keytab scm/scm scm +export_keytab HTTP/scm scm +export_keytab testuser/scm scm +export_keytab testuser2/scm scm + +export_keytab testuser/dn testuser +export_keytab testuser/httpfs testuser +export_keytab testuser/om testuser +export_keytab testuser/recon testuser +export_keytab testuser/s3g testuser +export_keytab testuser/scm testuser + +export_keytab testuser2/dn testuser2 +export_keytab testuser2/httpfs testuser2 +export_keytab testuser2/om testuser2 +export_keytab testuser2/recon testuser2 +export_keytab testuser2/s3g testuser2 +export_keytab testuser2/scm testuser2 + +export_keytab om/om om +export_keytab HTTP/om om +export_keytab testuser/om om +export_keytab testuser2/om om + +export_keytab s3g/s3g s3g +export_keytab HTTP/s3g s3g +export_keytab testuser/s3g s3g +export_keytab testuser2/s3g s3g + +export_keytab httpfs/httpfs httpfs +export_keytab HTTP/httpfs httpfs +export_keytab testuser/httpfs httpfs +export_keytab testuser2/httpfs httpfs + +export_keytab recon/recon recon +export_keytab HTTP/recon recon +export_keytab testuser/recon recon +export_keytab testuser2/recon recon + +export_keytab dn/dn dn +export_keytab HTTP/dn dn +export_keytab testuser/dn dn +export_keytab testuser2/dn dn + +export_keytab HTTP/scm HTTP +export_keytab HTTP/s3g HTTP +export_keytab HTTP/httpfs HTTP +export_keytab HTTP/ozone HTTP + +export_keytab hadoop/rm hadoop + +export_keytab rm/rm rm +export_keytab nm/nm nm +export_keytab jhs/jhs jhs + +chmod 755 /etc/security/keytabs/*.keytab +chown 1000. /etc/security/keytabs/*.keytab + +krb5kdc -n diff --git a/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh b/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh index e8032068465..03600616a76 100644 --- a/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh +++ b/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh @@ -93,10 +93,13 @@ execute_s3a_tests() { EOF # Some tests are skipped due to known issues. 
+ # - ITestS3AContractBulkDelete: HDDS-11661 + # - ITestS3AContractCreate: HDDS-11663 # - ITestS3AContractDistCp: HDDS-10616 + # - ITestS3AContractMkdirWithCreatePerf: HDDS-11662 # - ITestS3AContractRename: HDDS-10665 - mvn -B -V --fail-never --no-transfer-progress \ - -Dtest='ITestS3AContract*, ITestS3ACommitterMRJob, !ITestS3AContractDistCp, !ITestS3AContractRename' \ + mvn ${MAVEN_ARGS:-} --fail-never \ + -Dtest='ITestS3AContract*, ITestS3ACommitterMRJob, !ITestS3AContractBulkDelete, !ITestS3AContractCreate#testOverwrite*EmptyDirectory[*], !ITestS3AContractDistCp, !ITestS3AContractMkdirWithCreatePerf, !ITestS3AContractRename' \ clean test local target="${RESULT_DIR}/junit/${bucket}/target" diff --git a/hadoop-ozone/dist/src/main/compose/compatibility/docker-config b/hadoop-ozone/dist/src/main/compose/compatibility/docker-config index d3984110d8d..f7f1c24b8a0 100644 --- a/hadoop-ozone/dist/src/main/compose/compatibility/docker-config +++ b/hadoop-ozone/dist/src/main/compose/compatibility/docker-config @@ -21,7 +21,7 @@ OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon diff --git a/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-compose.yaml index 5220d71669d..e2d7272b030 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-compose.yaml @@ -39,6 +39,8 @@ services: volumes: - tmpfs1:/data - ../..:/opt/hadoop + deploy: + replicas: ${DATANODE1_REPLICA:-1} datanode2: <<: *common-config ports: @@ -50,6 +52,8 @@ services: volumes: - tmpfs2:/data - ../..:/opt/hadoop + deploy: + replicas: ${DATANODE2_REPLICA:-1} datanode3: <<: *common-config ports: @@ -61,6 +65,8 @@ services: volumes: - tmpfs3:/data - ../..:/opt/hadoop + deploy: + replicas: ${DATANODE3_REPLICA:-1} datanode4: <<: *common-config ports: @@ -72,6 +78,34 @@ services: volumes: - tmpfs4:/data - ../..:/opt/hadoop + deploy: + replicas: ${DATANODE4_REPLICA:-1} + datanode5: + <<: *common-config + ports: + - 19864 + - 9882 + environment: + <<: *replication + command: [ "ozone","datanode" ] + volumes: + - tmpfs5:/data + - ../..:/opt/hadoop + deploy: + replicas: ${DATANODE5_REPLICA:-1} + datanode6: + <<: *common-config + ports: + - 19864 + - 9882 + environment: + <<: *replication + command: [ "ozone","datanode" ] + volumes: + - tmpfs6:/data + - ../..:/opt/hadoop + deploy: + replicas: ${DATANODE6_REPLICA:-1} om1: <<: *common-config environment: @@ -175,3 +209,15 @@ volumes: o: "size=1g,uid=4000" device: tmpfs type: tmpfs + tmpfs5: + driver: local + driver_opts: + o: "size=1g,uid=5000" + device: tmpfs + type: tmpfs + tmpfs6: + driver: local + driver_opts: + o: "size=1g,uid=6000" + device: tmpfs + type: tmpfs \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config index 29984d43662..6e0781a1d9e 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config @@ 
-34,7 +34,7 @@ OZONE-SITE.XML_ozone.scm.address.scmservice.scm1=scm1 OZONE-SITE.XML_ozone.scm.address.scmservice.scm2=scm2 OZONE-SITE.XML_ozone.scm.address.scmservice.scm3=scm3 OZONE-SITE.XML_ozone.scm.ratis.enable=true -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.container.size=100MB OZONE-SITE.XML_ozone.scm.block.size=20MB OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB @@ -43,6 +43,7 @@ OZONE-SITE.XML_hdds.node.report.interval=20s OZONE-SITE.XML_hdds.heartbeat.interval=20s OZONE-SITE.XML_hdds.datanode.du.refresh.period=20s OZONE-SITE.XML_hdds.datanode.dir=/data/hdds +OZONE-SITE.XML_hdds.datanode.container.db.dir=/data/metadata OZONE-SITE.XML_hdds.datanode.volume.min.free.space=100MB OZONE-SITE.XML_ozone.scm.pipeline.creation.auto.factor.one=false OZONE-SITE.XML_ozone.datanode.pipeline.limit=1 @@ -53,7 +54,8 @@ OZONE-SITE.XML_ozone.om.s3.grpc.server_enabled=true OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon OZONE-SITE.XML_dfs.container.ratis.datastream.enabled=true OZONE-SITE.XML_ozone.http.basedir=/tmp/ozone_http - +OZONE-SITE.XML_hdds.container.balancer.balancing.iteration.interval=25s +OZONE-SITE.XML_hdds.container.balancer.trigger.du.before.move.enable=false OZONE_CONF_DIR=/etc/hadoop OZONE_LOG_DIR=/var/log/hadoop diff --git a/hadoop-ozone/dist/src/main/compose/ozone-balancer/test-ec.sh b/hadoop-ozone/dist/src/main/compose/ozone-balancer/test-ec.sh new file mode 100644 index 00000000000..bc4bf6c6661 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozone-balancer/test-ec.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +#suite:balancer + +COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +export COMPOSE_DIR +export OM_SERVICE_ID="om" +export OM=om1 +export SCM=scm1 +export OZONE_REPLICATION_FACTOR=3 + +# shellcheck source=/dev/null +source "$COMPOSE_DIR/../testlib.sh" + +start_docker_env +execute_robot_test ${OM} -v REPLICATION:rs-3-2-1024k -v TYPE:EC -v LOWER_LIMIT:0.7 -v UPPER_LIMIT:1.5 -N ozone-balancer-EC balancer/testBalancer.robot diff --git a/hadoop-ozone/dist/src/main/compose/ozone-balancer/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-balancer/test-ratis.sh similarity index 83% rename from hadoop-ozone/dist/src/main/compose/ozone-balancer/test.sh rename to hadoop-ozone/dist/src/main/compose/ozone-balancer/test-ratis.sh index e79979877ba..2c5091c64f5 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-balancer/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozone-balancer/test-ratis.sh @@ -24,10 +24,12 @@ export OM=om1 export SCM=scm1 export OZONE_REPLICATION_FACTOR=3 +export DATANODE2_REPLICA=0 +export DATANODE5_REPLICA=0 + # shellcheck source=/dev/null source "$COMPOSE_DIR/../testlib.sh" -# We need 4 dataNodes in this tests -start_docker_env 4 +start_docker_env -execute_robot_test ${OM} balancer/testBalancer.robot +execute_robot_test ${OM} -v REPLICATION:THREE -v TYPE:RATIS -v LOWER_LIMIT:3 -v UPPER_LIMIT:3.5 -N ozone-balancer-RATIS balancer/testBalancer.robot diff --git a/hadoop-ozone/dist/src/main/compose/ozone-csi/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-csi/docker-config index 623f9595583..ba4d80a9d05 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-csi/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-csi/docker-config @@ -27,7 +27,7 @@ OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/.env b/hadoop-ozone/dist/src/main/compose/ozone-ha/.env index 2de359fc5db..6507664fad7 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-ha/.env +++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/.env @@ -15,6 +15,7 @@ # limitations under the License.
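A minimal usage sketch for the two balancer test scripts above (the dist path is an assumption based on the usual packaged layout, not something this patch defines): each replication flavor now runs as its own acceptance suite.

  # run the RATIS and EC balancer scenarios independently (illustrative path)
  cd hadoop-ozone/dist/target/ozone-*/compose/ozone-balancer
  ./test-ratis.sh   # THREE-replicated data; DATANODE2/DATANODE5 replicas scaled to 0, so 4 datanodes run
  ./test-ec.sh      # rs-3-2-1024k EC data; all 6 datanodes from docker-compose.yaml run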
HDDS_VERSION=${hdds.version} +HADOOP_IMAGE=apache/hadoop OZONE_RUNNER_VERSION=${docker.ozone-runner.version} OZONE_RUNNER_IMAGE=apache/ozone-runner OZONE_OPTS= diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config index 08c490ea51f..ebf2ce532bd 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config @@ -34,7 +34,7 @@ OZONE-SITE.XML_ozone.scm.address.scmservice.scm1=scm1 OZONE-SITE.XML_ozone.scm.address.scmservice.scm2=scm2 OZONE-SITE.XML_ozone.scm.address.scmservice.scm3=scm3 OZONE-SITE.XML_ozone.scm.ratis.enable=true -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config index 65834455eaa..ae2fb092be6 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config @@ -23,7 +23,7 @@ OZONE-SITE.XML_ozone.om.address.omservice.om2=om2 OZONE-SITE.XML_ozone.om.address.omservice.om3=om3 OZONE-SITE.XML_ozone.om.ratis.enable=true OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-config index 79d2e5285fb..f0ec8fcaa1a 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-config @@ -24,7 +24,7 @@ OZONE-SITE.XML_ozone.om.address.omservice.om3=om3 OZONE-SITE.XML_ozone.om.ratis.enable=true OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata diff --git a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config index 8239aad2a5d..59b1fcf8cab 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config @@ -24,7 +24,7 @@ OZONE-SITE.XML_ozone.ozone.scm.block.size=64MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-config b/hadoop-ozone/dist/src/main/compose/ozone/docker-config index a657f22340e..f2a9e044793 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone/docker-config @@ -29,7 +29,7 @@ 
OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon diff --git a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config index 06696a0e413..87b0cb50537 100644 --- a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config @@ -19,7 +19,7 @@ CORE-SITE.XML_fs.defaultFS=ofs://om OZONE-SITE.XML_ozone.om.address=om OZONE-SITE.XML_ozone.om.http-address=om:9874 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.handler.type=distributed diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config b/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config index 66f4cf151ec..adfaeb287d0 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config @@ -17,7 +17,7 @@ CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000 OZONE-SITE.XML_ozone.ksm.address=ksm OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.om.address=om OZONE-SITE.XML_ozone.om.http-address=om:9874 OZONE-SITE.XML_ozone.scm.block.client.address=scm diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/.env b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/.env index 5f3e96ed617..c3a2c5329aa 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/.env +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/.env @@ -15,6 +15,7 @@ # limitations under the License. HDDS_VERSION=${hdds.version} +HADOOP_IMAGE=apache/hadoop HADOOP_VERSION=${hadoop.version} OZONE_RUNNER_VERSION=${docker.ozone-runner.version} OZONE_RUNNER_IMAGE=apache/ozone-runner diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-compose.yaml index d55d5e0e2e8..e48d3cb9b05 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-compose.yaml @@ -22,12 +22,12 @@ services: volumes: - ../..:/opt/hadoop - ../_keytabs:/etc/security/keytabs - command: ["krb5kdc","-n"] + command: ["/opt/hadoop/compose/common/init-kdc.sh"] networks: ozone_net: ipv4_address: 172.25.0.100 kms: - image: apache/hadoop:${HADOOP_VERSION} + image: ${HADOOP_IMAGE}:${HADOOP_VERSION} dns_search: . 
ports: - 9600:9600 diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config index 38cc5b71a18..1495e89813a 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config @@ -47,7 +47,7 @@ OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.handler.type=distributed diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env index 34706093171..c3a2c5329aa 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env @@ -15,6 +15,8 @@ # limitations under the License. HDDS_VERSION=${hdds.version} +HADOOP_IMAGE=apache/hadoop +HADOOP_VERSION=${hadoop.version} OZONE_RUNNER_VERSION=${docker.ozone-runner.version} OZONE_RUNNER_IMAGE=apache/ozone-runner OZONE_TESTKRB5_IMAGE=${docker.ozone-testkr5b.image} diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml index 625aac77723..4db7576bd22 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml @@ -22,9 +22,9 @@ services: volumes: - ../..:/opt/hadoop - ../_keytabs:/etc/security/keytabs - command: ["krb5kdc","-n"] + command: ["/opt/hadoop/compose/common/init-kdc.sh"] kms: - image: apache/hadoop:3 + image: ${HADOOP_IMAGE}:${HADOOP_VERSION} dns_search: . ports: - 9600:9600 diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config index 12a7819d1ad..2a58ffcf384 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config @@ -22,7 +22,7 @@ OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.handler.type=distributed diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/.env b/hadoop-ozone/dist/src/main/compose/ozonesecure/.env index 5f3e96ed617..c3a2c5329aa 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/.env +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/.env @@ -15,6 +15,7 @@ # limitations under the License. 
HDDS_VERSION=${hdds.version} +HADOOP_IMAGE=apache/hadoop HADOOP_VERSION=${hadoop.version} OZONE_RUNNER_VERSION=${docker.ozone-runner.version} OZONE_RUNNER_IMAGE=apache/ozone-runner diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml index 39d26c362f6..f3e372964bb 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml @@ -22,9 +22,9 @@ services: volumes: - ../..:/opt/hadoop - ../_keytabs:/etc/security/keytabs - command: ["krb5kdc","-n"] + command: ["/opt/hadoop/compose/common/init-kdc.sh"] kms: - image: apache/hadoop:${HADOOP_VERSION} + image: ${HADOOP_IMAGE}:${HADOOP_VERSION} hostname: kms dns_search: . ports: @@ -96,7 +96,7 @@ services: - 9878:9878 env_file: - ./docker-config - command: ["/opt/hadoop/bin/ozone","s3g"] + command: ["/opt/hadoop/bin/ozone","s3g", "-Dozone.om.transport.class=${OZONE_S3_OM_TRANSPORT:-org.apache.hadoop.ozone.om.protocolPB.GrpcOmTransportFactory}"] environment: OZONE_OPTS: recon: diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config index 4f13d624969..387a1c8517e 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config @@ -27,7 +27,7 @@ OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.handler.type=distributed diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test-fcq.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test-fcq.sh index 644e45c4d5a..a9e87a60cdd 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test-fcq.sh +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test-fcq.sh @@ -25,6 +25,7 @@ source "$COMPOSE_DIR/../testlib.sh" export SECURITY_ENABLED=true export COMPOSE_FILE=docker-compose.yaml:fcq.yaml +export OZONE_S3_OM_TRANSPORT="org.apache.hadoop.ozone.om.protocolPB.Hadoop3OmTransportFactory" start_docker_env diff --git a/hadoop-ozone/dist/src/main/compose/restart/docker-config b/hadoop-ozone/dist/src/main/compose/restart/docker-config index 161af7a2975..852eb6647c3 100644 --- a/hadoop-ozone/dist/src/main/compose/restart/docker-config +++ b/hadoop-ozone/dist/src/main/compose/restart/docker-config @@ -21,7 +21,7 @@ OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon diff --git a/hadoop-ozone/dist/src/main/compose/testlib.sh b/hadoop-ozone/dist/src/main/compose/testlib.sh index 1e9cc85781a..8ced94e5007 100755 --- a/hadoop-ozone/dist/src/main/compose/testlib.sh +++ b/hadoop-ozone/dist/src/main/compose/testlib.sh @@ -30,8 +30,29 @@ fi source ${_testlib_dir}/compose_v2_compatibility.sh +: 
${OZONE_COMPOSE_RUNNING:=false} : ${SCM:=scm} +# create temp directory for test data; only once, even if testlib.sh is sourced again +if [[ -z "${TEST_DATA_DIR:-}" ]] && [[ "${KEEP_RUNNING:-false}" == "false" ]]; then + export TEST_DATA_DIR="$(mktemp -d "${TMPDIR:-/tmp}"/robot-data-XXXXXX)" + chmod go+rx "${TEST_DATA_DIR}" + _compose_delete_test_data() { + rm -frv "${TEST_DATA_DIR}" + } + + trap _compose_cleanup EXIT HUP INT TERM +fi + +_compose_cleanup() { + if [[ "${OZONE_COMPOSE_RUNNING}" == "true" ]]; then + stop_docker_env || true + fi + if [[ "$(type -t _compose_delete_test_data || true)" == "function" ]]; then + _compose_delete_test_data + fi +} + ## @description create results directory, purging any prior data create_results_dir() { #delete previous results @@ -138,15 +159,15 @@ start_docker_env(){ create_results_dir export OZONE_SAFEMODE_MIN_DATANODES="${datanode_count}" - docker-compose --ansi never down - - trap stop_docker_env EXIT HUP INT TERM + docker-compose --ansi never down --remove-orphans opts="" if has_scalable_datanode; then opts="--scale datanode=${datanode_count}" fi + OZONE_COMPOSE_RUNNING=true + trap _compose_cleanup EXIT HUP INT TERM docker-compose --ansi never up -d $opts wait_for_safemode_exit @@ -184,11 +205,11 @@ execute_robot_test(){ local output_name=$(get_output_name) # find unique filename - declare -i i=0 - OUTPUT_FILE="robot-${output_name}1.xml" - while [[ -f $RESULT_DIR/$OUTPUT_FILE ]]; do - let ++i - OUTPUT_FILE="robot-${output_name}${i}.xml" + for ((i=1; i<1000; i++)); do + OUTPUT_FILE="robot-${output_name}$(printf "%03d" ${i}).xml" + if [[ ! -f $RESULT_DIR/$OUTPUT_FILE ]]; then + break; + fi done SMOKETEST_DIR_INSIDE="${OZONE_DIR:-/opt/hadoop}/smoketest" @@ -200,7 +221,7 @@ execute_robot_test(){ # shellcheck disable=SC2068 docker-compose exec -T "$CONTAINER" mkdir -p "$RESULT_DIR_INSIDE" \ && docker-compose exec -T "$CONTAINER" robot \ - -v KEY_NAME:"${OZONE_BUCKET_KEY_NAME}" \ + -v ENCRYPTION_KEY:"${OZONE_BUCKET_KEY_NAME}" \ -v OM_HA_PARAM:"${OM_HA_PARAM}" \ -v OM_SERVICE_ID:"${OM_SERVICE_ID:-om}" \ -v OZONE_DIR:"${OZONE_DIR}" \ @@ -367,7 +388,8 @@ stop_docker_env(){ down_repeats=3 for i in $(seq 1 $down_repeats) do - if docker-compose --ansi never down; then + if docker-compose --ansi never --profile "*" down --remove-orphans; then + OZONE_COMPOSE_RUNNING=false return fi if [[ ${i} -eq 1 ]]; then @@ -398,7 +420,7 @@ run_rebot() { shift 2 - local tempdir="$(mktemp -d --suffix rebot -p "${output_dir}")" + local tempdir="$(mktemp -d "${output_dir}"/rebot-XXXXXX)" #Should be writeable from the docker containers where user is different. 
chmod a+wx "${tempdir}" if docker run --rm -v "${input_dir}":/rebot-input -v "${tempdir}":/rebot-output -w /rebot-input \ @@ -517,9 +539,13 @@ fix_data_dir_permissions() { ## @param `ozone` image version prepare_for_binary_image() { local v=$1 + local default_image="${docker.ozone.image}" # set at build-time from Maven property + local default_flavor="${docker.ozone.image.flavor}" # set at build-time from Maven property + local image="${OZONE_IMAGE:-${default_image}}" # may be specified by user running the test + local flavor="${OZONE_IMAGE_FLAVOR:-${default_flavor}}" # may be specified by user running the test export OZONE_DIR=/opt/ozone - export OZONE_IMAGE="apache/ozone:${v}" + export OZONE_TEST_IMAGE="${image}:${v}${flavor}" } ## @description Define variables required for using `ozone-runner` docker image @@ -539,7 +565,7 @@ get_runner_image_spec() { ## @param `ozone-runner` image version (optional) prepare_for_runner_image() { export OZONE_DIR=/opt/hadoop - export OZONE_IMAGE="$(get_runner_image_spec "$@")" + export OZONE_TEST_IMAGE="$(get_runner_image_spec "$@")" } ## @description Executing the Ozone Debug CLI related robot tests diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/.env b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/.env index 4e01ec92416..2625c4fbe90 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/.env +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/.env @@ -14,11 +14,12 @@ # See the License for the specific language governing permissions and # limitations under the License. +HADOOP_IMAGE=apache/hadoop HADOOP_VERSION=${hadoop.version} HDDS_VERSION=${hdds.version} OZONE_RUNNER_VERSION=${docker.ozone-runner.version} OZONE_RUNNER_IMAGE=apache/ozone-runner -OZONE_IMAGE=apache/ozone-runner:${docker.ozone-runner.version} +OZONE_TEST_IMAGE=apache/ozone-runner:${docker.ozone-runner.version} OZONE_TESTKRB5_IMAGE=${docker.ozone-testkr5b.image} OZONE_DIR=/opt/hadoop OZONE_VOLUME=./data diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-compose.yaml index 84ae48fbbc3..8235f213749 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-compose.yaml @@ -20,7 +20,7 @@ x-common-config: env_file: - docker-config - ../../../common/security.conf - image: ${OZONE_IMAGE} + image: ${OZONE_TEST_IMAGE} dns_search: . 
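A hedged sketch of the override hooks introduced in testlib.sh above (the registry shown is illustrative only; the patch defines the OZONE_IMAGE and OZONE_IMAGE_FLAVOR variables, not these values): tests that call prepare_for_binary_image can now pull released Ozone images from a user-specified image and flavor instead of the previously hard-coded apache/ozone images.

  # illustrative override before starting a compose-based test
  export OZONE_IMAGE=ghcr.io/apache/ozone   # defaults to the Maven property docker.ozone.image
  export OZONE_IMAGE_FLAVOR="-rocky"        # defaults to the Maven property docker.ozone.image.flavor
  ./test.sh                                 # OZONE_TEST_IMAGE resolves to ${OZONE_IMAGE}:<version>${OZONE_IMAGE_FLAVOR}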
x-environment: @@ -67,7 +67,7 @@ x-volumes: services: kdc: - command: ["krb5kdc","-n"] + command: ["/opt/hadoop/compose/common/init-kdc.sh"] hostname: kdc image: ${OZONE_TESTKRB5_IMAGE} networks: @@ -83,7 +83,7 @@ services: - docker-config environment: HADOOP_CONF_DIR: /opt/hadoop/etc/hadoop - image: apache/hadoop:${HADOOP_VERSION} + image: ${HADOOP_IMAGE}:${HADOOP_VERSION} networks: net: ipv4_address: 10.9.0.3 diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config index a1b6da80c4b..d06d3279dc9 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config @@ -35,7 +35,7 @@ OZONE-SITE.XML_ozone.scm.primordial.node.id=scm1 OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_hdds.datanode.dir=/data/hdds OZONE-SITE.XML_hdds.datanode.volume.min.free.space=100MB diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/.env b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/.env index 616f960b3e4..babe87a492a 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/.env +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/.env @@ -17,6 +17,6 @@ HDDS_VERSION=${hdds.version} OZONE_RUNNER_VERSION=${docker.ozone-runner.version} OZONE_RUNNER_IMAGE=apache/ozone-runner -OZONE_IMAGE=apache/ozone-runner:${docker.ozone-runner.version} +OZONE_TEST_IMAGE=apache/ozone-runner:${docker.ozone-runner.version} OZONE_DIR=/opt/hadoop OZONE_VOLUME=./data diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-compose.yaml index 28b3d922f71..7aea9af378e 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-compose.yaml @@ -19,7 +19,7 @@ x-common-config: &common-config env_file: - docker-config - image: ${OZONE_IMAGE} + image: ${OZONE_TEST_IMAGE} x-environment: &environment diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config index 88126ddf2cb..ce4a8807e54 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config @@ -25,7 +25,7 @@ OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.client.address=scm diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/.env b/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/.env index 4d1c35c3b2d..85c422b5ad7 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/.env +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/.env @@ -17,7 +17,7 @@ HDDS_VERSION=${hdds.version} 
OZONE_RUNNER_VERSION=${docker.ozone-runner.version} OZONE_RUNNER_IMAGE=apache/ozone-runner -OZONE_IMAGE=apache/ozone-runner:${docker.ozone-runner.version} +OZONE_TEST_IMAGE=apache/ozone-runner:${docker.ozone-runner.version} OZONE_DIR=/opt/hadoop OZONE_VOLUME=./data OM_SERVICE_ID=omservice diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-compose.yaml index b7bf1fc4983..880b36ff2b3 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-compose.yaml @@ -19,7 +19,7 @@ x-common-config: &common-config env_file: - docker-config - image: ${OZONE_IMAGE} + image: ${OZONE_TEST_IMAGE} x-environment: &environment diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-config index 77fa2b40ee4..a049ba5f012 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-config @@ -27,7 +27,7 @@ OZONE-SITE.XML_ozone.om.ratis.enable=true OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.client.address=scm diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/test.sh b/hadoop-ozone/dist/src/main/compose/upgrade/test.sh index 6fc4763631b..18930538029 100755 --- a/hadoop-ozone/dist/src/main/compose/upgrade/test.sh +++ b/hadoop-ozone/dist/src/main/compose/upgrade/test.sh @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -#suite:compat +#suite:upgrade TEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd ) @@ -36,7 +36,8 @@ RESULT_DIR="$ALL_RESULT_DIR" create_results_dir # This is the version of Ozone that should use the runner image to run the # code that was built. Other versions will pull images from docker hub. 
export OZONE_CURRENT_VERSION="${ozone.version}" -run_test ha non-rolling-upgrade 1.4.0 "$OZONE_CURRENT_VERSION" +run_test ha non-rolling-upgrade 1.4.1 "$OZONE_CURRENT_VERSION" +# run_test ha non-rolling-upgrade 1.4.0 "$OZONE_CURRENT_VERSION" # run_test ha non-rolling-upgrade 1.3.0 "$OZONE_CURRENT_VERSION" # run_test ha non-rolling-upgrade 1.2.1 "$OZONE_CURRENT_VERSION" # run_test om-ha non-rolling-upgrade 1.1.0 "$OZONE_CURRENT_VERSION" diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh b/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh index d2718d04b7d..bad0a704d39 100755 --- a/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh +++ b/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh @@ -56,7 +56,7 @@ prepare_for_image() { if [[ "$image_version" = "$OZONE_CURRENT_VERSION" ]]; then prepare_for_runner_image else - prepare_for_binary_image "${image_version}-rocky" + prepare_for_binary_image "${image_version}" fi } diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/.env b/hadoop-ozone/dist/src/main/compose/xcompat/.env index a673b7f4655..11979d34326 100644 --- a/hadoop-ozone/dist/src/main/compose/xcompat/.env +++ b/hadoop-ozone/dist/src/main/compose/xcompat/.env @@ -17,5 +17,8 @@ HDDS_VERSION=${hdds.version} OZONE_RUNNER_VERSION=${docker.ozone-runner.version} OZONE_RUNNER_IMAGE=apache/ozone-runner +HADOOP_IMAGE=apache/hadoop HADOOP_VERSION=${hadoop.version} OZONE_TESTKRB5_IMAGE=${docker.ozone-testkr5b.image} +OZONE_IMAGE=${docker.ozone.image} +OZONE_IMAGE_FLAVOR="${docker.ozone.image.flavor}" diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml b/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml index eda14353688..567845e0889 100644 --- a/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml +++ b/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml @@ -14,60 +14,58 @@ # See the License for the specific language governing permissions and # limitations under the License. +x-common-config: + &common-config + dns_search: . 
+ env_file: + - docker-config + command: ["sleep","1000000"] + +x-old-config: + &old-config + <<: *common-config + volumes: + - ../..:/opt/ozone + - ../_keytabs:/etc/security/keytabs + - ./krb5.conf:/etc/krb5.conf + - ${TEST_DATA_DIR}:/testdata + +x-new-config: + &new-config + image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION} + <<: *common-config + volumes: + - ../..:/opt/hadoop + - ../_keytabs:/etc/security/keytabs + - ./krb5.conf:/etc/krb5.conf + - ${TEST_DATA_DIR}:/testdata + services: old_client_1_0_0: - image: apache/ozone:1.0.0-rocky - env_file: - - docker-config - volumes: - - ../..:/opt/ozone - - ../_keytabs:/etc/security/keytabs - - ./krb5.conf:/etc/krb5.conf - command: ["sleep","1000000"] + image: ${OZONE_IMAGE}:1.0.0${OZONE_IMAGE_FLAVOR} + <<: *old-config + old_client_1_1_0: - image: apache/ozone:1.1.0-rocky - env_file: - - docker-config - volumes: - - ../..:/opt/ozone - - ../_keytabs:/etc/security/keytabs - - ./krb5.conf:/etc/krb5.conf - command: ["sleep","1000000"] + image: ${OZONE_IMAGE}:1.1.0${OZONE_IMAGE_FLAVOR} + <<: *old-config + old_client_1_2_1: - image: apache/ozone:1.2.1-rocky - env_file: - - docker-config - volumes: - - ../..:/opt/ozone - - ../_keytabs:/etc/security/keytabs - - ./krb5.conf:/etc/krb5.conf - command: ["sleep","1000000"] + image: ${OZONE_IMAGE}:1.2.1${OZONE_IMAGE_FLAVOR} + <<: *old-config + old_client_1_3_0: - image: apache/ozone:1.3.0-rocky - env_file: - - docker-config - volumes: - - ../..:/opt/ozone - - ../_keytabs:/etc/security/keytabs - - ./krb5.conf:/etc/krb5.conf - command: ["sleep","1000000"] + image: ${OZONE_IMAGE}:1.3.0${OZONE_IMAGE_FLAVOR} + <<: *old-config + old_client_1_4_0: - image: apache/ozone:1.4.0-rocky - env_file: - - docker-config - volumes: - - ../..:/opt/ozone - - ../_keytabs:/etc/security/keytabs - - ./krb5.conf:/etc/krb5.conf - command: ["sleep","1000000"] + image: ${OZONE_IMAGE}:1.4.0${OZONE_IMAGE_FLAVOR} + <<: *old-config + + old_client_1_4_1: + image: ${OZONE_IMAGE}:1.4.1${OZONE_IMAGE_FLAVOR} + <<: *old-config + new_client: - image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION} - env_file: - - docker-config - volumes: - - ../..:/opt/hadoop - - ../_keytabs:/etc/security/keytabs - - ./krb5.conf:/etc/krb5.conf + <<: *new-config environment: OZONE_OPTS: - command: ["sleep","1000000"] diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/docker-config b/hadoop-ozone/dist/src/main/compose/xcompat/docker-config index 1a61aaf4f7e..746b2b6e943 100644 --- a/hadoop-ozone/dist/src/main/compose/xcompat/docker-config +++ b/hadoop-ozone/dist/src/main/compose/xcompat/docker-config @@ -32,7 +32,7 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.scm.client.address=scm OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.names=scm OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/new-cluster.yaml b/hadoop-ozone/dist/src/main/compose/xcompat/new-cluster.yaml index 32059140ce9..275338d7e70 100644 --- a/hadoop-ozone/dist/src/main/compose/xcompat/new-cluster.yaml +++ b/hadoop-ozone/dist/src/main/compose/xcompat/new-cluster.yaml @@ -34,9 +34,9 @@ services: volumes: - ../..:/opt/hadoop - ../_keytabs:/etc/security/keytabs - command: [ "krb5kdc","-n" ] + command: ["/opt/hadoop/compose/common/init-kdc.sh"] kms: - image: 
apache/hadoop:${HADOOP_VERSION} + image: ${HADOOP_IMAGE}:${HADOOP_VERSION} hostname: kms dns_search: . ports: diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/old-cluster.yaml b/hadoop-ozone/dist/src/main/compose/xcompat/old-cluster.yaml index d1b6e56a084..e3df1b3dda0 100644 --- a/hadoop-ozone/dist/src/main/compose/xcompat/old-cluster.yaml +++ b/hadoop-ozone/dist/src/main/compose/xcompat/old-cluster.yaml @@ -17,7 +17,7 @@ # reusable fragments (see https://docs.docker.com/compose/compose-file/#extension-fields) x-old-config: &old-config - image: apache/ozone:${OZONE_VERSION}-rocky + image: ${OZONE_IMAGE}:${OZONE_VERSION}${OZONE_IMAGE_FLAVOR} dns_search: . env_file: - docker-config @@ -34,9 +34,9 @@ services: volumes: - ../..:/opt/ozone - ../_keytabs:/etc/security/keytabs - command: [ "krb5kdc","-n" ] + command: ["/opt/ozone/compose/common/init-kdc.sh"] kms: - image: apache/hadoop:${HADOOP_VERSION} + image: ${HADOOP_IMAGE}:${HADOOP_VERSION} hostname: kms dns_search: . ports: diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/test.sh b/hadoop-ozone/dist/src/main/compose/xcompat/test.sh index 8774cf2f632..e452463a066 100755 --- a/hadoop-ozone/dist/src/main/compose/xcompat/test.sh +++ b/hadoop-ozone/dist/src/main/compose/xcompat/test.sh @@ -21,9 +21,10 @@ COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" export COMPOSE_DIR basename=$(basename ${COMPOSE_DIR}) -current_version="${ozone.version}" +# version is used in bucket name, which does not allow uppercase +current_version="$(echo "${ozone.version}" | sed -e 's/-SNAPSHOT//' | tr '[:upper:]' '[:lower:]')" # TODO: debug acceptance test failures for client versions 1.0.0 on secure clusters -old_versions="1.1.0 1.2.1 1.3.0 1.4.0" # container is needed for each version in clients.yaml +old_versions="1.1.0 1.2.1 1.3.0 1.4.0 1.4.1" # container is needed for each version in clients.yaml # shellcheck source=hadoop-ozone/dist/src/main/compose/testlib.sh source "${COMPOSE_DIR}/../testlib.sh" @@ -31,16 +32,17 @@ source "${COMPOSE_DIR}/../testlib.sh" export SECURITY_ENABLED=true : ${OZONE_BUCKET_KEY_NAME:=key1} -old_client() { - OZONE_DIR=/opt/ozone - container=${client} - "$@" -} +echo 'Compatibility Test' > "${TEST_DATA_DIR}"/small + +client() { + if [[ "${client_version}" == "${current_version}" ]]; then + OZONE_DIR=/opt/hadoop + container=new_client + else + OZONE_DIR=/opt/ozone + container="old_client_${client_version//./_}" + fi -new_client() { - OZONE_DIR=/opt/hadoop - container=new_client - client_version=${current_version} "$@" } @@ -49,106 +51,75 @@ _kinit() { } _init() { + container=scm _kinit execute_command_in_container ${container} ozone freon ockg -n1 -t1 -p warmup } _write() { _kinit - execute_robot_test ${container} -N "xcompat-cluster-${cluster_version}-client-${client_version}-write" -v SUFFIX:${client_version} compatibility/write.robot + execute_robot_test ${container} -N "xcompat-cluster-${cluster_version}-client-${client_version}-write" \ + -v CLIENT_VERSION:${client_version} \ + -v CLUSTER_VERSION:${cluster_version} \ + -v TEST_DATA_DIR:/testdata \ + compatibility/write.robot } _read() { _kinit local data_version="$1" - execute_robot_test ${container} -N "xcompat-cluster-${cluster_version}-client-${client_version}-read-${data_version}" -v SUFFIX:${data_version} compatibility/read.robot -} - -test_bucket_encryption() { - _kinit - execute_robot_test ${container} -N "xcompat-cluster-${cluster_version}-client-${client_version}" -v SUFFIX:${client_version} security/bucket-encryption.robot + 
execute_robot_test ${container} -N "xcompat-cluster-${cluster_version}-client-${client_version}-read-${data_version}" \ + -v CLIENT_VERSION:${client_version} \ + -v CLUSTER_VERSION:${cluster_version} \ + -v DATA_VERSION:${data_version} \ + -v TEST_DATA_DIR:/testdata \ + compatibility/read.robot } test_cross_compatibility() { - echo "Starting cluster with COMPOSE_FILE=${COMPOSE_FILE}" + echo "Starting ${cluster_version} cluster with COMPOSE_FILE=${COMPOSE_FILE}" - OZONE_KEEP_RESULTS=true start_docker_env + OZONE_KEEP_RESULTS=true start_docker_env 5 execute_command_in_container kms hadoop key create ${OZONE_BUCKET_KEY_NAME} - new_client test_bucket_encryption - - container=scm _kinit - execute_command_in_container scm ozone freon ockg -n1 -t1 -p warmup - new_client _write - new_client _read ${current_version} - for client_version in "$@"; do - client="old_client_${client_version//./_}" + _init - old_client test_bucket_encryption + # first write with client matching cluster version + client_version="${cluster_version}" client _write - old_client _write - old_client _read ${client_version} - - old_client _read ${current_version} - new_client _read ${client_version} + for client_version in "$@"; do + # skip write, since already done + if [[ "${client_version}" == "${cluster_version}" ]]; then + continue + fi + client _write done - KEEP_RUNNING=false stop_docker_env -} - -test_ec_cross_compatibility() { - echo "Running Erasure Coded storage backward compatibility tests." - # local cluster_versions_with_ec="1.3.0 1.4.0 ${current_version}" - local cluster_versions_with_ec="${current_version}" # until HDDS-11334 - # TODO: debug acceptance test failures for client versions 1.0.0 on secure clusters - local non_ec_client_versions="1.1.0 1.2.1" - - for cluster_version in ${cluster_versions_with_ec}; do - export COMPOSE_FILE=new-cluster.yaml:clients.yaml cluster_version=${cluster_version} - OZONE_KEEP_RESULTS=true start_docker_env 5 - - echo -n "Generating data locally... " - dd if=/dev/urandom of=/tmp/1mb bs=1048576 count=1 >/dev/null 2>&1 - dd if=/dev/urandom of=/tmp/2mb bs=1048576 count=2 >/dev/null 2>&1 - dd if=/dev/urandom of=/tmp/3mb bs=1048576 count=3 >/dev/null 2>&1 - echo "done" - echo -n "Copy data into client containers... 
" - for container in $(docker ps --format '{{.Names}}' | grep client); do - docker cp /tmp/1mb ${container}:/tmp/1mb - docker cp /tmp/2mb ${container}:/tmp/2mb - docker cp /tmp/3mb ${container}:/tmp/3mb - done - echo "done" - rm -f /tmp/1mb /tmp/2mb /tmp/3mb + for client_version in "$@"; do + for data_version in $(echo "$client_version" "$cluster_version" "$current_version" | xargs -n1 | sort -u); do + # do not test old-only scenario + if [[ "${cluster_version}" != "${current_version}" ]] \ + && [[ "${client_version}" != "${current_version}" ]] \ + && [[ "${data_version}" != "${current_version}" ]]; then + continue + fi - local prefix=$(LC_CTYPE=C tr -dc '[:alnum:]' < /dev/urandom | head -c 5 | tr '[:upper:]' '[:lower:]') - OZONE_DIR=/opt/hadoop - new_client _kinit - execute_robot_test new_client --include setup-ec-data -N "xcompat-cluster-${cluster_version}-setup-data" -v prefix:"${prefix}" ec/backward-compat.robot - OZONE_DIR=/opt/ozone - - for client_version in ${non_ec_client_versions}; do - client="old_client_${client_version//./_}" - unset OUTPUT_PATH - container="${client}" _kinit - execute_robot_test "${client}" --include test-ec-compat -N "xcompat-cluster-${cluster_version}-client-${client_version}-read-${cluster_version}" -v prefix:"${prefix}" ec/backward-compat.robot + client _read ${data_version} done - - KEEP_RUNNING=false stop_docker_env done + + KEEP_RUNNING=false stop_docker_env } create_results_dir # current cluster with various clients -COMPOSE_FILE=new-cluster.yaml:clients.yaml cluster_version=${current_version} test_cross_compatibility ${old_versions} +COMPOSE_FILE=new-cluster.yaml:clients.yaml cluster_version=${current_version} test_cross_compatibility ${old_versions} ${current_version} # old cluster with clients: same version and current version for cluster_version in ${old_versions}; do export OZONE_VERSION=${cluster_version} - COMPOSE_FILE=old-cluster.yaml:clients.yaml test_cross_compatibility ${cluster_version} + COMPOSE_FILE=old-cluster.yaml:clients.yaml test_cross_compatibility ${cluster_version} ${current_version} done - -test_ec_cross_compatibility diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml index 88a36835c29..f451a893eba 100644 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml +++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml @@ -20,7 +20,7 @@ metadata: data: OZONE-SITE.XML_hdds.datanode.dir: "/data/storage" - OZONE-SITE.XML_ozone.scm.datanode.id.dir: "/data" + OZONE-SITE.XML_ozone.scm.datanode.id.dir: "/data/metadata" OZONE-SITE.XML_ozone.metadata.dirs: "/data/metadata" OZONE-SITE.XML_ozone.scm.block.client.address: "scm-0.scm" OZONE-SITE.XML_ozone.om.address: "om-0.om" diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml index b3acc6f1d22..bb0608dac82 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml @@ -19,13 +19,19 @@ kind: ConfigMap metadata: name: config data: + HTTPFS-SITE.XML_httpfs.hadoop.config.dir: /opt/hadoop/etc/config + CORE-SITE.XML_fs.defaultFS: ofs://om/ + CORE-SITE.XML_fs.trash.interval: "1" + HTTPFS-SITE.XML_httpfs.proxyuser.hadoop.hosts: "*" + HTTPFS-SITE.XML_httpfs.proxyuser.hadoop.groups: "*" OZONE-SITE.XML_hdds.datanode.dir: /data/storage - 
OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data + OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data/metadata OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata OZONE-SITE.XML_ozone.scm.block.client.address: scm-0.scm OZONE-SITE.XML_ozone.om.address: om-0.om OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm OZONE-SITE.XML_ozone.scm.names: scm-0.scm + OZONE-SITE.XML_ozone.recon.address: recon-0.recon OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3" OZONE-SITE.XML_ozone.datanode.pipeline.limit: "1" OZONE-SITE.XML_dfs.datanode.use.datanode.hostname: "true" diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/httpfs-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/httpfs-public-service.yaml new file mode 100644 index 00000000000..d8586250553 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/httpfs-public-service.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: httpfs-public +spec: + ports: + - port: 14000 + name: rest + selector: + app: ozone + component: httpfs + type: NodePort diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/httpfs-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/httpfs-service.yaml new file mode 100644 index 00000000000..0ab49c2d72e --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/httpfs-service.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: Service +metadata: + name: httpfs +spec: + ports: + - port: 14000 + name: rest + clusterIP: None + selector: + app: ozone + component: httpfs diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/httpfs-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/httpfs-statefulset.yaml new file mode 100644 index 00000000000..7bca21585c1 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/httpfs-statefulset.yaml @@ -0,0 +1,55 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: httpfs + labels: + app.kubernetes.io/component: ozone +spec: + selector: + matchLabels: + app: ozone + component: httpfs + serviceName: httpfs + replicas: 1 + template: + metadata: + labels: + app: ozone + component: httpfs + spec: + containers: + - name: httpfs + image: '@docker.image@' + args: + - ozone + - httpfs + livenessProbe: + httpGet: + path: /webhdfs/v1/?op=LISTSTATUS&user.name=hadoop + port: 14000 + initialDelaySeconds: 30 + envFrom: + - configMapRef: + name: config + volumeMounts: + - name: data + mountPath: /data + volumes: + - name: data + emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/kustomization.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/kustomization.yaml index 3059b9c801e..4f60be17872 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/kustomization.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/kustomization.yaml @@ -24,7 +24,13 @@ resources: - s3g-statefulset.yaml - scm-service.yaml - scm-statefulset.yaml +- httpfs-service.yaml +- httpfs-statefulset.yaml +- recon-service.yaml +- recon-statefulset.yaml - datanode-public-service.yaml - om-public-service.yaml - s3g-public-service.yaml - scm-public-service.yaml +- httpfs-public-service.yaml +- recon-public-service.yaml diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/recon-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/recon-public-service.yaml new file mode 100644 index 00000000000..c737a02f446 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/recon-public-service.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: recon-public +spec: + ports: + - port: 9888 + name: ui + selector: + app: ozone + component: recon + type: NodePort diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/recon-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/recon-service.yaml new file mode 100644 index 00000000000..9c52d393d55 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/recon-service.yaml @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: recon +spec: + ports: + - port: 9888 + name: ui + clusterIP: None + selector: + app: ozone + component: recon diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/recon-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/recon-statefulset.yaml new file mode 100644 index 00000000000..8b9ee191d03 --- /dev/null +++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/recon-statefulset.yaml @@ -0,0 +1,63 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: recon + labels: + app.kubernetes.io/component: ozone +spec: + selector: + matchLabels: + app: ozone + component: recon + serviceName: recon + replicas: 1 + template: + metadata: + labels: + app: ozone + component: recon + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9888" + prometheus.io/path: /prom + spec: + securityContext: + fsGroup: 1000 + containers: + - name: recon + image: '@docker.image@' + args: + - ozone + - recon + env: + - name: WAITFOR + value: scm-0.scm:9876 + livenessProbe: + tcpSocket: + port: 9891 + initialDelaySeconds: 30 + envFrom: + - configMapRef: + name: config + volumeMounts: + - name: data + mountPath: /data + volumes: + - name: data + emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml index b3acc6f1d22..2a0cf869a59 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml @@ -20,7 +20,7 @@ metadata: name: config data: OZONE-SITE.XML_hdds.datanode.dir: /data/storage - OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data + OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data/metadata OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata OZONE-SITE.XML_ozone.scm.block.client.address: scm-0.scm OZONE-SITE.XML_ozone.om.address: om-0.om diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml index 122382afdbd..1d85fe91f5c 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml @@ -20,7 +20,7 @@ metadata: name: config data: OZONE-SITE.XML_hdds.datanode.dir: /data/storage - OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data + OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data/metadata OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata OZONE-SITE.XML_ozone.scm.block.client.address: scm-0.scm OZONE-SITE.XML_ozone.om.address: om-0.om diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/config-configmap.yaml index 61555e1eb56..193a0618ab4 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/config-configmap.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/config-configmap.yaml @@ -20,7 +20,7 @@ metadata: name: config data: OZONE-SITE.XML_hdds.datanode.dir: /data/storage - OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data + OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data/metadata OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata OZONE-SITE.XML_ozone.om.address: om-0.om OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3" diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml index 92fe9166d03..85930731050 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml @@ -20,7 +20,7 @@ metadata: name: config data: OZONE-SITE.XML_hdds.datanode.dir: /data/storage - OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data + OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data/metadata OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata OZONE-SITE.XML_ozone.scm.block.client.address: scm-0.scm OZONE-SITE.XML_ozone.om.address: 
om-0.om diff --git a/hadoop-ozone/dist/src/main/keytabs/HTTP.keytab b/hadoop-ozone/dist/src/main/keytabs/HTTP.keytab deleted file mode 100755 index 889de4410dc..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/HTTP.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/keytabs/dn.keytab b/hadoop-ozone/dist/src/main/keytabs/dn.keytab deleted file mode 100755 index a3f03127c81..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/dn.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/keytabs/hadoop.keytab b/hadoop-ozone/dist/src/main/keytabs/hadoop.keytab deleted file mode 100755 index 239f7271404..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/hadoop.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/keytabs/httpfs.keytab b/hadoop-ozone/dist/src/main/keytabs/httpfs.keytab deleted file mode 100755 index 99b446f2178..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/httpfs.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/keytabs/jhs.keytab b/hadoop-ozone/dist/src/main/keytabs/jhs.keytab deleted file mode 100755 index 458b42240a5..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/jhs.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/keytabs/nm.keytab b/hadoop-ozone/dist/src/main/keytabs/nm.keytab deleted file mode 100755 index 8b325773acd..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/nm.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/keytabs/om.keytab b/hadoop-ozone/dist/src/main/keytabs/om.keytab deleted file mode 100755 index fa4a1c59c06..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/om.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/keytabs/recon.keytab b/hadoop-ozone/dist/src/main/keytabs/recon.keytab deleted file mode 100755 index 4bd3c9e38f4..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/recon.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/keytabs/rm.keytab b/hadoop-ozone/dist/src/main/keytabs/rm.keytab deleted file mode 100755 index 17feed24128..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/rm.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/keytabs/s3g.keytab b/hadoop-ozone/dist/src/main/keytabs/s3g.keytab deleted file mode 100755 index e7722c546aa..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/s3g.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/keytabs/scm.keytab b/hadoop-ozone/dist/src/main/keytabs/scm.keytab deleted file mode 100755 index adb7cd58054..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/scm.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/keytabs/testuser.keytab b/hadoop-ozone/dist/src/main/keytabs/testuser.keytab deleted file mode 100755 index add20797af6..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/testuser.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/keytabs/testuser2.keytab b/hadoop-ozone/dist/src/main/keytabs/testuser2.keytab deleted file mode 100755 index 55a9167579a..00000000000 Binary files a/hadoop-ozone/dist/src/main/keytabs/testuser2.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/keytabs/update-keytabs.sh b/hadoop-ozone/dist/src/main/keytabs/update-keytabs.sh deleted file mode 100755 index 5094a6bf857..00000000000 --- a/hadoop-ozone/dist/src/main/keytabs/update-keytabs.sh +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache 
Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd ) -set -ex - -export_keytab() { - kadmin.local -q "ktadd -norandkey -k /etc/security/keytabs/$2.keytab $1@EXAMPLE.COM" -} - -#this section supposed to be executed inside a docker image which already -#has these credentials -# -# the rest of the code executes this part inside a container -# -if [ "$1" == "internal" ]; then - rm /etc/security/keytabs/*.keytab - - export_keytab scm/scm scm - export_keytab HTTP/scm scm - export_keytab testuser/scm scm - export_keytab testuser2/scm scm - - export_keytab testuser/dn testuser - export_keytab testuser/httpfs testuser - export_keytab testuser/om testuser - export_keytab testuser/recon testuser - export_keytab testuser/s3g testuser - export_keytab testuser/scm testuser - - export_keytab testuser2/dn testuser2 - export_keytab testuser2/httpfs testuser2 - export_keytab testuser2/om testuser2 - export_keytab testuser2/recon testuser2 - export_keytab testuser2/s3g testuser2 - export_keytab testuser2/scm testuser2 - - export_keytab om/om om - export_keytab HTTP/om om - export_keytab testuser/om om - export_keytab testuser2/om om - - export_keytab s3g/s3g s3g - export_keytab HTTP/s3g s3g - export_keytab testuser/s3g s3g - export_keytab testuser2/s3g s3g - - export_keytab httpfs/httpfs httpfs - export_keytab HTTP/httpfs httpfs - export_keytab testuser/httpfs httpfs - export_keytab testuser2/httpfs httpfs - - export_keytab recon/recon recon - export_keytab HTTP/recon recon - export_keytab testuser/recon recon - export_keytab testuser2/recon recon - - export_keytab dn/dn dn - export_keytab HTTP/dn dn - export_keytab testuser/dn dn - export_keytab testuser2/dn dn - - export_keytab HTTP/scm HTTP - export_keytab HTTP/s3g HTTP - export_keytab HTTP/httpfs HTTP - export_keytab HTTP/ozone HTTP - - export_keytab hadoop/rm hadoop - - export_keytab rm/rm rm - export_keytab nm/nm nm - export_keytab jhs/jhs jhs - - - - chmod 755 /etc/security/keytabs/*.keytab - chown 1000. 
/etc/security/keytabs/*.keytab - exit 0 -fi - -TESTKRB5_IMAGE=$(mvn -f "$SCRIPT_DIR"/../../../pom.xml help:evaluate -Dexpression=docker.ozone-testkr5b.image -q -DforceStdout -Dscan=false) - -docker run -it --entrypoint=/etc/security/keytabs/update-keytabs.sh -v "$SCRIPT_DIR":/etc/security/keytabs $TESTKRB5_IMAGE internal - - diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt index 9cb9202be0b..b291afc568a 100644 --- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt +++ b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt @@ -300,6 +300,7 @@ Apache License 2.0 com.nimbusds:nimbus-jose-jwt com.squareup.okhttp3:okhttp com.squareup.okio:okio + com.squareup.okio:okio-jvm commons-beanutils:commons-beanutils commons-cli:commons-cli commons-codec:commons-codec @@ -314,6 +315,7 @@ Apache License 2.0 commons-validator:commons-validator commons-fileupload:commons-fileupload info.picocli:picocli + info.picocli:picocli-shell-jline3 io.dropwizard.metrics:metrics-core io.grpc:grpc-api io.grpc:grpc-context @@ -431,6 +433,8 @@ Apache License 2.0 org.jetbrains:annotations org.jetbrains.kotlin:kotlin-stdlib org.jetbrains.kotlin:kotlin-stdlib-common + org.jetbrains.kotlin:kotlin-stdlib-jdk7 + org.jetbrains.kotlin:kotlin-stdlib-jdk8 org.jboss.weld.servlet:weld-servlet-shaded org.jheaps:jheaps org.jooq:jooq @@ -476,6 +480,7 @@ BSD 3-Clause com.google.re2j:re2j com.jcraft:jsch com.thoughtworks.paranamer:paranamer + org.jline:jline3 org.ow2.asm:asm org.ow2.asm:asm-analysis org.ow2.asm:asm-commons diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt index 1e07ec1a2c2..be48c1d1fe2 100644 --- a/hadoop-ozone/dist/src/main/license/jar-report.txt +++ b/hadoop-ozone/dist/src/main/license/jar-report.txt @@ -63,7 +63,7 @@ share/ozone/lib/hadoop-common.jar share/ozone/lib/hadoop-hdfs-client.jar share/ozone/lib/hadoop-hdfs.jar share/ozone/lib/hadoop-shaded-guava.jar -share/ozone/lib/hadoop-shaded-protobuf_3_7.jar +share/ozone/lib/hadoop-shaded-protobuf_3_25.jar share/ozone/lib/hdds-annotation-processing.jar share/ozone/lib/hdds-client.jar share/ozone/lib/hdds-common.jar @@ -134,6 +134,7 @@ share/ozone/lib/jersey-hk2.jar share/ozone/lib/jersey-media-jaxb.jar share/ozone/lib/jersey-media-json-jackson.jar share/ozone/lib/jersey-server.jar +share/ozone/lib/jettison.jar share/ozone/lib/jetty-client.jar share/ozone/lib/jetty-http.jar share/ozone/lib/jetty-io.jar @@ -150,6 +151,7 @@ share/ozone/lib/jgrapht-core.jar share/ozone/lib/jgrapht-ext.jar share/ozone/lib/jgraphx.jar share/ozone/lib/jheaps.jar +share/ozone/lib/jline.jar share/ozone/lib/jmespath-java.jar share/ozone/lib/jna.jar share/ozone/lib/jna-platform.jar @@ -202,6 +204,7 @@ share/ozone/lib/netty-tcnative-classes.Final.jar share/ozone/lib/netty-transport.Final.jar share/ozone/lib/netty-transport-classes-epoll.Final.jar share/ozone/lib/netty-transport-native-epoll.Final-linux-x86_64.jar +share/ozone/lib/netty-transport-native-epoll.Final.jar share/ozone/lib/netty-transport-native-unix-common.Final.jar share/ozone/lib/nimbus-jose-jwt.jar share/ozone/lib/okhttp.jar @@ -234,6 +237,7 @@ share/ozone/lib/ozone-s3gateway.jar share/ozone/lib/ozone-tools.jar share/ozone/lib/perfmark-api.jar share/ozone/lib/picocli.jar +share/ozone/lib/picocli-shell-jline3.jar share/ozone/lib/protobuf-java.jar share/ozone/lib/protobuf-java.jar share/ozone/lib/protobuf-java-util.jar diff --git a/hadoop-ozone/dist/src/main/license/src/licenses/IMPORTANT.md 
b/hadoop-ozone/dist/src/main/license/src/licenses/IMPORTANT.md index 2581412d320..c473ed2fe16 100644 --- a/hadoop-ozone/dist/src/main/license/src/licenses/IMPORTANT.md +++ b/hadoop-ozone/dist/src/main/license/src/licenses/IMPORTANT.md @@ -14,8 +14,8 @@ # Important -The files from this directory are not copied by automatically to the source distribution package. +The files from this directory are copied automatically to the source distribution package +via the `hadoop-ozone/dist/src/main/assemblies/ozone-src.xml` file. If you add any of the files to here, - * please also adjust `hadoop-ozone/dist/src/main/assemblies/ozone-src.xml` file. - * and copy the dependency to ../../bin/licenses (if it's included in the bin tar) \ No newline at end of file + * copy the dependency to ../../bin/licenses (if it's included in the bin tar) \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot index fae08991781..fa2156258e6 100644 --- a/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot @@ -101,6 +101,35 @@ Report containers as JSON Should contain ${output} stats Should contain ${output} samples +List all containers + ${output} = Execute ozone admin container list --all + Should contain ${output} OPEN + +List all containers according to count (batchSize) + ${output} = Execute ozone admin container list --all --count 10 + Should contain ${output} OPEN + +List all containers from a particular container ID + ${output} = Execute ozone admin container list --all --start 1 + Should contain ${output} OPEN + +Close container + ${container} = Execute ozone admin container list --state OPEN | jq -r 'select(.replicationConfig.replicationFactor == "THREE") | .containerID' | head -1 + Execute ozone admin container close "${container}" + ${output} = Execute ozone admin container info "${container}" + Should contain ${output} CLOS + Wait until keyword succeeds 1min 10sec Container is closed ${container} + +Incomplete command + ${output} = Execute And Ignore Error ozone admin container + Should contain ${output} Incomplete command + Should contain ${output} list + Should contain ${output} info + Should contain ${output} create + Should contain ${output} close + Should contain ${output} report + Should contain ${output} upgrade + #List containers on unknown host # ${output} = Execute And Ignore Error ozone admin --verbose container list --scm unknown-host # Should contain ${output} Invalid host name diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/scmrole.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/scmrole.robot index 29727548561..f1628939451 100644 --- a/hadoop-ozone/dist/src/main/smoketest/admincli/scmrole.robot +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/scmrole.robot @@ -30,4 +30,8 @@ Run scm roles List scm roles as JSON ${output} = Execute ozone admin scm roles --json ${leader} = Execute echo '${output}' | jq -r '.[] | select(.raftPeerRole == "LEADER")' - Should Not Be Equal ${leader} ${EMPTY} \ No newline at end of file + Should Not Be Equal ${leader} ${EMPTY} + +List scm roles as TABLE + ${output} = Execute ozone admin scm roles --table + Should Match Regexp ${output} \\|.*LEADER.* \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/balancer/testBalancer.robot b/hadoop-ozone/dist/src/main/smoketest/balancer/testBalancer.robot index 4299afe5f2d..641bc1462bb 100644 --- 
a/hadoop-ozone/dist/src/main/smoketest/balancer/testBalancer.robot +++ b/hadoop-ozone/dist/src/main/smoketest/balancer/testBalancer.robot @@ -16,6 +16,7 @@ *** Settings *** Documentation Smoketest ozone cluster startup Library OperatingSystem +Library String Library Collections Resource ../commonlib.robot Resource ../ozone-lib/shell.robot @@ -35,7 +36,7 @@ Prepare For Tests Execute dd if=/dev/urandom of=/tmp/100mb bs=1048576 count=100 Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab Execute ozone sh volume create /${VOLUME} - Execute ozone sh bucket create /${VOLUME}/${BUCKET} + Execute ozone sh bucket create --replication ${REPLICATION} --type ${TYPE} /${VOLUME}/${BUCKET} Datanode In Maintenance Mode @@ -61,28 +62,36 @@ Datanode Recommission is Finished Should Not Contain ${result} ENTERING_MAINTENANCE Run Container Balancer - ${result} = Execute ozone admin containerbalancer start -t 1 -d 100 -i 1 + ${result} = Execute ozone admin containerbalancer start -t 0.1 -d 100 -i 3 Should Contain ${result} Container Balancer started successfully. Wait Finish Of Balancing ${result} = Execute ozone admin containerbalancer status - Should Contain ${result} ContainerBalancer is Running. - Wait Until Keyword Succeeds 3min 10sec ContainerBalancer is Not Running - Sleep 60000ms + Wait Until Keyword Succeeds 4min 10sec ContainerBalancer is Not Running -Verify Verbose Balancer Status - [arguments] ${output} + Sleep 60000ms +Verify Balancer Iteration + [arguments] ${output} ${number} Should Contain ${output} ContainerBalancer is Running. Should Contain ${output} Started at: Should Contain ${output} Container Balancer Configuration values: - -Verify Balancer Iteration - [arguments] ${output} ${number} ${status} ${containers} - - Should Contain ${output} Iteration number ${number} - Should Contain ${output} Iteration result ${status} - Should Contain ${output} Scheduled to move containers ${containers} + Should Contain ${output} Iteration number ${number} collapse_spaces=True + Should Contain ${output} Scheduled to move containers collapse_spaces=True + Should Contain ${output} Balancing duration: + Should Contain ${output} Iteration duration + Should Contain ${output} Current iteration info: + +Verify Balancer Iteration History + [arguments] ${output} + Should Contain ${output} Iteration history list: + Should Contain X Times ${output} Size scheduled to move 1 collapse_spaces=True + Should Contain X Times ${output} Moved data size 1 collapse_spaces=True + Should Contain X Times ${output} Scheduled to move containers 1 collapse_spaces=True + Should Contain X Times ${output} Already moved containers 1 collapse_spaces=True + Should Contain X Times ${output} Failed to move containers 0 1 collapse_spaces=True + Should Contain X Times ${output} Failed to move containers by timeout 0 1 collapse_spaces=True + Should Contain ${output} Iteration result ITERATION_COMPLETED collapse_spaces=True Run Balancer Status ${result} = Execute ozone admin containerbalancer status @@ -90,15 +99,14 @@ Run Balancer Status Run Balancer Verbose Status ${result} = Execute ozone admin containerbalancer status -v - Verify Verbose Balancer Status ${result} - Verify Balancer Iteration ${result} 1 IN_PROGRESS 3 - Should Contain ${result} Current iteration info: + Verify Balancer Iteration ${result} 1 + Should Contain ${result} Iteration result - collapse_spaces=True + Run Balancer Verbose History Status ${result} = Execute ozone admin containerbalancer status -v --history - Verify Verbose Balancer 
Status ${result} - Verify Balancer Iteration ${result} 1 IN_PROGRESS 3 - Should Contain ${result} Iteration history list: + Verify Balancer Iteration ${result} 1 + Verify Balancer Iteration History ${result} ContainerBalancer is Not Running ${result} = Execute ozone admin containerbalancer status @@ -111,7 +119,7 @@ Create Multiple Keys ${fileName} = Set Variable file-${INDEX}.txt ${key} = Set Variable /${VOLUME}/${BUCKET}/${fileName} LOG ${fileName} - Create Key ${key} ${file} + Create Key ${key} ${file} --replication=${REPLICATION} --type=${TYPE} Key Should Match Local File ${key} ${file} END @@ -126,14 +134,14 @@ Get Uuid Close All Containers FOR ${INDEX} IN RANGE 15 - ${container} = Execute ozone admin container list --state OPEN | jq -r 'select(.replicationConfig.replicationFactor == "THREE") | .containerID' | head -1 + ${container} = Execute ozone admin container list --state OPEN | jq -r 'select(.replicationConfig.data == 3) | .containerID' | head -1 EXIT FOR LOOP IF "${container}" == "${EMPTY}" ${message} = Execute And Ignore Error ozone admin container close "${container}" Run Keyword If '${message}' != '${EMPTY}' Should Contain ${message} is in closing state ${output} = Execute ozone admin container info "${container}" Should contain ${output} CLOS END - Wait until keyword succeeds 3min 10sec All container is closed + Wait until keyword succeeds 4min 10sec All container is closed All container is closed ${output} = Execute ozone admin container list --state OPEN @@ -146,7 +154,7 @@ Get Datanode Ozone Used Bytes Info [return] ${result} ** Test Cases *** -Verify Container Balancer for RATIS containers +Verify Container Balancer for RATIS/EC containers Prepare For Tests Datanode In Maintenance Mode @@ -169,16 +177,13 @@ Verify Container Balancer for RATIS containers Run Balancer Verbose Status - Run Balancer Verbose History Status + Wait Until Keyword Succeeds 40sec 5sec Run Balancer Verbose History Status Wait Finish Of Balancing ${datanodeOzoneUsedBytesInfoAfterContainerBalancing} = Get Datanode Ozone Used Bytes Info ${uuid} Should Not Be Equal As Integers ${datanodeOzoneUsedBytesInfo} ${datanodeOzoneUsedBytesInfoAfterContainerBalancing} - Should Be True ${datanodeOzoneUsedBytesInfoAfterContainerBalancing} < ${SIZE} * 3.5 - Should Be True ${datanodeOzoneUsedBytesInfoAfterContainerBalancing} > ${SIZE} * 3 - - - - - + #We need to ensure that after balancing, the amount of data recorded on each datanode falls within the following ranges: + #{SIZE}*3 < used < {SIZE}*3.5 for RATIS containers, and {SIZE}*0.7 < used < {SIZE}*1.5 for EC containers. 
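For reference, the balancer flow exercised by the updated test can also be driven by hand with the same admin commands the test executes (thresholds copied from the test above; adjust them for a real cluster):

ozone admin containerbalancer start -t 0.1 -d 100 -i 3
ozone admin containerbalancer status -v --history
ozone admin containerbalancer status    # repeat until the balancer reports it is no longer running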
+ Should Be True ${datanodeOzoneUsedBytesInfoAfterContainerBalancing} < ${SIZE} * ${UPPER_LIMIT} + Should Be True ${datanodeOzoneUsedBytesInfoAfterContainerBalancing} > ${SIZE} * ${LOWER_LIMIT} \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot index 57715cda95f..e6fbdd47dc3 100644 --- a/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot +++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot @@ -18,23 +18,144 @@ Documentation Read Compatibility Resource ../ozone-lib/shell.robot Resource setup.robot Test Timeout 5 minutes -Suite Setup Create Local Test File -*** Variables *** -${SUFFIX} ${EMPTY} + +*** Keywords *** +Key List With Replication + [arguments] ${args} + ${list} = Execute ozone sh key list ${args} + ${result} = Execute echo '${list}' | jq -r '[.name, .replicationType, (.replicationFactor | tostring)] | join (" ")' + [return] ${result} + *** Test Cases *** +Buckets Can Be Listed + ${result} = Execute ozone sh bucket list /vol1 + Should Contain ${result} bucket1 + + IF '${CLUSTER_VERSION}' >= '${EC_VERSION}' + Should Contain ${result} ratis-${CLUSTER_VERSION} + Should Contain ${result} ecbucket-${CLUSTER_VERSION} + END + +Bucket Without Replication Config + Verify Bucket Empty Replication Config /vol1/bucket1 + +Bucket With Replication Config + Pass Execution If '${CLUSTER_VERSION}' < '${EC_VERSION}' Cluster does not support EC + + IF '${CLIENT_VERSION}' >= '${EC_VERSION}' + Verify Bucket Replica Replication Config /vol1/ratis-${CLUSTER_VERSION} RATIS THREE + Verify Bucket EC Replication Config /vol1/ecbucket-${CLUSTER_VERSION} RS 3 2 1048576 + ELSE + Verify Bucket Empty Replication Config /vol1/ratis-${CLUSTER_VERSION} + Verify Bucket Empty Replication Config /vol1/ecbucket-${CLUSTER_VERSION} + END + Key Can Be Read - Key Should Match Local File /vol1/bucket1/key-${SUFFIX} ${TESTFILE} + Key Should Match Local File /vol1/bucket1/key-${DATA_VERSION} ${TESTFILE} + +Encrypted Key Can Be Read + Key Should Match Local File /vol1/encrypted-${DATA_VERSION}/key ${TESTFILE} + File Should Match Local File ofs://om/vol1/encrypted-${DATA_VERSION}/key ${TESTFILE} + +Key Read From Bucket With Replication + Pass Execution If '${CLUSTER_VERSION}' < '${EC_VERSION}' Cluster does not support EC + + Key Should Match Local File /vol1/ratis-${CLUSTER_VERSION}/key-${DATA_VERSION} ${TESTFILE} + + IF '${CLIENT_VERSION}' >= '${EC_VERSION}' or '${DATA_VERSION}' == '${CLIENT_VERSION}' + Key Should Match Local File /vol1/ecbucket-${CLUSTER_VERSION}/key-${DATA_VERSION} ${TESTFILE} + ELSE + Assert Unsupported ozone sh key get -f /vol1/ecbucket-${CLUSTER_VERSION}/key-${DATA_VERSION} /dev/null + END Dir Can Be Listed - Execute ozone fs -ls o3fs://bucket1.vol1/dir-${SUFFIX} + ${result} = Execute ozone fs -ls o3fs://bucket1.vol1/dir-${DATA_VERSION} + Should contain ${result} dir-${DATA_VERSION}/file-${DATA_VERSION} + + Pass Execution If '${CLUSTER_VERSION}' < '${EC_VERSION}' Cluster does not support EC + +# TODO HDDS-11803 +# ${result} = Execute ozone fs -ls ofs://om/vol1/ +# Should contain ${result} /vol1/ratis-${CLUSTER_VERSION} +# Should contain ${result} /vol1/ecbucket-${CLUSTER_VERSION} + + IF '${CLIENT_VERSION}' < '${EC_VERSION}' + ${result} = Execute and checkrc ozone fs -ls ofs://om/vol1/ecbucket-${CLUSTER_VERSION}/ 1 + Should contain ${result} ls: The list of keys contains keys with Erasure Coded replication set + END + +File Can Be Listed + ${result} = Execute ozone fs 
-ls o3fs://bucket1.vol1/dir-${DATA_VERSION}/file-${DATA_VERSION} + Should contain ${result} dir-${DATA_VERSION}/file-${DATA_VERSION} + + Pass Execution If '${CLUSTER_VERSION}' < '${EC_VERSION}' Cluster does not support EC + + ${result} = Execute ozone fs -ls ofs://om/vol1/ratis-${CLUSTER_VERSION}/file-${DATA_VERSION} + Should contain ${result} /vol1/ratis-${CLUSTER_VERSION}/file-${DATA_VERSION} + + IF '${CLIENT_VERSION}' >= '${EC_VERSION}' or '${DATA_VERSION}' == '${CLIENT_VERSION}' + ${result} = Execute ozone fs -ls ofs://om/vol1/ecbucket-${CLUSTER_VERSION}/file-${DATA_VERSION} + Should contain ${result} /vol1/ecbucket-${CLUSTER_VERSION}/file-${DATA_VERSION} + ELSE + ${result} = Execute and checkrc ozone fs -ls ofs://om/vol1/ecbucket-${CLUSTER_VERSION}/file-${DATA_VERSION} 1 + Should contain ${result} : No such file or directory + END + +Key List + IF '${CLIENT_VERSION}' >= '${EC_VERSION}' + ${result} = Execute ozone sh key list /vol1/bucket1 + Should Contain ${result} key-${DATA_VERSION} + ELSE IF '${DATA_VERSION}' < '${EC_VERSION}' # New client creates RATIS/ONE key by default: BUG? + ${result} = Key List With Replication /vol1/bucket1 + Should contain ${result} key-${DATA_VERSION} RATIS 3 + END + +Key List In Bucket With Replication + Pass Execution If '${CLUSTER_VERSION}' < '${EC_VERSION}' Cluster does not support EC + + IF '${CLIENT_VERSION}' < '${EC_VERSION}' + ${result} = Key List With Replication /vol1/ratis-${CLUSTER_VERSION}/ + Should contain ${result} key-${DATA_VERSION} RATIS 3 + + Assert Unsupported ozone sh key list /vol1/ecbucket-${CLUSTER_VERSION}/ + ELSE + ${result} = Execute ozone sh key list /vol1/ratis-${CLUSTER_VERSION} + Should Contain ${result} key-${DATA_VERSION} + ${result} = Execute ozone sh key list /vol1/ecbucket-${CLUSTER_VERSION} + Should Contain ${result} key-${DATA_VERSION} + END + Dir Can Be Listed Using Shell ${result} = Execute ozone sh key list /vol1/bucket1 Should Contain ${result} key-${SUFFIX} File Can Be Get - Execute ozone fs -get o3fs://bucket1.vol1/dir-${SUFFIX}/file-${SUFFIX} /tmp/ - Execute diff -q ${TESTFILE} /tmp/file-${SUFFIX} - [teardown] Execute rm /tmp/file-${SUFFIX} + Key Should Match Local File /vol1/bucket1/dir-${DATA_VERSION}/file-${DATA_VERSION} ${TESTFILE} + File Should Match Local File o3fs://bucket1.vol1/dir-${DATA_VERSION}/file-${DATA_VERSION} ${TESTFILE} + +File Can Be Get From Bucket With Replication + Pass Execution If '${CLUSTER_VERSION}' < '${EC_VERSION}' Cluster does not support EC + + File Should Match Local File ofs://om/vol1/ratis-${CLUSTER_VERSION}/file-${DATA_VERSION} ${TESTFILE} + + IF '${CLIENT_VERSION}' >= '${EC_VERSION}' or '${DATA_VERSION}' == '${CLIENT_VERSION}' + File Should Match Local File ofs://om/vol1/ecbucket-${CLUSTER_VERSION}/key-${DATA_VERSION} ${TESTFILE} + ELSE + ${result} = Execute and checkrc ozone fs -get ofs://om/vol1/ecbucket-${CLUSTER_VERSION}/key-${DATA_VERSION} 1 + Should contain ${result} : No such file or directory + END + +FSO Bucket Can Be Read + Pass Execution If '${DATA_VERSION}' < '${FSO_VERSION}' Skipped write test case + Pass Execution If '${CLIENT_VERSION}' < '${FSO_VERSION}' Client does not support FSO + Pass Execution If '${CLUSTER_VERSION}' < '${FSO_VERSION}' Cluster does not support FSO + File Should Match Local File ofs://om/vol1/fso-bucket-${DATA_VERSION}/dir/subdir/file ${TESTFILE} + +HSync Lease Recover Can Be Used + Pass Execution If '${DATA_VERSION}' < '${FSO_VERSION}' Skipped write test case + Pass Execution If '${CLIENT_VERSION}' < '${HSYNC_VERSION}' Client does not 
support HSYNC + Pass Execution If '${CLUSTER_VERSION}' < '${HSYNC_VERSION}' Cluster does not support HSYNC + Execute ozone debug recover --path=ofs://om/vol1/fso-bucket-${DATA_VERSION}/dir/subdir/file diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/setup.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/setup.robot index ae765f23e2b..4f41d280a6c 100644 --- a/hadoop-ozone/dist/src/main/smoketest/compatibility/setup.robot +++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/setup.robot @@ -19,10 +19,7 @@ Library OperatingSystem Resource ../ozone-lib/shell.robot *** Variables *** -${SUFFIX} ${EMPTY} - - -*** Keywords *** -Create Local Test File - Set Suite Variable ${TESTFILE} /tmp/test-data-${SUFFIX}.txt - Create File ${TESTFILE} Compatibility Test +${EC_VERSION} 1.3.0 +${FSO_VERSION} 1.3.0 +${HSYNC_VERSION} 2.0.0 +${TESTFILE} ${TEST_DATA_DIR}/small diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/write.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/write.robot index 4c611d4287b..0497edaca16 100644 --- a/hadoop-ozone/dist/src/main/smoketest/compatibility/write.robot +++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/write.robot @@ -17,19 +17,65 @@ Documentation Write Compatibility Resource ../ozone-lib/shell.robot Resource setup.robot +Resource ../lib/fs.robot +Resource ../ozone-lib/freon.robot Test Timeout 5 minutes -Suite Setup Create Local Test File *** Variables *** -${SUFFIX} ${EMPTY} +${ENCRYPTION_KEY} key1 *** Test Cases *** +Create Bucket With Replication Type + Pass Execution If '${CLIENT_VERSION}' < '${EC_VERSION}' Client does not support EC + Pass Execution If '${CLUSTER_VERSION}' < '${EC_VERSION}' Cluster does not support EC + Execute ozone sh bucket create --replication 3 --type RATIS /vol1/ratis-${CLIENT_VERSION} + Execute ozone sh bucket create --replication rs-3-2-1024k --type EC /vol1/ecbucket-${CLIENT_VERSION} + +Create Encrypted Bucket + Execute ozone sh bucket create -k ${ENCRYPTION_KEY} /vol1/encrypted-${CLIENT_VERSION} + +Create Key in Encrypted Bucket + Execute ozone sh key put /vol1/encrypted-${CLIENT_VERSION}/key ${TESTFILE} + Key Can Be Written - Create Key /vol1/bucket1/key-${SUFFIX} ${TESTFILE} + Create Key /vol1/bucket1/key-${CLIENT_VERSION} ${TESTFILE} + +Key Can Be Written To Bucket With Replication Type + Pass Execution If '${CLUSTER_VERSION}' < '${EC_VERSION}' Cluster does not support EC + Execute ozone sh key put /vol1/ratis-${CLUSTER_VERSION}/key-${CLIENT_VERSION} ${TESTFILE} + Execute ozone sh key put /vol1/ecbucket-${CLUSTER_VERSION}/key-${CLIENT_VERSION} ${TESTFILE} + +Key Can Be Deleted + Create Key /vol1/bucket1/to-be-deleted-${CLIENT_VERSION} ${TESTFILE} + Execute ozone sh key delete /vol1/bucket1/to-be-deleted-${CLIENT_VERSION} Dir Can Be Created - Execute ozone fs -mkdir o3fs://bucket1.vol1/dir-${SUFFIX} + Execute ozone fs -mkdir o3fs://bucket1.vol1/dir-${CLIENT_VERSION} File Can Be Put - Execute ozone fs -put ${TESTFILE} o3fs://bucket1.vol1/dir-${SUFFIX}/file-${SUFFIX} + Execute ozone fs -put ${TESTFILE} o3fs://bucket1.vol1/dir-${CLIENT_VERSION}/file-${CLIENT_VERSION} + +File Can Be Put To Bucket With Replication Type + Pass Execution If '${CLUSTER_VERSION}' < '${EC_VERSION}' Cluster does not support EC + Execute ozone fs -put ${TESTFILE} ofs://om/vol1/ratis-${CLUSTER_VERSION}/file-${CLIENT_VERSION} + Execute ozone fs -put ${TESTFILE} ofs://om/vol1/ecbucket-${CLUSTER_VERSION}/file-${CLIENT_VERSION} + +File Can Be Deleted + Execute ozone fs -put ${TESTFILE} 
o3fs://bucket1.vol1/dir-${CLIENT_VERSION}/to-be-deleted + Execute ozone fs -rm -skipTrash o3fs://bucket1.vol1/dir-${CLIENT_VERSION}/to-be-deleted + +FSO Bucket Can Be Created and Used + Pass Execution If '${CLIENT_VERSION}' < '${FSO_VERSION}' Client does not support FSO + Pass Execution If '${CLUSTER_VERSION}' < '${FSO_VERSION}' Cluster does not support FSO + Execute ozone sh bucket create --layout FILE_SYSTEM_OPTIMIZED /vol1/fso-bucket-${CLIENT_VERSION} + Execute ozone fs -mkdir -p ofs://om/vol1/fso-bucket-${CLIENT_VERSION}/dir/subdir + Execute ozone fs -put ${TESTFILE} ofs://om/vol1/fso-bucket-${CLIENT_VERSION}/dir/subdir/file + +HSync Can Be Used To Create Keys + Pass Execution If '${CLIENT_VERSION}' < '${HSYNC_VERSION}' Client does not support HSYNC + Pass Execution If '${CLUSTER_VERSION}' < '${HSYNC_VERSION}' Cluster does not support HSYNC + ${o3fspath} = Format FS URL o3fs vol1 bucket1 + Freon DFSG sync=HSYNC n=1 path=${o3fspath} + ${pfspath} = Format FS URL ofs $vol1 bucket1 + Freon DFSG sync=HSYNC n=1 path=${pfspath} diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot index e006e154af1..0fa43dee6c0 100644 --- a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot +++ b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot @@ -32,10 +32,12 @@ Write keys Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab Execute ozone sh volume create ${VOLUME} Execute ozone sh bucket create ${VOLUME}/${BUCKET} -l OBJECT_STORE - Execute dd if=/dev/urandom of=${TEMP_DIR}/${TESTFILE} bs=100000 count=15 - Execute ozone sh key put ${VOLUME}/${BUCKET}/${TESTFILE}1 ${TEMP_DIR}/${TESTFILE} - Execute ozone sh key put ${VOLUME}/${BUCKET}/${TESTFILE}2 ${TEMP_DIR}/${TESTFILE} - Execute ozone sh key put ${VOLUME}/${BUCKET}/${TESTFILE}3 ${TEMP_DIR}/${TESTFILE} + Execute dd if=/dev/urandom of=${TEMP_DIR}/${TESTFILE}1 bs=100 count=10 + Execute ozone sh key put ${VOLUME}/${BUCKET}/${TESTFILE}1 ${TEMP_DIR}/${TESTFILE}1 + Execute dd if=/dev/urandom of=${TEMP_DIR}/${TESTFILE}2 bs=100 count=15 + Execute ozone sh key put ${VOLUME}/${BUCKET}/${TESTFILE}2 ${TEMP_DIR}/${TESTFILE}2 + Execute dd if=/dev/urandom of=${TEMP_DIR}/${TESTFILE}3 bs=100 count=20 + Execute ozone sh key put ${VOLUME}/${BUCKET}/${TESTFILE}3 ${TEMP_DIR}/${TESTFILE}3 Execute ozone sh key addacl -a user:systest:a ${VOLUME}/${BUCKET}/${TESTFILE}3 *** Test Cases *** @@ -53,6 +55,16 @@ Test ozone debug ldb scan Should contain ${output} testfile1 Should contain ${output} testfile2 Should contain ${output} testfile3 + # test key is included with --with-keys + ${output1} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable | jq '."\/cli-debug-volume\/cli-debug-bucket\/testfile1"' + ${output2} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --with-keys | jq '."\/cli-debug-volume\/cli-debug-bucket\/testfile1"' + ${output3} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --with-keys=true | jq '."\/cli-debug-volume\/cli-debug-bucket\/testfile1"' + Should contain ${output1} testfile1 + Should Be Equal ${output1} ${output2} + Should Be Equal ${output1} ${output3} + # test key is ommitted with --with-keys set to false + ${output} = Execute and Ignore Error ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --with-keys=false | jq '."\/cli-debug-volume\/cli-debug-bucket\/testfile1"' + Should contain ${output} Cannot index array with string # test 
startkey option ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --startkey="/cli-debug-volume/cli-debug-bucket/testfile2" Should not contain ${output} testfile1 @@ -71,6 +83,8 @@ Test ozone debug ldb scan Should not contain ${output} objectID Should not contain ${output} dataSize Should not contain ${output} keyLocationVersions + +Test ozone debug ldb scan with filter option success # test filter option with one filter ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="keyName:equals:testfile2" Should not contain ${output} testfile1 @@ -91,3 +105,42 @@ Test ozone debug ldb scan Should not contain ${output} testfile1 Should not contain ${output} testfile2 Should not contain ${output} testfile3 + # test filter option for size > 1200 + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="dataSize:greater:1200" + Should not contain ${output} testfile1 + Should contain ${output} testfile2 + Should contain ${output} testfile3 + # test filter option for size < 1200 + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="dataSize:lesser:1200" + Should contain ${output} testfile1 + Should not contain ${output} testfile2 + Should not contain ${output} testfile3 + # test filter option with no records match both filters + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="dataSize:lesser:1200,keyName:equals:testfile2" + Should not contain ${output} testfile1 + Should not contain ${output} testfile2 + Should not contain ${output} testfile3 + # test filter option with regex matching numbers + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="dataSize:regex:^1[0-2]{3}$" + Should contain ${output} testfile1 + Should not contain ${output} testfile2 + Should not contain ${output} testfile3 + # test filter option with regex matching string + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="keyName:regex:^test.*[0-1]$" + Should contain ${output} testfile1 + Should not contain ${output} testfile2 + Should not contain ${output} testfile3 + +Test ozone debug ldb scan with filter option failure + # test filter option with invalid operator + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="dataSize:lesserthan:1200" + Should contain ${output} Error: Invalid operator + # test filter option with invalid format + ${output} = Execute And Ignore Error ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="dataSize:1200" + Should contain ${output} Error: Invalid format + # test filter option with invalid field + ${output} = Execute And Ignore Error ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="size:equals:1200" + Should contain ${output} Error: Invalid field + # test filter option for lesser/greater operator on non-numeric field + ${output} = Execute And Ignore Error ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="keyName:lesser:k1" + Should contain ${output} only on numeric values diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests.robot index ca1995bf3a1..4e013e2a64b 100644 --- a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests.robot +++ b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests.robot @@ -49,3 +49,8 @@ Test ozone debug 
read-replicas FOR ${replica} IN RANGE 3 Verify Healthy Replica ${json} ${replica} ${md5sum} END + + +Test ozone debug version + ${output} = Execute ozone debug version + Execute echo '${output}' | jq -r '.' # validate JSON diff --git a/hadoop-ozone/dist/src/main/smoketest/ec/backward-compat.robot b/hadoop-ozone/dist/src/main/smoketest/ec/backward-compat.robot deleted file mode 100644 index d64c7686870..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/ec/backward-compat.robot +++ /dev/null @@ -1,107 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation Test EC backward compatibility -Library OperatingSystem -Resource lib.resource - -*** Variables *** -${PREFIX} ${EMPTY} -${VOLUME} vol${PREFIX} - -*** Test Cases *** -Setup Cluster Data - [Tags] setup-ec-data - Prepare Data For Xcompat Tests - -Test Read Key Compat - [Tags] test-ec-compat - Key Should Match Local File /${VOLUME}/ratis/3mb /tmp/3mb - Key Should Match Local File /${VOLUME}/default/3mb /tmp/3mb - - ${result} = Execute and checkrc ozone sh key get -f /${VOLUME}/ecbucket/3mb /dev/null 255 - Should Contain ${result} NOT_SUPPORTED_OPERATION - -Test Listing Compat - [Tags] test-ec-compat - ${result} = Execute ozone sh volume list | jq -r '.name' - Should contain ${result} ${VOLUME} - ${result} = Execute ozone sh bucket list /${VOLUME}/ | jq -r '.name' - Should contain ${result} default - Should contain ${result} ratis - Should contain ${result} ec - ${result} = Execute ozone sh key list /${VOLUME}/default/ | jq -r '[.name, .replicationType, (.replicationFactor | tostring)] | join (" ")' - Should contain ${result} 3mb RATIS 3 - ${result} = Execute ozone sh key list /${VOLUME}/ratis/ | jq -r '[.name, .replicationType, (.replicationFactor | tostring)] | join (" ")' - Should contain ${result} 3mb RATIS 3 - - ${result} = Execute and checkrc ozone sh key list /${VOLUME}/ecbucket/ 255 - Should contain ${result} NOT_SUPPORTED_OPERATION - -Test Info Compat - [Tags] test-ec-compat - ${result} = Execute ozone sh volume info ${VOLUME} | jq -r '.name' - Should contain ${result} ${VOLUME} - ${result} = Execute ozone sh bucket info /${VOLUME}/default | jq -r '[.name, .replicationType, .replicationFactor] | join (" ")' - Should contain ${result} default # there is no replication config in the old client for bucket info - ${result} = Execute ozone sh bucket info /${VOLUME}/ratis | jq -r '[.name, .replicationType, .replicationFactor] | join (" ")' - Should contain ${result} ratis # there is no replication config in the old client for bucket info - ${result} = Execute ozone sh bucket info /${VOLUME}/ecbucket | jq -r '[.name, .replicationType, .replicationFactor] | join (" ")' - Should contain ${result} ec # there is no replication config in the old client for bucket info - -Test FS 
Compat - [Tags] test-ec-compat - ${result} = Execute ozone fs -ls ofs://om/ - Should contain ${result} /${VOLUME} - ${result} = Execute ozone fs -ls ofs://om/${VOLUME}/ - Should contain ${result} /${VOLUME}/default - Should contain ${result} /${VOLUME}/ratis - Should contain ${result} /${VOLUME}/ecbucket - ${result} = Execute ozone fs -ls ofs://om/${VOLUME}/default/3mb - Should contain ${result} /${VOLUME}/default/3mb - ${result} = Execute ozone fs -ls ofs://om/${VOLUME}/ratis/3mb - Should contain ${result} /${VOLUME}/ratis/3mb - - ${result} = Execute and checkrc ozone fs -ls ofs://om/${VOLUME}/ecbucket/ 1 - Should contain ${result} ls: The list of keys contains keys with Erasure Coded replication set - ${result} = Execute and checkrc ozone fs -ls ofs://om/${VOLUME}/ecbucket/3mb 1 - Should contain ${result} : No such file or directory - ${result} = Execute and checkrc ozone fs -get ofs://om/${VOLUME}/ecbucket/3mb 1 - Should contain ${result} : No such file or directory - -Test FS Client Can Read Own Writes - [Tags] test-ec-compat - Execute ozone fs -put /tmp/1mb ofs://om/${VOLUME}/default/1mb - Execute ozone fs -put /tmp/1mb ofs://om/${VOLUME}/ratis/1mb - Execute ozone fs -put /tmp/1mb ofs://om/${VOLUME}/ecbucket/1mb - Key Should Match Local File /${VOLUME}/ratis/1mb /tmp/1mb - Key Should Match Local File /${VOLUME}/ratis/1mb /tmp/1mb - Key Should Match Local File /${VOLUME}/ratis/1mb /tmp/1mb - Execute ozone fs -rm -skipTrash ofs://om/${VOLUME}/default/1mb - Execute ozone fs -rm -skipTrash ofs://om/${VOLUME}/ratis/1mb - Execute ozone fs -rm -skipTrash ofs://om/${VOLUME}/ecbucket/1mb - -Test Client Can Read Own Writes - [Tags] test-ec-compat - Execute ozone sh key put /${VOLUME}/default/2mb /tmp/2mb - Execute ozone sh key put /${VOLUME}/ratis/2mb /tmp/2mb - Execute ozone sh key put /${VOLUME}/ecbucket/2mb /tmp/2mb - Key Should Match Local File /${VOLUME}/ratis/2mb /tmp/2mb - Key Should Match Local File /${VOLUME}/ratis/2mb /tmp/2mb - Key Should Match Local File /${VOLUME}/ratis/2mb /tmp/2mb - Execute ozone sh key delete /${VOLUME}/default/2mb - Execute ozone sh key delete /${VOLUME}/ratis/2mb - Execute ozone sh key delete /${VOLUME}/ecbucket/2mb diff --git a/hadoop-ozone/dist/src/main/smoketest/ec/lib.resource b/hadoop-ozone/dist/src/main/smoketest/ec/lib.resource index f6a84f9e065..f01ec191f55 100644 --- a/hadoop-ozone/dist/src/main/smoketest/ec/lib.resource +++ b/hadoop-ozone/dist/src/main/smoketest/ec/lib.resource @@ -21,8 +21,6 @@ Resource ../ozone-lib/shell.robot *** Variables *** ${SCM} scm -${PREFIX} ${EMPTY} -${VOLUME} vol${PREFIX} *** Keywords *** Prepare For Tests @@ -31,13 +29,3 @@ Prepare For Tests Execute dd if=/dev/urandom of=/tmp/3mb bs=1048576 count=3 Execute dd if=/dev/urandom of=/tmp/100mb bs=1048576 count=100 Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab - -# xcompat/test.sh creates unified test data files in /tmp for client containers -Prepare Data For Xcompat Tests - Execute ozone sh volume create /${VOLUME} - Execute ozone sh bucket create /${VOLUME}/default - Execute ozone sh bucket create --replication 3 --type RATIS /${VOLUME}/ratis - Execute ozone sh bucket create --replication rs-3-2-1024k --type EC /${VOLUME}/ecbucket - Execute ozone sh key put /${VOLUME}/default/3mb /tmp/3mb - Execute ozone sh key put /${VOLUME}/ratis/3mb /tmp/3mb - Execute ozone sh key put /${VOLUME}/ecbucket/3mb /tmp/3mb diff --git a/hadoop-ozone/dist/src/main/smoketest/om-ratis/testOMAdminCmd.robot 
b/hadoop-ozone/dist/src/main/smoketest/om-ratis/testOMAdminCmd.robot index 0c688865193..3977e053608 100644 --- a/hadoop-ozone/dist/src/main/smoketest/om-ratis/testOMAdminCmd.robot +++ b/hadoop-ozone/dist/src/main/smoketest/om-ratis/testOMAdminCmd.robot @@ -22,5 +22,5 @@ Test Timeout 5 minutes *** Test Cases *** Check om admin command - ${result} = Execute and checkrc ozone admin om roles -id=omServiceIdDefault 0 + ${result} = Execute and ignore error ozone admin om roles -id=omServiceIdDefault Should Contain ${result} This command works only on OzoneManager HA cluster. diff --git a/hadoop-ozone/dist/src/main/smoketest/omha/om-roles.robot b/hadoop-ozone/dist/src/main/smoketest/omha/om-roles.robot index 54e44bce36b..3513ec12de1 100644 --- a/hadoop-ozone/dist/src/main/smoketest/omha/om-roles.robot +++ b/hadoop-ozone/dist/src/main/smoketest/omha/om-roles.robot @@ -28,6 +28,9 @@ Assert Leader Present in JSON [Arguments] ${output} ${leader} = Execute echo '${output}' | jq '.[] | select(.[] | .serverRole == "LEADER")' Should Not Be Equal ${leader} ${EMPTY} +Assert Leader Present in TABLE + [Arguments] ${output} + Should Match Regexp ${output} \\|.*LEADER.* *** Test Cases *** List om roles with OM service ID passed @@ -53,3 +56,15 @@ List om roles as JSON without OM service ID passed Assert Leader Present in JSON ${output_without_id_passed} ${output_without_id_passed} = Execute And Ignore Error ozone admin --set=ozone.om.service.ids=omservice,omservice2 om roles --json Should Contain ${output_without_id_passed} no Ozone Manager service ID specified + +List om roles as TABLE with OM service ID passed + ${output_with_id_passed} = Execute ozone admin om roles --service-id=omservice --table + Assert Leader Present in TABLE ${output_with_id_passed} + ${output_with_id_passed} = Execute ozone admin --set=ozone.om.service.ids=omservice,omservice2 om roles --service-id=omservice --table + Assert Leader Present in TABLE ${output_with_id_passed} + +List om roles as TABLE without OM service ID passed + ${output_without_id_passed} = Execute ozone admin om roles --table + Assert Leader Present in TABLE ${output_without_id_passed} + ${output_without_id_passed} = Execute And Ignore Error ozone admin --set=ozone.om.service.ids=omservice,omservice2 om roles --table + Should Contain ${output_without_id_passed} no Ozone Manager service ID specified \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell.robot b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell.robot index 2b54e8bf330..d5762f912e7 100644 --- a/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell.robot +++ b/hadoop-ozone/dist/src/main/smoketest/ozone-lib/shell.robot @@ -28,10 +28,10 @@ Bucket Exists [Return] ${TRUE} Compare Key With Local File - [arguments] ${key} ${file} + [arguments] ${key} ${file} ${cmd}=sh key get ${postfix} = Generate Random String 5 [NUMBERS] ${tmpfile} = Set Variable /tmp/tempkey-${postfix} - Execute ozone sh key get ${key} ${tmpfile} + Execute ozone ${cmd} ${key} ${tmpfile} ${rc} = Run And Return Rc diff -q ${file} ${tmpfile} Execute rm -f ${tmpfile} ${result} = Set Variable If ${rc} == 0 ${TRUE} ${FALSE} @@ -42,6 +42,11 @@ Key Should Match Local File ${matches} = Compare Key With Local File ${key} ${file} Should Be True ${matches} +File Should Match Local File + [arguments] ${key} ${file} + ${matches} = Compare Key With Local File ${key} ${file} fs -get + Should Be True ${matches} + Verify ACL [arguments] ${object_type} ${object} ${type} ${name} ${acls} ${actual_acls} = Execute ozone sh 
${object_type} getacl ${object} | jq -r '.[] | select(.type == "${type}") | select(.name == "${name}") | .aclList[]' | xargs @@ -70,6 +75,11 @@ Create Key Should not contain ${output} Failed Log Uploaded ${file} to ${key} +Assert Unsupported + [arguments] ${cmd} + ${result} = Execute and checkrc ${cmd} 255 + Should Contain ${result} NOT_SUPPORTED_OPERATION + Verify Bucket Empty Replication Config [arguments] ${bucket} ${result} = Execute ozone sh bucket info ${bucket} | jq -r '.replicationConfig' diff --git a/hadoop-ozone/dist/src/main/smoketest/recon/recon-api.robot b/hadoop-ozone/dist/src/main/smoketest/recon/recon-api.robot index 43860b75d99..883e93bdc5c 100644 --- a/hadoop-ozone/dist/src/main/smoketest/recon/recon-api.robot +++ b/hadoop-ozone/dist/src/main/smoketest/recon/recon-api.robot @@ -28,6 +28,8 @@ ${API_ENDPOINT_URL} ${ENDPOINT_URL}/api/v1 ${ADMIN_API_ENDPOINT_URL} ${API_ENDPOINT_URL}/containers ${UNHEALTHY_ENDPOINT_URL} ${API_ENDPOINT_URL}/containers/unhealthy ${NON_ADMIN_API_ENDPOINT_URL} ${API_ENDPOINT_URL}/clusterState +${VOLUME} vol1 +${BUCKET} bucket1 *** Keywords *** Check if Recon picks up container from OM @@ -57,6 +59,15 @@ Check http return code Should contain ${result} 200 END +Check if the listKeys api responds OK + [Arguments] ${volume} ${bucket} + Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit as ozone admin + ${result} = Execute curl --negotiate -u : -LSs ${API_ENDPOINT_URL}/keys/listKeys?startPrefix=/${volume}/${bucket}&limit=1000 + Should contain ${result} "OK" + Should contain ${result} "keys" + Should contain ${result} "${volume}" + Should contain ${result} "${bucket}" + *** Test Cases *** Check if Recon picks up OM data Execute ozone sh volume create recon @@ -67,6 +78,7 @@ Check if Recon picks up OM data Execute ozone sh bucket create recon/api --layout=LEGACY Freon OCKG n=10 args=-s 1025 -v recon -b api Wait Until Keyword Succeeds 90sec 10sec Check if Recon picks up container from OM + Wait Until Keyword Succeeds 90sec 10sec Check if the listKeys api responds OK recon api Check if Recon picks up DN heartbeats ${result} = Execute curl --negotiate -u : -LSs ${API_ENDPOINT_URL}/datanodes diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot index d62a217e606..c1165821b7d 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot @@ -20,6 +20,7 @@ Library String Library DateTime Resource ../commonlib.robot Resource commonawslib.robot +Resource mpu_lib.robot Test Timeout 5 minutes Suite Setup Setup Multipart Tests Suite Teardown Teardown Multipart Tests @@ -61,17 +62,8 @@ Test Multipart Upload With Adjusted Length Verify Multipart Upload ${BUCKET} multipart/adjusted_length_${PREFIX} /tmp/part1 /tmp/part2 Test Multipart Upload - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey - Should contain ${result} UploadId -# initiate again - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey - ${nextUploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey - Should contain ${result} UploadId + ${uploadID} = Initiate MPU ${BUCKET} 
${PREFIX}/multipartKey + ${nextUploadID} = Initiate MPU ${BUCKET} ${PREFIX}/multipartKey Should Not Be Equal ${uploadID} ${nextUploadID} # upload part @@ -79,33 +71,20 @@ Test Multipart Upload # upload we get error entity too small. So, considering further complete # multipart upload, uploading each part as 5MB file, exception is for last part - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey --part-number 1 --body /tmp/part1 --upload-id ${nextUploadID} - Should contain ${result} ETag -# override part - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey --part-number 1 --body /tmp/part1 --upload-id ${nextUploadID} - Should contain ${result} ETag + Upload MPU part ${BUCKET} ${PREFIX}/multipartKey ${nextUploadID} 1 /tmp/part1 + Upload MPU part ${BUCKET} ${PREFIX}/multipartKey ${nextUploadID} 1 /tmp/part1 Test Multipart Upload Complete - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --metadata="custom-key1=custom-value1,custom-key2=custom-value2,gdprEnabled=true" --tagging="tag-key1=tag-value1&tag-key2=tag-value2" - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey - Should contain ${result} UploadId + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/multipartKey1 0 --metadata="custom-key1=custom-value1,custom-key2=custom-value2,gdprEnabled=true" --tagging="tag-key1=tag-value1&tag-key2=tag-value2" -#upload parts - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --part-number 1 --body /tmp/part1 --upload-id ${uploadID} - ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - ${part1Md5Sum} = Execute md5sum /tmp/part1 | awk '{print $1}' - Should Be Equal As Strings ${eTag1} ${part1Md5Sum} - - Execute echo "Part2" > /tmp/part2 - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --part-number 2 --body /tmp/part2 --upload-id ${uploadID} - ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - ${part2Md5Sum} = Execute md5sum /tmp/part2 | awk '{print $1}' - Should Be Equal As Strings ${eTag2} ${part2Md5Sum} + ${eTag1} = Upload MPU part ${BUCKET} ${PREFIX}/multipartKey1 ${uploadID} 1 /tmp/part1 + ${eTag2} = Upload MPU part ${BUCKET} ${PREFIX}/multipartKey1 ${uploadID} 2 /tmp/part2 + +#complete multipart upload without any parts + ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 255 + Should contain ${result} InvalidRequest + Should contain ${result} must specify at least one part #complete multipart upload without any parts ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 255 @@ -113,12 +92,8 @@ Test Multipart Upload Complete Should contain ${result} must specify at least one part #complete multipart upload - ${result} = Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]' - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey1 - ${resultETag} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 + 
${resultETag} = Complete MPU ${BUCKET} ${PREFIX}/multipartKey1 ${uploadID} {ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2} ${expectedResultETag} = Execute echo -n ${eTag1}${eTag2} | md5sum | awk '{print $1}' - Should contain ${result} ETag Should Be Equal As Strings ${resultETag} "${expectedResultETag}-2" #check whether the user defined metadata and parts count can be retrieved @@ -163,116 +138,69 @@ Test Multipart Upload Complete Test Multipart Upload with user defined metadata size larger than 2 KB ${custom_metadata_value} = Generate Random String 3000 - ${result} = Execute AWSS3APICli and checkrc create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/mpuWithLargeMetadata --metadata="custom-key1=${custom_metadata_value}" 255 + ${result} = Initiate MPU ${BUCKET} ${PREFIX}/mpuWithLargeMetadata 255 --metadata="custom-key1=${custom_metadata_value}" Should contain ${result} MetadataTooLarge Should not contain ${result} custom-key1: ${custom_metadata_value} Test Multipart Upload Complete Entity too small - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey2 - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey - Should contain ${result} UploadId - -#upload parts - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey2 --part-number 1 --body /tmp/10kb --upload-id ${uploadID} - ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey2 --part-number 2 --body /tmp/10kb --upload-id ${uploadID} - ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - -#complete multipart upload - ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey2 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]' 255 + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/multipartKey2 + ${parts} = Upload MPU parts ${BUCKET} ${PREFIX}/multipartKey2 ${uploadID} /tmp/10kb /tmp/10kb + ${result} = Complete MPU ${BUCKET} ${PREFIX}/multipartKey2 ${uploadID} ${parts} 255 Should contain ${result} EntityTooSmall Test Multipart Upload Complete Invalid part errors and complete mpu with few parts - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey - Should contain ${result} UploadId + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/multipartKey3 #complete multipart upload when no parts uploaded - ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --multipart-upload 'Parts=[{ETag=etag1,PartNumber=1},{ETag=etag2,PartNumber=2}]' 255 + ${result} = Complete MPU ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} {ETag=etag1,PartNumber=1},{ETag=etag2,PartNumber=2} 255 Should contain ${result} InvalidPart - ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --multipart-upload 'Parts=[{ETag=etag1,PartNumber=2},{ETag=etag2,PartNumber=1}]' 255 + ${result} = 
Complete MPU ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} {ETag=etag1,PartNumber=2},{ETag=etag2,PartNumber=1} 255 Should contain ${result} InvalidPart #upload parts - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --part-number 1 --body /tmp/part1 --upload-id ${uploadID} - ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - - - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --part-number 2 --body /tmp/part1 --upload-id ${uploadID} - ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - - Execute echo "Part3" > /tmp/part3 - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --part-number 3 --body /tmp/part3 --upload-id ${uploadID} - ${eTag3} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag + ${eTag1} = Upload MPU part ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} 1 /tmp/part1 + ${eTag2} = Upload MPU part ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} 2 /tmp/part1 + ${eTag3} = Upload MPU part ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} 3 /tmp/part2 #complete multipart upload - ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --multipart-upload 'Parts=[{ETag=etag1,PartNumber=1},{ETag=etag2,PartNumber=2}]' 255 + ${result} = Complete MPU ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} {ETag=etag1,PartNumber=1},{ETag=etag2,PartNumber=2} 255 Should contain ${result} InvalidPart - ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=etag2,PartNumber=2}]' 255 + ${result} = Complete MPU ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} {ETag=${eTag1},PartNumber=1},{ETag=etag2,PartNumber=2} 255 Should contain ${result} InvalidPart - ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=4},{ETag=etag2,PartNumber=2}]' 255 + ${result} = Complete MPU ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} {ETag=${eTag1},PartNumber=4},{ETag=etag2,PartNumber=2} 255 Should contain ${result} InvalidPartOrder #complete multipart upload(merge with few parts) - ${result} = Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag3},PartNumber=3}]' - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey3 - Should contain ${result} ETag + ${result} = Complete MPU ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} {ETag=${eTag1},PartNumber=1},{ETag=${eTag3},PartNumber=3} ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 /tmp/${PREFIX}-multipartKey3.result - Execute cat /tmp/part1 /tmp/part3 > /tmp/${PREFIX}-multipartKey3 + Execute cat /tmp/part1 /tmp/part2 > /tmp/${PREFIX}-multipartKey3 Compare files /tmp/${PREFIX}-multipartKey3 /tmp/${PREFIX}-multipartKey3.result ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --part-number 1 /tmp/${PREFIX}-multipartKey3-part1.result Compare files /tmp/part1 /tmp/${PREFIX}-multipartKey3-part1.result - ${result} = 
Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --part-number 3 /tmp/${PREFIX}-multipartKey3-part3.result - Compare files /tmp/part3 /tmp/${PREFIX}-multipartKey3-part3.result + ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --part-number 3 /tmp/${PREFIX}-multipartKey3-part2.result + Compare files /tmp/part2 /tmp/${PREFIX}-multipartKey3-part2.result Test abort Multipart upload - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey4 --storage-class REDUCED_REDUNDANCY - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey - Should contain ${result} UploadId - - ${result} = Execute AWSS3APICli and checkrc abort-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey4 --upload-id ${uploadID} 0 + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/multipartKey4 0 --storage-class REDUCED_REDUNDANCY + ${result} = Abort MPU ${BUCKET} ${PREFIX}/multipartKey4 ${uploadID} 0 Test abort Multipart upload with invalid uploadId - ${result} = Execute AWSS3APICli and checkrc abort-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey5 --upload-id "random" 255 + ${result} = Abort MPU ${BUCKET} ${PREFIX}/multipartKey5 "random" 255 Upload part with Incorrect uploadID - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey - Execute echo "Multipart upload" > /tmp/testfile - ${result} = Execute AWSS3APICli and checkrc upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey --part-number 1 --body /tmp/testfile --upload-id "random" 255 + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/multipartKey + ${result} = Upload MPU part ${BUCKET} ${PREFIX}/multipartKey "no-such-upload-id" 1 /tmp/10kb 255 Should contain ${result} NoSuchUpload Test list parts #initiate multipart upload - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey5 - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey - Should contain ${result} UploadId + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/multipartKey5 #upload parts - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey5 --part-number 1 --body /tmp/part1 --upload-id ${uploadID} - ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - - Execute echo "Part2" > /tmp/part2 - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey5 --part-number 2 --body /tmp/part2 --upload-id ${uploadID} - ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag + ${eTag1} = Upload MPU part ${BUCKET} ${PREFIX}/multipartKey5 ${uploadID} 1 /tmp/part1 + ${eTag2} = Upload MPU part ${BUCKET} ${PREFIX}/multipartKey5 ${uploadID} 2 /tmp/part2 #list parts ${result} = Execute AWSS3APICli list-parts --bucket ${BUCKET} --key ${PREFIX}/multipartKey5 --upload-id ${uploadID} @@ -295,7 +223,7 @@ Test list parts Should contain ${result} STANDARD #finally abort it - ${result} = Execute AWSS3APICli and checkrc abort-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey5 --upload-id ${uploadID} 0 + ${result} = Abort MPU ${BUCKET} ${PREFIX}/multipartKey5 ${uploadID} 0 Test Multipart Upload with the 
simplified aws s3 cp API Execute AWSS3Cli cp /tmp/22mb s3://${BUCKET}/mpyawscli @@ -307,19 +235,14 @@ Test Multipart Upload Put With Copy ${result} = Execute AWSS3APICli put-object --bucket ${BUCKET} --key ${PREFIX}/copytest/source --body /tmp/part1 - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/copytest/destination - - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} UploadId - + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/copytest/destination ${result} = Execute AWSS3APICli upload-part-copy --bucket ${BUCKET} --key ${PREFIX}/copytest/destination --upload-id ${uploadID} --part-number 1 --copy-source ${BUCKET}/${PREFIX}/copytest/source Should contain ${result} ETag Should contain ${result} LastModified ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.CopyPartResult.ETag' 0 - Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/copytest/destination --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1}]' + Complete MPU ${BUCKET} ${PREFIX}/copytest/destination ${uploadID} {ETag=${eTag1},PartNumber=1} Execute AWSS3APICli get-object --bucket ${BUCKET} --key ${PREFIX}/copytest/destination /tmp/part-result Compare files /tmp/part1 /tmp/part-result @@ -328,11 +251,7 @@ Test Multipart Upload Put With Copy and range ${result} = Execute AWSS3APICli put-object --bucket ${BUCKET} --key ${PREFIX}/copyrange/source --body /tmp/10mb - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/copyrange/destination - - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} UploadId + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/copyrange/destination ${result} = Execute AWSS3APICli upload-part-copy --bucket ${BUCKET} --key ${PREFIX}/copyrange/destination --upload-id ${uploadID} --part-number 1 --copy-source ${BUCKET}/${PREFIX}/copyrange/source --copy-source-range bytes=0-10485757 Should contain ${result} ETag @@ -345,7 +264,7 @@ Test Multipart Upload Put With Copy and range ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.CopyPartResult.ETag' 0 - Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/copyrange/destination --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]' + Complete MPU ${BUCKET} ${PREFIX}/copyrange/destination ${uploadID} {ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2} Execute AWSS3APICli get-object --bucket ${BUCKET} --key ${PREFIX}/copyrange/destination /tmp/part-result Compare files /tmp/10mb /tmp/part-result @@ -357,11 +276,7 @@ Test Multipart Upload Put With Copy and range with IfModifiedSince ${result} = Execute AWSS3APICli put-object --bucket ${BUCKET} --key ${PREFIX}/copyrange/source --body /tmp/10mb - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/copyrange/destination - - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} UploadId + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/copyrange/destination #calc time-to-sleep from time-last-modified plus a few seconds ${result} = Execute AWSS3APICli head-object --bucket ${BUCKET} --key ${PREFIX}/copyrange/source @@ -396,24 +311,14 @@ Test Multipart Upload Put With Copy and range with 
IfModifiedSince ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.CopyPartResult.ETag' 0 - - Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/copyrange/destination --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]' + Complete MPU ${BUCKET} ${PREFIX}/copyrange/destination ${uploadID} {ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2} Execute AWSS3APICli get-object --bucket ${BUCKET} --key ${PREFIX}/copyrange/destination /tmp/part-result Compare files /tmp/10mb /tmp/part-result Test Multipart Upload list - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/listtest/key1 - ${uploadID1} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/listtest/key1 - Should contain ${result} UploadId - - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/listtest/key2 - ${uploadID2} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/listtest/key2 - Should contain ${result} UploadId + ${uploadID1} = Initiate MPU ${BUCKET} ${PREFIX}/listtest/key1 + ${uploadID2} = Initiate MPU ${BUCKET} ${PREFIX}/listtest/key2 ${result} = Execute AWSS3APICli list-multipart-uploads --bucket ${BUCKET} --prefix ${PREFIX}/listtest Should contain ${result} ${uploadID1} diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot b/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot index 76e0cadf372..39ddbde41b0 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot @@ -42,13 +42,14 @@ Create bucket with invalid bucket name ${result} = Execute AWSS3APICli and checkrc create-bucket --bucket invalid_bucket_${randStr} 255 Should contain ${result} InvalidBucketName -Create new bucket and check no group ACL +Create new bucket and check default group ACL ${bucket} = Create bucket ${acl} = Execute ozone sh bucket getacl s3v/${bucket} ${group} = Get Regexp Matches ${acl} "GROUP" - IF '${group}' is not '[]' + IF '${group}' != '[]' ${json} = Evaluate json.loads('''${acl}''') json # make sure this check is for group acl Should contain ${json}[1][type] GROUP - Should contain ${json}[1][aclList] NONE - END \ No newline at end of file + Should contain ${json}[1][aclList] READ + Should contain ${json}[1][aclList] LIST + END diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/bucketdelete.robot b/hadoop-ozone/dist/src/main/smoketest/s3/bucketdelete.robot index 607a7dee960..a382970a6de 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/bucketdelete.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/bucketdelete.robot @@ -19,6 +19,7 @@ Library OperatingSystem Library String Resource ../commonlib.robot Resource commonawslib.robot +Resource mpu_lib.robot Test Timeout 5 minutes Suite Setup Setup s3 tests @@ -48,17 +49,13 @@ Delete bucket with incomplete multipart uploads [tags] no-bucket-type ${bucket} = Create bucket - # initiate incomplete multipart uploads (multipart upload is initiated but not completed/aborted) - ${initiate_result} = Execute AWSS3APICli create-multipart-upload --bucket ${bucket} --key incomplete-multipartkey - ${uploadID} = Execute echo '${initiate_result}' | jq -r '.UploadId' - Should contain ${initiate_result} ${bucket} - Should contain ${initiate_result} incomplete-multipartkey 
- Should contain ${initiate_result} UploadId + # initiate incomplete multipart upload (multipart upload is initiated but not completed/aborted) + ${uploadID} = Initiate MPU ${bucket} incomplete-multipartkey # bucket deletion should fail since there is still incomplete multipart upload ${delete_fail_result} = Execute AWSS3APICli and checkrc delete-bucket --bucket ${bucket} 255 Should contain ${delete_fail_result} BucketNotEmpty # after aborting the multipart upload, the bucket deletion should succeed - ${abort_result} = Execute AWSS3APICli and checkrc abort-multipart-upload --bucket ${bucket} --key incomplete-multipartkey --upload-id ${uploadID} 0 - ${delete_result} = Execute AWSS3APICli and checkrc delete-bucket --bucket ${bucket} 0 \ No newline at end of file + ${abort_result} = Abort MPU ${bucket} incomplete-multipartkey ${uploadID} + ${delete_result} = Execute AWSS3APICli and checkrc delete-bucket --bucket ${bucket} 0 diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot index 45dee9270bd..ac64ee36537 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot @@ -24,7 +24,7 @@ ${OZONE_S3_HEADER_VERSION} v4 ${OZONE_S3_SET_CREDENTIALS} true ${BUCKET} generated ${BUCKET_LAYOUT} OBJECT_STORE -${KEY_NAME} key1 +${ENCRYPTION_KEY} key1 ${OZONE_S3_TESTS_SET_UP} ${FALSE} ${OZONE_AWS_ACCESS_KEY_ID} ${EMPTY} ${OZONE_S3_ADDRESS_STYLE} path @@ -156,7 +156,7 @@ Create encrypted bucket Return From Keyword if '${SECURITY_ENABLED}' == 'false' ${exists} = Bucket Exists o3://${OM_SERVICE_ID}/s3v/encrypted Return From Keyword If ${exists} - Execute ozone sh bucket create -k ${KEY_NAME} --layout ${BUCKET_LAYOUT} o3://${OM_SERVICE_ID}/s3v/encrypted + Execute ozone sh bucket create -k ${ENCRYPTION_KEY} --layout ${BUCKET_LAYOUT} o3://${OM_SERVICE_ID}/s3v/encrypted Create link [arguments] ${bucket} @@ -172,33 +172,6 @@ Generate random prefix ${random} = Generate Ozone String Set Global Variable ${PREFIX} ${random} -Perform Multipart Upload - [arguments] ${bucket} ${key} @{files} - - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${bucket} --key ${key} - ${upload_id} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - - @{etags} = Create List - FOR ${i} ${file} IN ENUMERATE @{files} - ${part} = Evaluate ${i} + 1 - ${result} = Execute AWSS3APICli upload-part --bucket ${bucket} --key ${key} --part-number ${part} --body ${file} --upload-id ${upload_id} - ${etag} = Execute echo '${result}' | jq -r '.ETag' - Append To List ${etags} {ETag=${etag},PartNumber=${part}} - END - - ${parts} = Catenate SEPARATOR=, @{etags} - Execute AWSS3APICli complete-multipart-upload --bucket ${bucket} --key ${key} --upload-id ${upload_id} --multipart-upload 'Parts=[${parts}]' - -Verify Multipart Upload - [arguments] ${bucket} ${key} @{files} - - ${random} = Generate Ozone String - - Execute AWSS3APICli get-object --bucket ${bucket} --key ${key} /tmp/verify${random} - ${tmp} = Catenate @{files} - Execute cat ${tmp} > /tmp/original${random} - Compare files /tmp/original${random} /tmp/verify${random} - Revoke S3 secrets Execute and Ignore Error ozone s3 revokesecret -y Execute and Ignore Error ozone s3 revokesecret -y -u testuser diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/mpu_lib.robot b/hadoop-ozone/dist/src/main/smoketest/s3/mpu_lib.robot new file mode 100644 index 00000000000..0aaa0affec1 --- /dev/null +++ 
b/hadoop-ozone/dist/src/main/smoketest/s3/mpu_lib.robot @@ -0,0 +1,105 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation Keywords for Multipart Upload +Library OperatingSystem +Library String +Resource commonawslib.robot + +*** Keywords *** + +Initiate MPU + [arguments] ${bucket} ${key} ${expected_rc}=0 ${opts}=${EMPTY} + + ${result} = Execute AWSS3APICli and checkrc create-multipart-upload --bucket ${bucket} --key ${key} ${opts} ${expected_rc} + IF '${expected_rc}' == '0' + Should contain ${result} ${bucket} + Should contain ${result} ${key} + ${upload_id} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 + RETURN ${upload_id} + ELSE + RETURN ${result} + END + + +Upload MPU part + [arguments] ${bucket} ${key} ${upload_id} ${part} ${file} ${expected_rc}=0 + + ${result} = Execute AWSS3APICli and checkrc upload-part --bucket ${bucket} --key ${key} --part-number ${part} --body ${file} --upload-id ${upload_id} ${expected_rc} + IF '${expected_rc}' == '0' + Should contain ${result} ETag + ${etag} = Execute echo '${result}' | jq -r '.ETag' + ${md5sum} = Execute md5sum ${file} | awk '{print $1}' + Should Be Equal As Strings ${etag} ${md5sum} + RETURN ${etag} + ELSE + RETURN ${result} + END + + +Complete MPU + [arguments] ${bucket} ${key} ${upload_id} ${parts} ${expected_rc}=0 + + ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --bucket ${bucket} --key ${key} --upload-id ${upload_id} --multipart-upload 'Parts=[${parts}]' ${expected_rc} + IF '${expected_rc}' == '0' + Should contain ${result} ${bucket} + Should contain ${result} ${key} + Should contain ${result} ETag + ${etag} = Execute echo '${result}' | jq -r '.ETag' + RETURN ${etag} + ELSE + RETURN ${result} + END + + +Abort MPU + [arguments] ${bucket} ${key} ${upload_id} ${expected_rc}=0 + + ${result} = Execute AWSS3APICli and checkrc abort-multipart-upload --bucket ${bucket} --key ${key} --upload-id ${upload_id} ${expected_rc} + + +Upload MPU parts + [arguments] ${bucket} ${key} ${upload_id} @{files} + + @{etags} = Create List + FOR ${i} ${file} IN ENUMERATE @{files} + ${part} = Evaluate ${i} + 1 + ${etag} = Upload MPU part ${bucket} ${key} ${upload_id} ${part} ${file} + Append To List ${etags} {ETag=${etag},PartNumber=${part}} + END + ${parts} = Catenate SEPARATOR=, @{etags} + + RETURN ${parts} + + +Perform Multipart Upload + [arguments] ${bucket} ${key} @{files} + + ${upload_id} = Initiate MPU ${bucket} ${key} + ${parts} = Upload MPU parts ${bucket} ${key} ${upload_id} @{files} + Complete MPU ${bucket} ${key} ${upload_id} ${parts} + + +Verify Multipart Upload + [arguments] ${bucket} ${key} @{files} + + ${random} = Generate Ozone String + + Execute AWSS3APICli get-object --bucket ${bucket} --key ${key} /tmp/verify${random} + ${tmp} = Catenate 
@{files} + Execute cat ${tmp} > /tmp/original${random} + Compare files /tmp/original${random} /tmp/verify${random} + diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectcopys3a.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectcopys3a.robot index fead57ca31c..96fd1e62fc8 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/objectcopys3a.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/objectcopys3a.robot @@ -31,19 +31,19 @@ Put object s3a simulation Execute echo "Randomtext" > /tmp/testfile ${result} = Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET} --key ${PREFIX}/word.txt 255 ${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix ${PREFIX}/word.txt/ - Should Not contain ${result} word.txt + Should Not Match Regexp ${result} "Key".*word.txt ${result} = Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET} --key ${PREFIX}/word.txt._COPYING_ 255 ${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix ${PREFIX}/word.txt._COPYING_/ - Should Not contain ${result} word.txt._COPYING_ + Should Not Match Regexp ${result} "Key".*word.txt._COPYING_ ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key ${PREFIX}/word.txt._COPYING_ --body /tmp/testfile Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET} --key ${PREFIX}/word.txt._COPYING_ 0 Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET} --key ${PREFIX}/word.txt 255 ${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix ${PREFIX}/word.txt/ - Should Not contain ${result} word.txt._COPYING_ + Should Not Match Regexp ${result} "Key".*word.txt._COPYING_ ${result} = Execute AWSS3ApiCli copy-object --bucket ${BUCKET} --key ${PREFIX}/word.txt --copy-source ${BUCKET}/${PREFIX}/word.txt._COPYING_ Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET} --key ${PREFIX}/word.txt 0 Execute AWSS3APICli delete-object --bucket ${BUCKET} --key ${PREFIX}/word.txt._COPYING_ - Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET} --key ${PREFIX}/word.txt._COPYING_ 255 \ No newline at end of file + Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET} --key ${PREFIX}/word.txt._COPYING_ 255 diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot index 12fb985348a..82a985f1d50 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot @@ -44,6 +44,7 @@ Put object to s3 Get object from s3 ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 /tmp/testfile.result Compare files /tmp/testfile /tmp/testfile.result + Should not contain ${result} TagCount ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/zerobyte /tmp/zerobyte.result Compare files /tmp/zerobyte /tmp/zerobyte.result diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objecttagging.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objecttagging.robot new file mode 100644 index 00000000000..9098673680d --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/s3/objecttagging.robot @@ -0,0 +1,73 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation S3 gateway test with aws cli +Library OperatingSystem +Library String +Resource ../commonlib.robot +Resource commonawslib.robot +Test Timeout 5 minutes +Suite Setup Setup s3 tests + +*** Variables *** +${ENDPOINT_URL} http://s3g:9878 +${OZONE_TEST} true +${BUCKET} generated + + +*** Test Cases *** + +Put object tagging +# Create an object and call put-object-tagging + Execute echo "Randomtext" > /tmp/testfile + ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 --body /tmp/testfile + ${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix ${PREFIX}/putobject/key=value/ + Should contain ${result} f1 + + ${result} = Execute AWSS3ApiCli put-object-tagging --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 --tagging '{"TagSet": [{ "Key": "tag-key1", "Value": "tag-value1" }]}' + ${result} = Execute AWSS3APICli get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 /tmp/testfile2.result + Should contain ${result} TagCount + ${tagCount} = Execute and checkrc echo '${result}' | jq -r '.TagCount' 0 + Should Be Equal ${tagCount} 1 + +# Calling put-object-tagging again to overwrite the existing tags + ${result} = Execute AWSS3ApiCli put-object-tagging --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 --tagging '{"TagSet": [{ "Key": "tag-key2", "Value": "tag-value2" },{ "Key": "tag-key3", "Value": "tag-value3" }]}' + ${result} = Execute AWSS3APICli get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 /tmp/testfile2.result + Should contain ${result} TagCount + ${tagCount} = Execute and checkrc echo '${result}' | jq -r '.TagCount' 0 + Should Be Equal ${tagCount} 2 + +# Calling put-object-tagging on non-existent key + ${result} = Execute AWSS3APICli and checkrc put-object-tagging --bucket ${BUCKET} --key ${PREFIX}/nonexistent --tagging '{"TagSet": [{ "Key": "tag-key1", "Value": "tag-value1" }]}' 255 + Should contain ${result} NoSuchKey + +#This test depends on the previous test case. Can't be executed alone +Get object tagging + + ${result} = Execute AWSS3ApiCli get-object-tagging --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 + Should contain ${result} TagSet + ${tagCount} = Execute and checkrc echo '${result}' | jq '.TagSet | length' 0 + Should Be Equal ${tagCount} 2 + + +#This test depends on the previous test case. 
Can't be executed alone +Delete object tagging + + ${result} = Execute AWSS3ApiCli delete-object-tagging --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 + ${result} = Execute AWSS3ApiCli get-object-tagging --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 + Should contain ${result} TagSet + ${tagCount} = Execute and checkrc echo '${result}' | jq '.TagSet | length' 0 + Should Be Equal ${tagCount} 0 diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh b/hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh index b9a4c68587d..ab2807167d0 100755 --- a/hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh +++ b/hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh @@ -83,5 +83,6 @@ run_robot_test objectcopy run_robot_test objectmultidelete run_robot_test objecthead run_robot_test MultipartUpload +run_robot_test objecttagging rebot --outputdir results/ results/*.xml diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot index e9b5dd5df72..e0c2fc7f818 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot @@ -45,15 +45,19 @@ S3 Gateway Secret Already Exists Should contain ${result} HTTP/1.1 400 S3_SECRET_ALREADY_EXISTS ignore_case=True S3 Gateway Generate Secret By Username - [Tags] robot:skip # TODO: Enable after HDDS-11041 is done. 
Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled Execute ozone s3 getsecret -u testuser2 ${OM_HA_PARAM} ${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser2 Should contain ${result} HTTP/1.1 200 OK ignore_case=True + +S3 Gateway Reject Secret Revoke By Non-admin User + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled + Run Keyword Kinit test user testuser2 testuser2.keytab + ${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser + Should contain ${result} HTTP/1.1 403 FORBIDDEN ignore_case=True \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/security/bucket-encryption.robot b/hadoop-ozone/dist/src/main/smoketest/security/bucket-encryption.robot deleted file mode 100644 index a78f94e5fa9..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/security/bucket-encryption.robot +++ /dev/null @@ -1,45 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation Test for bucket encryption -Library BuiltIn -Library String -Resource ../commonlib.robot -Resource ../lib/os.robot -Resource ../ozone-lib/shell.robot -Suite Setup Setup Test -Test Timeout 5 minutes - -*** Variables *** -${KEY_NAME} key1 -${VOLUME} - -*** Keywords *** -Setup Test - ${volume} = Create Random Volume - Set Suite Variable ${VOLUME} ${volume} - - -*** Test Cases *** -Create Encrypted Bucket - ${output} = Execute ozone sh bucket create -k ${KEY_NAME} o3://${OM_SERVICE_ID}/${VOLUME}/encrypted-bucket - Should Not Contain ${output} INVALID_REQUEST - Bucket Exists o3://${OM_SERVICE_ID}/${VOLUME}/encrypted-bucket - -Create Key in Encrypted Bucket - ${key} = Set Variable o3://${OM_SERVICE_ID}/${VOLUME}/encrypted-bucket/passwd - ${output} = Execute ozone sh key put ${key} /etc/passwd - Key Should Match Local File ${key} /etc/passwd diff --git a/hadoop-ozone/dist/src/shell/conf/log4j.properties b/hadoop-ozone/dist/src/shell/conf/log4j.properties index 96e90ab5417..aa3d0b4bf43 100644 --- a/hadoop-ozone/dist/src/shell/conf/log4j.properties +++ b/hadoop-ozone/dist/src/shell/conf/log4j.properties @@ -20,7 +20,7 @@ hadoop.log.dir=. hadoop.log.file=hadoop.log # Define the root logger to the system property "hadoop.root.logger". -log4j.rootLogger=${hadoop.root.logger}, EventCounter +log4j.rootLogger=${hadoop.root.logger} # Logging Threshold log4j.threshold=ALL @@ -129,13 +129,6 @@ log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR #log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN -# -# Event Counter Appender -# Sends counts of logging messages at different severity levels to Hadoop Metrics. 
-# -log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter - - # Log levels of third-party libraries log4j.logger.org.apache.commons.beanutils=WARN diff --git a/hadoop-ozone/dist/src/shell/ozone/ozone-functions.sh b/hadoop-ozone/dist/src/shell/ozone/ozone-functions.sh index 84e2b73836e..0e357ddbe15 100755 --- a/hadoop-ozone/dist/src/shell/ozone/ozone-functions.sh +++ b/hadoop-ozone/dist/src/shell/ozone/ozone-functions.sh @@ -2817,14 +2817,6 @@ function ozone_assemble_classpath() { done ozone_add_classpath "${OZONE_HOME}/share/ozone/web" - #We need to add the artifact manually as it's not part the generated classpath desciptor - local MAIN_ARTIFACT - MAIN_ARTIFACT=$(find "$HDDS_LIB_JARS_DIR" -name "${OZONE_RUN_ARTIFACT_NAME}-*.jar") - if [[ -z "$MAIN_ARTIFACT" ]] || [[ ! -e "$MAIN_ARTIFACT" ]]; then - echo "ERROR: Component jar file $MAIN_ARTIFACT is missing from ${HDDS_LIB_JARS_DIR}" - fi - ozone_add_classpath "${MAIN_ARTIFACT}" - #Add optional jars to the classpath local OPTIONAL_CLASSPATH_DIR OPTIONAL_CLASSPATH_DIR="${HDDS_LIB_JARS_DIR}/${OZONE_RUN_ARTIFACT_NAME}" diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml index 4548459105f..90961941a46 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml @@ -20,9 +20,9 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> ozone-fault-injection-test org.apache.ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Mini Ozone Chaos Tests Apache Ozone Mini Ozone Chaos Tests diff --git a/hadoop-ozone/fault-injection-test/network-tests/pom.xml b/hadoop-ozone/fault-injection-test/network-tests/pom.xml index 97d10cbf761..35874911730 100644 --- a/hadoop-ozone/fault-injection-test/network-tests/pom.xml +++ b/hadoop-ozone/fault-injection-test/network-tests/pom.xml @@ -20,7 +20,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone-fault-injection-test - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-network-tests Apache Ozone Network Tests diff --git a/hadoop-ozone/fault-injection-test/pom.xml b/hadoop-ozone/fault-injection-test/pom.xml index 432faab4877..e62f7e47dc0 100644 --- a/hadoop-ozone/fault-injection-test/pom.xml +++ b/hadoop-ozone/fault-injection-test/pom.xml @@ -20,10 +20,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-fault-injection-test - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Fault Injection Tests Apache Ozone Fault Injection Tests pom diff --git a/hadoop-ozone/httpfsgateway/pom.xml b/hadoop-ozone/httpfsgateway/pom.xml index 7664643b153..bacc730a00f 100644 --- a/hadoop-ozone/httpfsgateway/pom.xml +++ b/hadoop-ozone/httpfsgateway/pom.xml @@ -22,16 +22,17 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-httpfsgateway - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT jar Apache Ozone HttpFS Apache Ozone HttpFS + false REPO NOT AVAIL REVISION NOT AVAIL yyyy-MM-dd'T'HH:mm:ssZ diff --git a/hadoop-ozone/httpfsgateway/src/main/resources/httpfs.properties b/hadoop-ozone/httpfsgateway/src/main/resources/httpfs.properties index 164896e1f05..16d13de384a 100644 --- a/hadoop-ozone/httpfsgateway/src/main/resources/httpfs.properties +++ b/hadoop-ozone/httpfsgateway/src/main/resources/httpfs.properties @@ -16,6 +16,3 @@ httpfs.version=${project.version} httpfs.source.repository=${httpfs.source.repository} httpfs.source.revision=${httpfs.source.revision} - 
-httpfs.build.username=${user.name} -httpfs.build.timestamp=${httpfs.build.timestamp} diff --git a/hadoop-ozone/insight/pom.xml b/hadoop-ozone/insight/pom.xml index bcfb1660244..fa3862a7f71 100644 --- a/hadoop-ozone/insight/pom.xml +++ b/hadoop-ozone/insight/pom.xml @@ -20,14 +20,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-insight - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Insight Tool Apache Ozone Insight Tool jar + + false diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Insight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Insight.java index b4080796be2..690783ee411 100644 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Insight.java +++ b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Insight.java @@ -34,10 +34,6 @@ mixinStandardHelpOptions = true) public class Insight extends GenericCli { - public Insight() { - super(Insight.class); - } - public static void main(String[] args) throws Exception { new Insight().run(args); } diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml index f66f64d2874..f4a2f713185 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-integration-test - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Integration Tests Apache Ozone Integration Tests jar @@ -141,6 +141,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.slf4j * + + com.sun.jersey + jersey-servlet + @@ -161,6 +165,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.slf4j * + + com.sun.jersey + jersey-servlet + @@ -216,6 +224,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.slf4j * + + com.sun.jersey + jersey-servlet + @@ -251,6 +263,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.slf4j jul-to-slf4j + + org.assertj + assertj-core + ${assertj.version} + diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java index 69242d2b1f0..97306475188 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java @@ -117,6 +117,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED; import static org.assertj.core.api.Assertions.assertThat; @@ -184,7 +185,7 @@ void init() throws Exception { conf.setFloat(OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, TRASH_INTERVAL); conf.setFloat(FS_TRASH_INTERVAL_KEY, TRASH_INTERVAL); conf.setFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY, TRASH_INTERVAL / 2); - + conf.setInt(OZONE_OM_SERVER_LIST_MAX_SIZE, 2); conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled); conf.setBoolean(OZONE_ACL_ENABLED, true); 
conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); @@ -409,7 +410,7 @@ public void testCreateWithInvalidPaths() throws Exception { } private void checkInvalidPath(Path path) { - InvalidPathException pathException = assertThrows( + InvalidPathException pathException = GenericTestUtils.assertThrows( InvalidPathException.class, () -> fs.create(path, false) ); assertThat(pathException.getMessage()).contains("Invalid path Name"); @@ -1831,12 +1832,14 @@ public void testLoopInLinkBuckets() throws Exception { String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, linkBucket1Name, linksVolume); - try { - FileSystem.get(URI.create(rootPath), cluster.getConf()); - fail("Should throw Exception due to loop in Link Buckets"); + try (FileSystem fileSystem = FileSystem.get(URI.create(rootPath), + cluster.getConf())) { + fail("Should throw Exception due to loop in Link Buckets" + + " while initialising fs with URI " + fileSystem.getUri()); } catch (OMException oe) { // Expected exception - assertEquals(OMException.ResultCodes.DETECTED_LOOP_IN_BUCKET_LINKS, oe.getResult()); + assertEquals(OMException.ResultCodes.DETECTED_LOOP_IN_BUCKET_LINKS, + oe.getResult()); } finally { volume.deleteBucket(linkBucket1Name); volume.deleteBucket(linkBucket2Name); @@ -1854,13 +1857,17 @@ public void testLoopInLinkBuckets() throws Exception { String rootPath2 = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, danglingLinkBucketName, linksVolume); + FileSystem fileSystem = null; try { - FileSystem.get(URI.create(rootPath2), cluster.getConf()); + fileSystem = FileSystem.get(URI.create(rootPath2), cluster.getConf()); } catch (OMException oe) { // Expected exception fail("Should not throw Exception and show orphan buckets"); } finally { volume.deleteBucket(danglingLinkBucketName); + if (fileSystem != null) { + fileSystem.close(); + } } } @@ -2087,8 +2094,8 @@ void testListStatus2() throws IOException { final long initialListStatusCount = omMetrics.getNumListStatus(); FileStatus[] statusList = fs.listStatus(createPath("/")); assertEquals(1, statusList.length); - assertChange(initialStats, statistics, Statistic.OBJECTS_LIST.getSymbol(), 1); - assertEquals(initialListStatusCount + 1, omMetrics.getNumListStatus()); + assertChange(initialStats, statistics, Statistic.OBJECTS_LIST.getSymbol(), 2); + assertEquals(initialListStatusCount + 2, omMetrics.getNumListStatus()); assertEquals(fs.getFileStatus(path), statusList[0]); dirPath = RandomStringUtils.randomAlphanumeric(5); @@ -2099,13 +2106,48 @@ void testListStatus2() throws IOException { statusList = fs.listStatus(createPath("/")); assertEquals(2, statusList.length); - assertChange(initialStats, statistics, Statistic.OBJECTS_LIST.getSymbol(), 2); - assertEquals(initialListStatusCount + 2, omMetrics.getNumListStatus()); + assertChange(initialStats, statistics, Statistic.OBJECTS_LIST.getSymbol(), 4); + assertEquals(initialListStatusCount + 4, omMetrics.getNumListStatus()); for (Path p : paths) { assertThat(Arrays.asList(statusList)).contains(fs.getFileStatus(p)); } } + @Test + public void testOzoneManagerListLocatedStatusAndListStatus() throws IOException { + String data = RandomStringUtils.randomAlphanumeric(20); + String directory = RandomStringUtils.randomAlphanumeric(5); + String filePath = RandomStringUtils.randomAlphanumeric(5); + Path path = createPath("/" + directory + "/" + filePath); + try (FSDataOutputStream stream = fs.create(path)) { + stream.writeBytes(data); + } + RemoteIterator listLocatedStatus = 
fs.listLocatedStatus(path); + int count = 0; + while (listLocatedStatus.hasNext()) { + LocatedFileStatus locatedFileStatus = listLocatedStatus.next(); + assertTrue(locatedFileStatus.getBlockLocations().length >= 1); + + for (BlockLocation blockLocation : locatedFileStatus.getBlockLocations()) { + assertTrue(blockLocation.getNames().length >= 1); + assertTrue(blockLocation.getHosts().length >= 1); + } + count++; + } + assertEquals(1, count); + count = 0; + RemoteIterator listStatus = fs.listStatusIterator(path); + while (listStatus.hasNext()) { + FileStatus fileStatus = listStatus.next(); + assertFalse(fileStatus instanceof LocatedFileStatus); + count++; + } + assertEquals(1, count); + FileStatus[] fileStatuses = fs.listStatus(path.getParent()); + assertEquals(1, fileStatuses.length); + assertFalse(fileStatuses[0] instanceof LocatedFileStatus); + } + @Test void testOzoneManagerFileSystemInterface() throws IOException { String dirPath = RandomStringUtils.randomAlphanumeric(5); @@ -2230,7 +2272,8 @@ void testFileSystemWithObjectStoreLayout() throws IOException { OzoneConfiguration config = new OzoneConfiguration(fs.getConf()); config.set(FS_DEFAULT_NAME_KEY, obsRootPath); - IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> FileSystem.get(config)); + IllegalArgumentException e = GenericTestUtils.assertThrows(IllegalArgumentException.class, + () -> FileSystem.get(config)); assertThat(e.getMessage()).contains("OBJECT_STORE, which does not support file system semantics"); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java index cfc9029019a..2251b105817 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java @@ -1071,7 +1071,7 @@ private void listStatusRecursiveHelper(Path curPath, List result) private List callAdapterListStatus(String pathStr, boolean recursive, String startPath, long numEntries) throws IOException { return adapter.listStatus(pathStr, recursive, startPath, numEntries, - ofs.getUri(), ofs.getWorkingDirectory(), ofs.getUsername()) + ofs.getUri(), ofs.getWorkingDirectory(), ofs.getUsername(), false) .stream().map(ofs::convertFileStatus).collect(Collectors.toList()); } @@ -1207,7 +1207,7 @@ void testSharedTmpDir() throws IOException { ClientProtocol proxy = objectStore.getClientProxy(); // Get default acl rights for user OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class); - ACLType userRights = aclConfig.getUserDefaultRights(); + ACLType[] userRights = aclConfig.getUserDefaultRights(); // Construct ACL for world access // ACL admin owner, world read+write EnumSet aclRights = EnumSet.of(READ, WRITE); @@ -1310,7 +1310,7 @@ void testTempMount() throws IOException { ClientProtocol proxy = objectStore.getClientProxy(); // Get default acl rights for user OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class); - ACLType userRights = aclConfig.getUserDefaultRights(); + ACLType[] userRights = aclConfig.getUserDefaultRights(); // Construct ACL for world access OzoneAcl aclWorldAccess = new OzoneAcl(ACLIdentityType.WORLD, "", ACCESS, userRights); @@ -2311,7 +2311,7 @@ void testNonPrivilegedUserMkdirCreateBucket() throws IOException { // Get default acl rights for user 
OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class); - ACLType userRights = aclConfig.getUserDefaultRights(); + ACLType[] userRights = aclConfig.getUserDefaultRights(); // Construct ACL for world access OzoneAcl aclWorldAccess = new OzoneAcl(ACLIdentityType.WORLD, "", ACCESS, userRights); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java index 47c584e048a..67baea88357 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java @@ -65,15 +65,17 @@ public static void listStatusIteratorOnPageSize(OzoneConfiguration conf, URI uri = FileSystem.getDefaultUri(config); config.setBoolean( String.format("fs.%s.impl.disable.cache", uri.getScheme()), true); - FileSystem subject = FileSystem.get(uri, config); - Path dir = new Path(Objects.requireNonNull(rootPath), "listStatusIterator"); - try { - Set paths = new TreeSet<>(); - for (int dirCount : dirCounts) { - listStatusIterator(subject, dir, paths, dirCount); + try (FileSystem subject = FileSystem.get(uri, config)) { + Path dir = new Path(Objects.requireNonNull(rootPath), + "listStatusIterator"); + try { + Set paths = new TreeSet<>(); + for (int dirCount : dirCounts) { + listStatusIterator(subject, dir, paths, dirCount); + } + } finally { + subject.delete(dir, true); } - } finally { - subject.delete(dir, true); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java index 8d161dedeb3..78fb4c66fc1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java @@ -529,7 +529,7 @@ public void testAOSKeyDeletingWithSnapshotCreateParallelExecution() when(ozoneManager.getOmSnapshotManager()).thenAnswer(i -> omSnapshotManager); DirectoryDeletingService service = Mockito.spy(new DirectoryDeletingService(1000, TimeUnit.MILLISECONDS, 1000, ozoneManager, - cluster.getConf())); + cluster.getConf(), 1)); service.shutdown(); final int initialSnapshotCount = (int) cluster.getOzoneManager().getMetadataManager().countRowsInTable(snapshotInfoTable); @@ -563,7 +563,7 @@ public void testAOSKeyDeletingWithSnapshotCreateParallelExecution() } return i.callRealMethod(); }).when(service).optimizeDirDeletesAndSubmitRequest(anyLong(), anyLong(), anyLong(), - anyLong(), anyList(), anyList(), eq(null), anyLong(), anyInt(), Mockito.any(), any()); + anyLong(), anyList(), anyList(), eq(null), anyLong(), anyInt(), Mockito.any(), any(), anyLong()); Mockito.doAnswer(i -> { store.createSnapshot(testVolumeName, testBucketName, snap2); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java index c39e24571a8..f185addf6b8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java @@ -84,6 +84,7 @@ import 
org.apache.hadoop.ozone.client.io.KeyOutputStream; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.common.ChecksumCache; import org.apache.hadoop.ozone.container.TestHelper; import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; import org.apache.hadoop.ozone.container.keyvalue.impl.AbstractTestChunkManager; @@ -172,6 +173,7 @@ public class TestHSync { private static final int BLOCK_SIZE = 2 * MAX_FLUSH_SIZE; private static final int SERVICE_INTERVAL = 100; private static final int EXPIRE_THRESHOLD_MS = 140; + private static final int WAL_HEADER_LEN = 83; private static OpenKeyCleanupService openKeyCleanupService; @@ -225,8 +227,8 @@ public static void init() throws Exception { GenericTestUtils.setLogLevel(BlockOutputStream.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(BlockInputStream.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(KeyValueHandler.LOG, Level.DEBUG); - GenericTestUtils.setLogLevel(BufferPool.LOG, Level.DEBUG); + GenericTestUtils.setLogLevel(ChecksumCache.LOG, Level.DEBUG); openKeyCleanupService = (OpenKeyCleanupService) cluster.getOzoneManager().getKeyManager().getOpenKeyCleanupService(); @@ -417,6 +419,45 @@ private static String getChunkPathOnDataNode(FSDataOutputStream outputStream) return chunkPath; } + @Test + public void testHSyncSeek() throws Exception { + // Set the fs.defaultFS + final String rootPath = String.format("%s://%s.%s/", + OZONE_URI_SCHEME, bucket.getName(), bucket.getVolumeName()); + CONF.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + + final String dir = OZONE_ROOT + bucket.getVolumeName() + + OZONE_URI_DELIMITER + bucket.getName(); + final Path key1 = new Path(dir, "key-hsync-seek"); + + final byte[] data = new byte[1024]; + final byte[] buffer = new byte[1024]; + ThreadLocalRandom.current().nextBytes(data); + + try (FileSystem fs = FileSystem.get(CONF)) { + // Create key1 + try (FSDataOutputStream os = fs.create(key1, true)) { + os.write(data, 0, WAL_HEADER_LEN); + // the first hsync will update the correct length in the key info at OM + os.hsync(); + os.write(data, 0, data.length); + os.hsync(); // the second hsync will not update the length at OM + try (FSDataInputStream in = fs.open(key1)) { + // the actual key length is WAL_HEADER_LEN + 1024, but the length in OM is WAL_HEADER_LEN (83) + in.seek(WAL_HEADER_LEN + 1); + final int n = in.read(buffer, 1, buffer.length - 1); + // expect to read 1023 bytes + assertEquals(buffer.length - 1, n); + for (int i = 1; i < buffer.length; i++) { + assertEquals(data[i], buffer[i], "expected at i=" + i); + } + } + } finally { + fs.delete(key1, false); + } + } + } + @ParameterizedTest @ValueSource(booleans = {false, true}) public void testO3fsHSync(boolean incrementalChunkList) throws Exception { @@ -455,6 +496,52 @@ public void testOfsHSync(boolean incrementalChunkList) throws Exception { } } + @Test + public void testHSyncOpenKeyCommitAfterExpiry() throws Exception { + // Set the fs.defaultFS + final String rootPath = String.format("%s://%s/", + OZONE_OFS_URI_SCHEME, CONF.get(OZONE_OM_ADDRESS_KEY)); + CONF.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + + final Path key1 = new Path("hsync-key"); + final Path key2 = new Path("key2"); + + try (FileSystem fs = FileSystem.get(CONF)) { + // Create key1 with hsync + try (FSDataOutputStream os = fs.create(key1, true)) { + os.write(1); + os.hsync(); + // Create key2 without hsync + try (FSDataOutputStream os1 = 
fs.create(key2, true)) { + os1.write(1); + // There should be 2 key in openFileTable + assertThat(2 == getOpenKeyInfo(BUCKET_LAYOUT).size()); + // One key will be in fileTable as hsynced + assertThat(1 == getKeyInfo(BUCKET_LAYOUT).size()); + + // Resume openKeyCleanupService + openKeyCleanupService.resume(); + // Verify hsync openKey gets committed eventually + // Key without hsync is deleted + GenericTestUtils.waitFor(() -> + 0 == getOpenKeyInfo(BUCKET_LAYOUT).size(), 1000, 12000); + // Verify only one key is still present in fileTable + assertThat(1 == getKeyInfo(BUCKET_LAYOUT).size()); + + // Clean up + assertTrue(fs.delete(key1, false)); + waitForEmptyDeletedTable(); + } catch (OMException ex) { + assertEquals(OMException.ResultCodes.KEY_NOT_FOUND, ex.getResult()); + } + } catch (OMException ex) { + assertEquals(OMException.ResultCodes.KEY_NOT_FOUND, ex.getResult()); + } finally { + openKeyCleanupService.suspend(); + } + } + } + @Test public void testHSyncDeletedKey() throws Exception { // Verify that a key can't be successfully hsync'ed again after it's deleted, @@ -554,6 +641,21 @@ private List getOpenKeyInfo(BucketLayout bucketLayout) { return omKeyInfo; } + private List getKeyInfo(BucketLayout bucketLayout) { + List omKeyInfo = new ArrayList<>(); + + Table openFileTable = + cluster.getOzoneManager().getMetadataManager().getKeyTable(bucketLayout); + try (TableIterator> + iterator = openFileTable.iterator()) { + while (iterator.hasNext()) { + omKeyInfo.add(iterator.next().getValue()); + } + } catch (Exception e) { + } + return omKeyInfo; + } + @Test public void testUncommittedBlocks() throws Exception { waitForEmptyDeletedTable(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSBucketLayout.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSBucketLayout.java index f1cedf59c3a..8e8cc63a7d9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSBucketLayout.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSBucketLayout.java @@ -122,8 +122,16 @@ void teardown() throws IOException { void fileSystemWithUnsupportedDefaultBucketLayout(String layout) { OzoneConfiguration conf = configWithDefaultBucketLayout(layout); - OMException e = assertThrows(OMException.class, - () -> FileSystem.newInstance(conf)); + OMException e = assertThrows(OMException.class, () -> { + FileSystem fileSystem = null; + try { + fileSystem = FileSystem.newInstance(conf); + } finally { + if (fileSystem != null) { + fileSystem.close(); + } + } + }); assertThat(e.getMessage()) .contains(ERROR_MAP.get(layout)); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileChecksum.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileChecksum.java index 649ed50a102..7b5a9580805 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileChecksum.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileChecksum.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.ozone; import com.google.common.collect.ImmutableList; +import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileSystem; @@ -39,6 +40,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Timeout; import 
org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; import java.io.IOException; @@ -53,10 +55,13 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.commons.lang3.RandomStringUtils.randomAlphabetic; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE; import static org.apache.hadoop.ozone.TestDataUtil.createBucket; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.params.provider.Arguments.arguments; /** * Test FileChecksum API. @@ -68,10 +73,16 @@ public class TestOzoneFileChecksum { true, false }; - private static final int[] DATA_SIZES = DoubleStream.of(0.5, 1, 1.5, 2, 7, 8) - .mapToInt(mb -> (int) (1024 * 1024 * mb)) + private static final int[] DATA_SIZES_1 = DoubleStream.of(0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 7, 8, 9, 10) + .mapToInt(mb -> (int) (1024 * 1024 * mb) + 510000) .toArray(); + private static final int[] DATA_SIZES_2 = DoubleStream.of(0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 7, 8, 9, 10) + .mapToInt(mb -> (int) (1024 * 1024 * mb) + 820000) + .toArray(); + + private int[] dataSizes = new int[DATA_SIZES_1.length + DATA_SIZES_2.length]; + private OzoneConfiguration conf; private MiniOzoneCluster cluster = null; private FileSystem fs; @@ -84,6 +95,8 @@ public class TestOzoneFileChecksum { void setup() throws IOException, InterruptedException, TimeoutException { conf = new OzoneConfiguration(); + conf.setStorageSize(OZONE_SCM_CHUNK_SIZE_KEY, 1024 * 1024, StorageUnit.BYTES); + conf.setStorageSize(OZONE_SCM_BLOCK_SIZE, 2 * 1024 * 1024, StorageUnit.BYTES); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) .build(); @@ -95,9 +108,8 @@ void setup() throws IOException, OzoneConsts.OZONE_OFS_URI_SCHEME); conf.setBoolean(disableCache, true); conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); - fs = FileSystem.get(conf); - ofs = (RootedOzoneFileSystem) fs; - adapter = (BasicRootedOzoneClientAdapterImpl) ofs.getAdapter(); + System.arraycopy(DATA_SIZES_1, 0, dataSizes, 0, DATA_SIZES_1.length); + System.arraycopy(DATA_SIZES_2, 0, dataSizes, DATA_SIZES_1.length, DATA_SIZES_2.length); } @AfterEach @@ -112,9 +124,13 @@ void teardown() { * Test EC checksum with Replicated checksum. 
*/ @ParameterizedTest - @MethodSource("missingIndexes") - void testEcFileChecksum(List missingIndexes) throws IOException { + @MethodSource("missingIndexesAndChecksumSize") + void testEcFileChecksum(List missingIndexes, double checksumSizeInMB) throws IOException { + conf.setInt("ozone.client.bytes.per.checksum", (int) (checksumSizeInMB * 1024 * 1024)); + fs = FileSystem.get(conf); + ofs = (RootedOzoneFileSystem) fs; + adapter = (BasicRootedOzoneClientAdapterImpl) ofs.getAdapter(); String volumeName = UUID.randomUUID().toString(); String legacyBucket = UUID.randomUUID().toString(); String ecBucketName = UUID.randomUUID().toString(); @@ -139,7 +155,7 @@ void testEcFileChecksum(List missingIndexes) throws IOException { Map replicatedChecksums = new HashMap<>(); - for (int dataLen : DATA_SIZES) { + for (int dataLen : dataSizes) { byte[] data = randomAlphabetic(dataLen).getBytes(UTF_8); try (OutputStream file = adapter.createFile(volumeName + "/" @@ -170,7 +186,7 @@ void testEcFileChecksum(List missingIndexes) throws IOException { clientConf.setBoolean(OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, topologyAware); try (FileSystem fsForRead = FileSystem.get(clientConf)) { - for (int dataLen : DATA_SIZES) { + for (int dataLen : dataSizes) { // Compute checksum after failed DNs Path parent = new Path("/" + volumeName + "/" + ecBucketName + "/"); Path ecKey = new Path(parent, "test" + dataLen); @@ -187,14 +203,13 @@ void testEcFileChecksum(List missingIndexes) throws IOException { } } - static Stream> missingIndexes() { + static Stream missingIndexesAndChecksumSize() { return Stream.of( - ImmutableList.of(0, 1), - ImmutableList.of(1, 2), - ImmutableList.of(2, 3), - ImmutableList.of(3, 4), - ImmutableList.of(0, 3), - ImmutableList.of(0, 4) - ); + arguments(ImmutableList.of(0, 1), 0.001), + arguments(ImmutableList.of(1, 2), 0.01), + arguments(ImmutableList.of(2, 3), 0.1), + arguments(ImmutableList.of(3, 4), 0.5), + arguments(ImmutableList.of(0, 3), 1), + arguments(ImmutableList.of(0, 4), 2)); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java index 8e0bd1ac7de..ccfa0625800 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java @@ -24,6 +24,8 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.TimeUnit; import java.util.stream.Stream; import java.util.concurrent.atomic.AtomicInteger; @@ -49,10 +51,13 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_LISTING_PAGE_SIZE; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_LISTING_PAGE_SIZE_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; import 
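The new parameterisation above sweeps ozone.client.bytes.per.checksum from 0.001 MB up to 2 MB against the 1 MB chunk / 2 MB block layout configured in setup, i.e. it varies how many checksum entries cover each chunk. Back-of-the-envelope, with illustrative numbers rather than values taken from the patch:

// Illustrative arithmetic only, not part of the patch.
int chunkSize = 1024 * 1024;                  // 1 MB chunk, as configured in this test
int bytesPerChecksum = 16 * 1024;             // example value; the test sweeps far smaller and larger ones
int checksumsPerChunk =
    (chunkSize + bytesPerChecksum - 1) / bytesPerChecksum;   // 64 entries for 16 KB
// A tiny bytes-per-checksum value means many entries per chunk (stressing the checksum
// path), while a value at or above the chunk size collapses to a single entry per chunk.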
static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath; import static org.assertj.core.api.Assertions.assertThat; @@ -91,6 +96,8 @@ static void initClass() throws Exception { conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS); conf.setInt(OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, KeyManagerImpl.DISABLE_VALUE); + conf.setInt(OZONE_OM_SERVER_LIST_MAX_SIZE, 20); + conf.setInt(OZONE_FS_LISTING_PAGE_SIZE, 30); // Start the cluster cluster = MiniOzoneCluster.newHABuilder(conf) @@ -289,6 +296,13 @@ void testFsLsSnapshot(@TempDir Path tempDir) throws Exception { String snapshotPath2 = BUCKET_WITH_SNAPSHOT_INDICATOR_PATH + OM_KEY_PREFIX + snapshotName2; String snapshotKeyPath2 = snapshotPath2 + OM_KEY_PREFIX + key2; + List snapshotNames = new ArrayList<>(); + for (int i = 0; i < cluster.getConf().getInt(OZONE_FS_LISTING_PAGE_SIZE, + OZONE_FS_LISTING_PAGE_SIZE_DEFAULT) * 2; i++) { + snapshotNames.add(createSnapshot()); + } + String snapshotName3 = createSnapshot(); + int res = ToolRunner.run(shell, new String[]{"-deleteSnapshot", BUCKET_PATH, snapshotName1}); @@ -313,6 +327,10 @@ void testFsLsSnapshot(@TempDir Path tempDir) throws Exception { assertThat(listSnapOut).doesNotContain(snapshotName1); assertThat(listSnapOut).contains(snapshotName2); + assertThat(listSnapOut).contains(snapshotName3); + for (String snapshotName : snapshotNames) { + assertThat(listSnapOut).contains(snapshotName); + } // Check for snapshot keys with "ozone fs -ls" String listSnapKeyOut = execShellCommandAndGetOutput(1, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitInRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestCommitInRatis.java similarity index 96% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitInRatis.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestCommitInRatis.java index 4ff671df616..7a1366ad682 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitInRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestCommitInRatis.java @@ -15,17 +15,13 @@ * the License. */ -package org.apache.hadoop.ozone.client.rpc; +package org.apache.hadoop.hdds.scm; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientRatis; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.XceiverClientReply; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.protocolPB. 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java index 400c4868a99..e90c576e8dd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java @@ -128,15 +128,14 @@ private DBCheckpoint downloadSnapshot() throws Exception { public void testInstallCheckPoint() throws Exception { DBCheckpoint checkpoint = downloadSnapshot(); StorageContainerManager scm = cluster.getStorageContainerManager(); - DBStore db = HAUtils - .loadDB(conf, checkpoint.getCheckpointLocation().getParent().toFile(), - checkpoint.getCheckpointLocation().getFileName().toString(), - new SCMDBDefinition()); + final Path location = checkpoint.getCheckpointLocation(); + final DBStore db = HAUtils.loadDB(conf, location.getParent().toFile(), + location.getFileName().toString(), SCMDBDefinition.get()); // Hack the transaction index in the checkpoint so as to ensure the // checkpointed transaction index is higher than when it was downloaded // from. assertNotNull(db); - HAUtils.getTransactionInfoTable(db, new SCMDBDefinition()) + HAUtils.getTransactionInfoTable(db, SCMDBDefinition.get()) .put(OzoneConsts.TRANSACTION_INFO_KEY, TransactionInfo.valueOf(10, 100)); db.close(); ContainerID cid = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java index 10492736144..e55355525a6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java @@ -224,9 +224,8 @@ public void testInstallCorruptedCheckpointFailure() throws Exception { DBCheckpoint leaderDbCheckpoint = leaderSCM.getScmMetadataStore().getStore() .getCheckpoint(false); Path leaderCheckpointLocation = leaderDbCheckpoint.getCheckpointLocation(); - TransactionInfo leaderCheckpointTrxnInfo = HAUtils - .getTrxnInfoFromCheckpoint(conf, leaderCheckpointLocation, - new SCMDBDefinition()); + final TransactionInfo leaderCheckpointTrxnInfo = HAUtils.getTrxnInfoFromCheckpoint( + conf, leaderCheckpointLocation, SCMDBDefinition.get()); assertNotNull(leaderCheckpointLocation); // Take a backup of the current DB diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java index fb92d91ee71..45458182344 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hdds.protocol.SecretKeyProtocol; import org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.hdds.security.exception.SCMSecretKeyException; import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.minikdc.MiniKdc; @@ -67,7 +66,6 @@ import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY; import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY; -import static org.apache.hadoop.hdds.security.exception.SCMSecretKeyException.ErrorCode.SECRET_KEY_NOT_ENABLED; import static org.apache.hadoop.hdds.utils.HddsServerUtil.getSecretKeyClientForDatanode; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; @@ -245,24 +243,14 @@ public void testSecretKeyApiSuccess() throws Exception { } /** - * Verify API behavior when block token is not enable. + * Verify API behavior. */ @Test - public void testSecretKeyApiNotEnabled() throws Exception { + public void testSecretKeyApi() throws Exception { startCluster(1); SecretKeyProtocol secretKeyProtocol = getSecretKeyProtocol(); - - SCMSecretKeyException ex = assertThrows(SCMSecretKeyException.class, - secretKeyProtocol::getCurrentSecretKey); - assertEquals(SECRET_KEY_NOT_ENABLED, ex.getErrorCode()); - - ex = assertThrows(SCMSecretKeyException.class, - () -> secretKeyProtocol.getSecretKey(UUID.randomUUID())); - assertEquals(SECRET_KEY_NOT_ENABLED, ex.getErrorCode()); - - ex = assertThrows(SCMSecretKeyException.class, - secretKeyProtocol::getAllSecretKeys); - assertEquals(SECRET_KEY_NOT_ENABLED, ex.getErrorCode()); + assertNull(secretKeyProtocol.getSecretKey(UUID.randomUUID())); + assertEquals(1, secretKeyProtocol.getAllSecretKeys().size()); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java index 2986484d2ad..2f9c8c938a3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.ha.SCMHAMetrics; +import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; import org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; @@ -43,9 +44,10 @@ import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.statemachine.SnapshotInfo; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; import java.time.Instant; @@ -54,7 +56,9 @@ import java.util.List; import java.util.Set; import java.util.UUID; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; @@ -72,6 +76,8 @@ @Timeout(300) public class TestStorageContainerManagerHA { + private static final Logger LOG = LoggerFactory.getLogger(TestStorageContainerManagerHA.class); + private MiniOzoneHAClusterImpl cluster = null; private 
OzoneConfiguration conf; private String omServiceId; @@ -86,7 +92,6 @@ public class TestStorageContainerManagerHA { * * @throws IOException */ - @BeforeEach public void init() throws Exception { conf = new OzoneConfiguration(); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s"); @@ -118,6 +123,7 @@ public void shutdown() { @Test void testAllSCMAreRunning() throws Exception { + init(); int count = 0; List scms = cluster.getStorageContainerManagers(); assertEquals(numOfSCMs, scms.size()); @@ -129,6 +135,9 @@ void testAllSCMAreRunning() throws Exception { count++; leaderScm = scm; } + if (SCMHAUtils.isSCMHAEnabled(conf)) { + assertNotNull(scm.getScmHAManager().getRatisServer().getLeaderId()); + } assertEquals(peerSize, numOfSCMs); } assertEquals(1, count); @@ -246,6 +255,7 @@ private boolean areAllScmInSync(long leaderIndex) { @Test public void testPrimordialSCM() throws Exception { + init(); StorageContainerManager scm1 = cluster.getStorageContainerManagers().get(0); StorageContainerManager scm2 = cluster.getStorageContainerManagers().get(1); OzoneConfiguration conf1 = scm1.getConfiguration(); @@ -264,6 +274,7 @@ public void testPrimordialSCM() throws Exception { @Test public void testBootStrapSCM() throws Exception { + init(); StorageContainerManager scm2 = cluster.getStorageContainerManagers().get(1); OzoneConfiguration conf2 = scm2.getConfiguration(); boolean isDeleted = scm2.getScmStorageConfig().getVersionFile().delete(); @@ -323,4 +334,72 @@ private void waitForLeaderToBeReady() }, 1000, (int) ScmConfigKeys .OZONE_SCM_HA_RATIS_LEADER_READY_WAIT_TIMEOUT_DEFAULT); } + + @Test + public void testSCMLeadershipMetric() throws IOException, InterruptedException { + // GIVEN + int scmInstancesCount = 3; + conf = new OzoneConfiguration(); + MiniOzoneHAClusterImpl.Builder haMiniClusterBuilder = MiniOzoneCluster.newHABuilder(conf) + .setSCMServiceId("scm-service-id") + .setOMServiceId("om-service-id") + .setNumOfActiveOMs(0) + .setNumOfStorageContainerManagers(scmInstancesCount) + .setNumOfActiveSCMs(1); + haMiniClusterBuilder.setNumDatanodes(0); + + // start single SCM instance without other Ozone services + // in order to initialize and bootstrap SCM instances only + cluster = haMiniClusterBuilder.build(); + + List storageContainerManagersList = cluster.getStorageContainerManagersList(); + + // stop the single SCM instance in order to imitate further simultaneous start of SCMs + storageContainerManagersList.get(0).stop(); + storageContainerManagersList.get(0).join(); + + // WHEN (imitate simultaneous start of the SCMs) + int retryCount = 0; + while (true) { + CountDownLatch scmInstancesCounter = new CountDownLatch(scmInstancesCount); + AtomicInteger failedSCMs = new AtomicInteger(); + for (StorageContainerManager scm : storageContainerManagersList) { + new Thread(() -> { + try { + scm.start(); + } catch (IOException e) { + failedSCMs.incrementAndGet(); + } finally { + scmInstancesCounter.countDown(); + } + }).start(); + } + scmInstancesCounter.await(); + if (failedSCMs.get() == 0) { + break; + } else { + for (StorageContainerManager scm : storageContainerManagersList) { + scm.stop(); + scm.join(); + LOG.info("Stopping StorageContainerManager server at {}", + scm.getClientRpcAddress()); + } + ++retryCount; + LOG.info("SCMs port conflicts, retried {} times", + retryCount); + failedSCMs.set(0); + } + } + + // THEN expect only one SCM node (leader) will have 'scmha_metrics_scmha_leader_state' metric set to 1 + int leaderCount = 0; + for (StorageContainerManager scm : 
storageContainerManagersList) { + if (scm.getScmHAMetrics() != null && scm.getScmHAMetrics().getSCMHAMetricsInfoLeaderState() == 1) { + leaderCount++; + break; + } + } + assertEquals(1, leaderCount); + } + } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestWatchForCommit.java similarity index 98% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestWatchForCommit.java index bec14b23b0f..4c950e7d725 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestWatchForCommit.java @@ -15,7 +15,7 @@ * the License. */ -package org.apache.hadoop.ozone.client.rpc; +package org.apache.hadoop.hdds.scm; import java.io.IOException; import java.io.OutputStream; @@ -35,12 +35,6 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; -import org.apache.hadoop.hdds.scm.OzoneClientConfig; -import org.apache.hadoop.hdds.scm.XceiverClientRatis; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientReply; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientManager.java index 95a0b0e17fd..83a9b106c8f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientManager.java @@ -29,16 +29,16 @@ import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; import java.io.IOException; -import java.util.UUID; +import java.nio.file.Path; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; @@ -82,12 +82,10 @@ public static void shutdown() { @ParameterizedTest(name = "Ozone security enabled: {0}") @ValueSource(booleans = {false, true}) - public void testCaching(boolean securityEnabled) throws IOException { + public void testCaching(boolean securityEnabled, @TempDir Path metaDir) throws IOException { OzoneConfiguration conf = new OzoneConfiguration(); conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, securityEnabled); - String metaDir = GenericTestUtils.getTempPath( - 
TestXceiverClientManager.class.getName() + UUID.randomUUID()); - conf.set(HDDS_METADATA_DIR_NAME, metaDir); + conf.set(HDDS_METADATA_DIR_NAME, metaDir.toString()); ClientTrustManager trustManager = mock(ClientTrustManager.class); try (XceiverClientManager clientManager = new XceiverClientManager(conf, @@ -124,13 +122,11 @@ public void testCaching(boolean securityEnabled) throws IOException { } @Test - public void testFreeByReference() throws IOException { + public void testFreeByReference(@TempDir Path metaDir) throws IOException { OzoneConfiguration conf = new OzoneConfiguration(); ScmClientConfig clientConfig = conf.getObject(ScmClientConfig.class); clientConfig.setMaxSize(1); - String metaDir = GenericTestUtils.getTempPath( - TestXceiverClientManager.class.getName() + UUID.randomUUID()); - conf.set(HDDS_METADATA_DIR_NAME, metaDir); + conf.set(HDDS_METADATA_DIR_NAME, metaDir.toString()); try (XceiverClientManager clientManager = new XceiverClientManager(conf, clientConfig, null)) { Cache cache = @@ -181,13 +177,11 @@ public void testFreeByReference() throws IOException { } @Test - public void testFreeByEviction() throws IOException { + public void testFreeByEviction(@TempDir Path metaDir) throws IOException { OzoneConfiguration conf = new OzoneConfiguration(); ScmClientConfig clientConfig = conf.getObject(ScmClientConfig.class); clientConfig.setMaxSize(1); - String metaDir = GenericTestUtils.getTempPath( - TestXceiverClientManager.class.getName() + UUID.randomUUID()); - conf.set(HDDS_METADATA_DIR_NAME, metaDir); + conf.set(HDDS_METADATA_DIR_NAME, metaDir.toString()); try (XceiverClientManager clientManager = new XceiverClientManager(conf, clientConfig, null)) { Cache cache = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientMetrics.java index c4f62040536..d789112e471 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientMetrics.java @@ -22,9 +22,9 @@ import static org.apache.ozone.test.MetricsAsserts.getLongCounter; import static org.apache.ozone.test.MetricsAsserts.getMetrics; +import java.nio.file.Path; import java.util.List; import java.util.ArrayList; -import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; @@ -41,10 +41,12 @@ import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.apache.ozone.test.GenericTestUtils; +import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; /** * This class tests the metrics of XceiverClient. 
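The conversions above in TestXceiverClientManager (and similar ones elsewhere in this patch) replace GenericTestUtils temp paths with JUnit 5's @TempDir injection, so the framework creates a fresh directory per test and deletes it afterwards. A minimal sketch of the pattern, with hypothetical names:

// Assumes org.junit.jupiter.api.io.TempDir and java.nio.file imports.
class TempDirSketch {
  @Test
  void writesIntoInjectedDir(@TempDir Path dir) throws IOException {
    Path file = dir.resolve("data.bin");
    Files.write(file, new byte[] {1, 2, 3});
    assertTrue(Files.exists(file));
    // JUnit removes 'dir' recursively once the test finishes.
  }
}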
@@ -76,11 +78,10 @@ public static void shutdown() { } @Test - public void testMetrics() throws Exception { + @Flaky("HDDS-11646") + public void testMetrics(@TempDir Path metaDir) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - String metaDir = GenericTestUtils.getTempPath( - TestXceiverClientManager.class.getName() + UUID.randomUUID()); - conf.set(HDDS_METADATA_DIR_NAME, metaDir); + conf.set(HDDS_METADATA_DIR_NAME, metaDir.toString()); try (XceiverClientManager clientManager = new XceiverClientManager(conf)) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java index 6f79839cd02..1b7eb837cf8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java @@ -460,7 +460,7 @@ public void testCreateRecoveryContainer() throws Exception { int replicaIndex = 4; XceiverClientSpi dnClient = xceiverClientManager.acquireClient( createSingleNodePipeline(newPipeline, newPipeline.getNodes().get(0), - replicaIndex)); + 2)); try { // To create the actual situation, container would have been in closed // state at SCM. diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java index 9c76c0ec0c7..ff55ee83c17 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java @@ -35,11 +35,14 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.recon.ReconServer; +import org.apache.hadoop.ozone.s3.Gateway; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.util.ExitUtils; import org.apache.ratis.util.function.CheckedFunction; +import com.amazonaws.services.s3.AmazonS3; + /** * Interface used for MiniOzoneClusters. */ @@ -142,10 +145,17 @@ void waitForPipelineTobeReady(HddsProtos.ReplicationFactor factor, /** * Returns a {@link ReconServer} instance. * - * @return List of {@link ReconServer} + * @return {@link ReconServer} instance if it is initialized, otherwise null. */ ReconServer getReconServer(); + /** + * Returns a {@link Gateway} instance. + * + * @return {@link Gateway} instance if it is initialized, otherwise null. + */ + Gateway getS3G(); + /** * Returns an {@link OzoneClient} to access the {@link MiniOzoneCluster}. * The caller is responsible for closing the client after use. @@ -154,6 +164,11 @@ void waitForPipelineTobeReady(HddsProtos.ReplicationFactor factor, */ OzoneClient newClient() throws IOException; + /** + * Returns an {@link AmazonS3} to access the {@link MiniOzoneCluster}. + */ + AmazonS3 newS3Client(); + /** * Returns StorageContainerLocationClient to communicate with * {@link StorageContainerManager} associated with the MiniOzoneCluster. @@ -219,6 +234,21 @@ void restartHddsDatanode(DatanodeDetails dn, boolean waitForDatanode) */ void stopRecon(); + /** + * Start S3G. + */ + void startS3G(); + + /** + * Restart S3G. + */ + void restartS3G(); + + /** + * Stop S3G. 
+ */ + void stopS3G(); + /** * Shutdown the MiniOzoneCluster and delete the storage dirs. */ @@ -273,6 +303,7 @@ abstract class Builder { protected String omId = UUID.randomUUID().toString(); protected boolean includeRecon = false; + protected boolean includeS3G = false; protected int dnInitialVersion = DatanodeVersion.FUTURE_VERSION.toProtoValue(); protected int dnCurrentVersion = DatanodeVersion.COMBINED_PUTBLOCK_WRITECHUNK_RPC.toProtoValue(); @@ -382,6 +413,11 @@ public Builder includeRecon(boolean include) { return this; } + public Builder includeS3G(boolean include) { + this.includeS3G = include; + return this; + } + /** * Constructs and returns MiniOzoneCluster. * diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index 50013b57f4c..3594996856a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -32,6 +32,14 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import com.amazonaws.ClientConfiguration; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.AWSStaticCredentialsProvider; +import com.amazonaws.auth.BasicAWSCredentials; +import com.amazonaws.client.builder.AwsClientBuilder; +import com.amazonaws.regions.Regions; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.AmazonS3ClientBuilder; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.DatanodeVersion; import org.apache.hadoop.hdds.HddsConfigKeys; @@ -58,6 +66,7 @@ import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.security.symmetric.SecretKeyClient; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; +import org.apache.hadoop.hdds.server.http.HttpConfig; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.CodecBuffer; import org.apache.hadoop.hdds.utils.db.CodecTestUtil; @@ -73,6 +82,10 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.recon.ConfigurationProvider; import org.apache.hadoop.ozone.recon.ReconServer; +import org.apache.hadoop.ozone.s3.Gateway; +import org.apache.hadoop.ozone.s3.OzoneClientCache; +import org.apache.hadoop.ozone.s3.OzoneConfigurationHolder; +import org.apache.hadoop.ozone.s3.S3GatewayConfigKeys; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.ozone.test.GenericTestUtils; @@ -84,9 +97,14 @@ import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_DATANODE_ADDRESS_KEY; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_HTTP_ADDRESS_KEY; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_TASK_SAFEMODE_WAIT_THRESHOLD; +import static org.apache.hadoop.hdds.server.http.HttpConfig.getHttpPolicy; +import static org.apache.hadoop.hdds.server.http.HttpServer2.HTTPS_SCHEME; +import static org.apache.hadoop.hdds.server.http.HttpServer2.HTTP_SCHEME; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB_DIR; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_DB_DIR; +import static 
org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_HTTPS_ADDRESS_KEY; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_HTTP_ADDRESS_KEY; import static org.apache.ozone.test.GenericTestUtils.PortAllocator.anyHostWithFreePort; import static org.apache.ozone.test.GenericTestUtils.PortAllocator.getFreePort; import static org.apache.ozone.test.GenericTestUtils.PortAllocator.localhostWithFreePort; @@ -120,6 +138,7 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster { private OzoneManager ozoneManager; private final List hddsDatanodes; private ReconServer reconServer; + private Gateway s3g; // Timeout for the cluster to be ready private int waitForClusterToBeReadyTimeout = 120000; // 2 min @@ -136,13 +155,15 @@ private MiniOzoneClusterImpl(OzoneConfiguration conf, OzoneManager ozoneManager, StorageContainerManager scm, List hddsDatanodes, - ReconServer reconServer) { + ReconServer reconServer, + Gateway s3g) { this.conf = conf; this.ozoneManager = ozoneManager; this.scm = scm; this.hddsDatanodes = hddsDatanodes; this.reconServer = reconServer; this.scmConfigurator = scmConfigurator; + this.s3g = s3g; } /** @@ -268,6 +289,11 @@ public ReconServer getReconServer() { return this.reconServer; } + @Override + public Gateway getS3G() { + return this.s3g; + } + @Override public int getHddsDatanodeIndex(DatanodeDetails dn) throws IOException { for (HddsDatanodeService service : hddsDatanodes) { @@ -286,6 +312,54 @@ public OzoneClient newClient() throws IOException { return client; } + @Override + public AmazonS3 newS3Client() { + // TODO: Parameterize tests between Virtual host style and Path style + return createS3Client(true); + } + + public AmazonS3 createS3Client(boolean enablePathStyle) { + final String accessKey = "user"; + final String secretKey = "password"; + final Regions region = Regions.DEFAULT_REGION; + + final String protocol; + final HttpConfig.Policy webPolicy = getHttpPolicy(conf); + String host; + + if (webPolicy.isHttpsEnabled()) { + protocol = HTTPS_SCHEME; + host = conf.get(OZONE_S3G_HTTPS_ADDRESS_KEY); + } else { + protocol = HTTP_SCHEME; + host = conf.get(OZONE_S3G_HTTP_ADDRESS_KEY); + } + + String endpoint = protocol + "://" + host; + + AWSCredentialsProvider credentials = new AWSStaticCredentialsProvider( + new BasicAWSCredentials(accessKey, secretKey) + ); + + + ClientConfiguration clientConfiguration = new ClientConfiguration(); + LOG.info("S3 Endpoint is {}", endpoint); + + AmazonS3 s3Client = + AmazonS3ClientBuilder.standard() + .withPathStyleAccessEnabled(enablePathStyle) + .withEndpointConfiguration( + new AwsClientBuilder.EndpointConfiguration( + endpoint, region.getName() + ) + ) + .withClientConfiguration(clientConfiguration) + .withCredentials(credentials) + .build(); + + return s3Client; + } + protected OzoneClient createClient() throws IOException { return OzoneClientFactory.getRpcClient(conf); } @@ -428,6 +502,7 @@ public void stop() { stopDatanodes(hddsDatanodes); stopSCM(scm); stopRecon(reconServer); + stopS3G(s3g); } private void startHddsDatanode(HddsDatanodeService datanode) { @@ -467,6 +542,23 @@ public void stopRecon() { stopRecon(reconServer); } + @Override + public void startS3G() { + s3g = new Gateway(); + s3g.execute(NO_ARGS); + } + + @Override + public void restartS3G() { + stopS3G(s3g); + startS3G(); + } + + @Override + public void stopS3G() { + stopS3G(s3g); + } + private CertificateClient getCAClient() { return this.caClient; } @@ -521,6 +613,19 @@ private static void stopRecon(ReconServer reconServer) { } } + 
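With the includeS3G builder flag, the Gateway wiring, and newS3Client() added above, an integration test could exercise Ozone through the S3 interface roughly as follows. This is a hedged sketch, not part of the patch: bucket and key names are made up, error handling is omitted, and org.apache.commons.io.IOUtils plus the usual JUnit assertions are assumed to be imported.

// Sketch: spin up a mini cluster with the S3 gateway and do a put/get round trip.
OzoneConfiguration conf = new OzoneConfiguration();
MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
    .setNumDatanodes(3)
    .includeS3G(true)                       // also start the S3 Gateway
    .build();
try {
  cluster.waitForClusterToBeReady();
  AmazonS3 s3 = cluster.newS3Client();      // path-style client against the local gateway
  s3.createBucket("demo-bucket");
  s3.putObject("demo-bucket", "demo-key", "hello");
  String back = IOUtils.toString(
      s3.getObject("demo-bucket", "demo-key").getObjectContent(), UTF_8);
  assertEquals("hello", back);
} finally {
  cluster.shutdown();
}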
private static void stopS3G(Gateway s3g) { + try { + if (s3g != null) { + LOG.info("Stopping S3G"); + // TODO (HDDS-11539): Remove this workaround once the @PreDestroy issue is fixed + OzoneClientCache.closeClient(); + s3g.stop(); + } + } catch (Exception e) { + LOG.error("Exception while shutting down S3 Gateway.", e); + } + } + /** * Builder for configuring the MiniOzoneCluster to run. */ @@ -544,15 +649,17 @@ public MiniOzoneCluster build() throws IOException { OzoneManager om = null; ReconServer reconServer = null; List hddsDatanodes = Collections.emptyList(); + Gateway s3g = null; try { scm = createAndStartSingleSCM(); om = createAndStartSingleOM(); + s3g = createS3G(); reconServer = createRecon(); hddsDatanodes = createHddsDatanodes(); MiniOzoneClusterImpl cluster = new MiniOzoneClusterImpl(conf, scmConfigurator, om, scm, - hddsDatanodes, reconServer); + hddsDatanodes, reconServer, s3g); cluster.setCAClient(certClient); cluster.setSecretKeyClient(secretKeyClient); @@ -567,6 +674,9 @@ public MiniOzoneCluster build() throws IOException { if (includeRecon) { stopRecon(reconServer); } + if (includeS3G) { + stopS3G(s3g); + } if (startDataNodes) { stopDatanodes(hddsDatanodes); } @@ -740,6 +850,16 @@ protected ReconServer createRecon() { return reconServer; } + protected Gateway createS3G() { + Gateway s3g = null; + if (includeS3G) { + configureS3G(); + s3g = new Gateway(); + s3g.execute(NO_ARGS); + } + return s3g; + } + /** * Creates HddsDatanodeService(s) instance. * @@ -806,5 +926,14 @@ protected void configureRecon() { ConfigurationProvider.setConfiguration(conf); } + private void configureS3G() { + OzoneConfigurationHolder.resetConfiguration(); + + conf.set(S3GatewayConfigKeys.OZONE_S3G_HTTP_ADDRESS_KEY, localhostWithFreePort()); + conf.set(S3GatewayConfigKeys.OZONE_S3G_HTTPS_ADDRESS_KEY, localhostWithFreePort()); + + OzoneConfigurationHolder.setConfiguration(conf); + } + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java index 26c1868084f..625cf15ea56 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java @@ -46,12 +46,12 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; -import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.util.ExitUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -109,9 +109,10 @@ public final class TestBlockTokens { private static final int EXPIRY_DURATION_IN_MS = 10000; private static final int ROTATION_CHECK_DURATION_IN_MS = 100; + @TempDir + private static File workDir; private static MiniKdc miniKdc; private static OzoneConfiguration conf; - private static File workDir; private static File ozoneKeytab; private static File spnegoKeytab; private static File testUserKeytab; @@ -129,9 +130,6 @@ public static void init() throws Exception { ExitUtils.disableSystemExit(); - workDir = - GenericTestUtils.getTestDir(TestBlockTokens.class.getSimpleName()); - startMiniKdc(); setSecureConfig(); createCredentialsInKDC(); diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java index 87242cb2790..038248945a4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java @@ -33,6 +33,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.util.ExitUtils; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -86,10 +87,12 @@ public final class TestBlockTokensCLI { private static final Logger LOG = LoggerFactory .getLogger(TestBlockTokensCLI.class); + + @TempDir + private static File workDir; private static MiniKdc miniKdc; private static OzoneAdmin ozoneAdmin; private static OzoneConfiguration conf; - private static File workDir; private static File ozoneKeytab; private static File spnegoKeytab; private static String host; @@ -100,13 +103,12 @@ public final class TestBlockTokensCLI { @BeforeAll public static void init() throws Exception { - conf = new OzoneConfiguration(); + ozoneAdmin = new OzoneAdmin(); + conf = ozoneAdmin.getOzoneConf(); conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost"); ExitUtils.disableSystemExit(); - workDir = - GenericTestUtils.getTestDir(TestBlockTokens.class.getSimpleName()); omServiceId = "om-service-test"; scmServiceId = "scm-service-test"; @@ -116,7 +118,6 @@ public static void init() throws Exception { setSecretKeysConfig(); startCluster(); client = cluster.newClient(); - ozoneAdmin = new OzoneAdmin(conf); } @AfterAll diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java index cbd1829ef0c..798e8a15991 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java @@ -77,6 +77,7 @@ public static void setup() throws Exception { ozoneConf = new OzoneConfiguration(); ozoneConf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, SCMContainerPlacementCapacity.class, PlacementPolicy.class); + ozoneConf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT, "1"); cluster = MiniOzoneCluster.newBuilder(ozoneConf).setNumDatanodes(3).build(); storageClient = new ContainerOperationClient(ozoneConf); cluster.waitForClusterToBeReady(); @@ -144,6 +145,24 @@ public void testCreate() throws Exception { .getContainerID()); } + /** + * Test to try to list number of containers over the max number Ozone allows. + * @throws Exception + */ + @Test + public void testListContainerExceedMaxAllowedCountOperations() throws Exception { + // create 2 containers in cluster where the limit of max count for + // listing container is set to 1 + for (int i = 0; i < 2; i++) { + storageClient.createContainer(HddsProtos + .ReplicationType.STAND_ALONE, HddsProtos.ReplicationFactor + .ONE, OzoneConsts.OZONE); + } + + assertEquals(1, storageClient.listContainer(0, 2) + .getContainerInfoList().size()); + } + /** * A simple test to get Pipeline with {@link ContainerOperationClient}. 
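The new testListContainerExceedMaxAllowedCountOperations above shows that the SCM truncates a single listContainer call to ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT entries. A caller that needs the complete set therefore has to page by start container ID, along these lines (a sketch only; batch size and variable names are illustrative):

// Sketch: page through containers when each call is capped server-side.
long startId = 0;
List<ContainerInfo> all = new ArrayList<>();
while (true) {
  List<ContainerInfo> batch =
      storageClient.listContainer(startId, 100).getContainerInfoList();
  if (batch.isEmpty()) {
    break;
  }
  all.addAll(batch);
  startId = batch.get(batch.size() - 1).getContainerID() + 1;   // resume after the last one seen
}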
* @throws Exception diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java index 4488e467c29..eb54e4a2519 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java @@ -164,21 +164,57 @@ private static BucketLayout getDefaultBucketLayout(OzoneClient client) { public static OzoneBucket createBucket(OzoneClient client, String vol, BucketArgs bucketArgs, String bukName) throws IOException { + return createBucket(client, vol, bucketArgs, bukName, false); + } + + public static OzoneBucket createBucket(OzoneClient client, + String vol, BucketArgs bucketArgs, String bukName, + boolean createLinkedBucket) + throws IOException { ObjectStore objectStore = client.getObjectStore(); OzoneVolume volume = objectStore.getVolume(vol); - volume.createBucket(bukName, bucketArgs); - return volume.getBucket(bukName); + String sourceBucket = bukName; + if (createLinkedBucket) { + sourceBucket = bukName + RandomStringUtils.randomNumeric(5); + } + volume.createBucket(sourceBucket, bucketArgs); + OzoneBucket ozoneBucket = volume.getBucket(sourceBucket); + if (createLinkedBucket) { + ozoneBucket = createLinkedBucket(client, vol, sourceBucket, bukName); + } + return ozoneBucket; + } + + public static OzoneBucket createLinkedBucket(OzoneClient client, String vol, String sourceBucketName, + String linkedBucketName) throws IOException { + BucketArgs.Builder bb = new BucketArgs.Builder() + .setStorageType(StorageType.DEFAULT) + .setVersioning(false) + .setSourceVolume(vol) + .setSourceBucket(sourceBucketName); + return createBucket(client, vol, bb.build(), linkedBucketName); + } + + public static OzoneBucket createVolumeAndBucket(OzoneClient client, + BucketLayout bucketLayout) + throws IOException { + return createVolumeAndBucket(client, bucketLayout, false); } public static OzoneBucket createVolumeAndBucket(OzoneClient client, - BucketLayout bucketLayout) throws IOException { + BucketLayout bucketLayout, boolean createLinkedBucket) throws IOException { final int attempts = 5; for (int i = 0; i < attempts; i++) { try { String volumeName = "volume" + RandomStringUtils.randomNumeric(5); String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - return createVolumeAndBucket(client, volumeName, bucketName, + OzoneBucket ozoneBucket = createVolumeAndBucket(client, volumeName, bucketName, bucketLayout); + if (createLinkedBucket) { + String targetBucketName = ozoneBucket.getName() + RandomStringUtils.randomNumeric(5); + ozoneBucket = createLinkedBucket(client, volumeName, bucketName, targetBucketName); + } + return ozoneBucket; } catch (OMException e) { if (e.getResult() != OMException.ResultCodes.VOLUME_ALREADY_EXISTS && e.getResult() != OMException.ResultCodes.BUCKET_ALREADY_EXISTS) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java index 099c1d2e1ff..e0c2a292397 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java @@ -119,10 +119,11 @@ public final class TestDelegationToken { @TempDir private Path folder; + @TempDir + private File workDir; private MiniKdc 
miniKdc; private OzoneConfiguration conf; - private File workDir; private File scmKeytab; private File spnegoKeytab; private File omKeyTab; @@ -166,8 +167,6 @@ public void init() { conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.name()); - workDir = GenericTestUtils.getTestDir(getClass().getSimpleName()); - startMiniKdc(); setSecureConfig(); createCredentialsInKDC(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java index 8a55dc7b7d0..2a150683001 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java @@ -139,7 +139,7 @@ private CompleteMultipartUploadRequest.Part uploadPart(String uploadID, ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); Response response = REST.put(BUCKET, KEY, content.length(), - partNumber, uploadID, body); + partNumber, uploadID, null, null, body); assertEquals(200, response.getStatus()); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -168,7 +168,7 @@ private void completeMultipartUpload( private void getObjectMultipart(int partNumber, long bytes) throws IOException, OS3Exception { Response response = - REST.get(BUCKET, KEY, partNumber, null, 100, null); + REST.get(BUCKET, KEY, partNumber, null, 100, null, null); assertEquals(200, response.getStatus()); assertEquals(bytes, response.getLength()); assertEquals("3", response.getHeaderString(MP_PARTS_COUNT)); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java index cef872597e4..6b7dde3934d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone; import com.google.common.collect.ImmutableMap; -import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.DFSConfigKeysLegacy; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -32,14 +31,15 @@ import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.net.StaticMapping; +import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.om.KeyManagerImpl; import org.apache.hadoop.ozone.om.OmTestManagers; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import java.io.File; import java.util.List; @@ -61,23 +61,25 @@ @Timeout(300) public class TestOMSortDatanodes { + @TempDir + private static File dir; private static OzoneConfiguration config; private static StorageContainerManager scm; private static NodeManager nodeManager; private static KeyManagerImpl keyManager; private static StorageContainerLocationProtocol mockScmContainerClient; private static OzoneManager om; - private static File dir; private static final int NODE_COUNT = 10; private static final Map EDGE_NODES = 
ImmutableMap.of( "edge0", "/rack0", "edge1", "/rack1" ); + private static OzoneClient ozoneClient; + @BeforeAll public static void setup() throws Exception { config = new OzoneConfiguration(); - dir = GenericTestUtils.getRandomizedTestDir(); config.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString()); config.set(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, StaticMapping.class.getName()); @@ -109,11 +111,15 @@ public static void setup() throws Exception { = new OmTestManagers(config, scm.getBlockProtocolServer(), mockScmContainerClient); om = omTestManagers.getOzoneManager(); + ozoneClient = omTestManagers.getRpcClient(); keyManager = (KeyManagerImpl)omTestManagers.getKeyManager(); } @AfterAll public static void cleanup() throws Exception { + if (ozoneClient != null) { + ozoneClient.close(); + } if (scm != null) { scm.stop(); scm.join(); @@ -121,7 +127,6 @@ public static void cleanup() throws Exception { if (om != null) { om.stop(); } - FileUtils.deleteDirectory(dir); } @Test diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java index 8a219514d34..1fbfc1f1f70 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java @@ -126,6 +126,7 @@ private void addPropertiesNotInXml() { OMConfigKeys.OZONE_RANGER_HTTPS_ADDRESS_KEY, OMConfigKeys.OZONE_OM_RANGER_HTTPS_ADMIN_API_USER, OMConfigKeys.OZONE_OM_RANGER_HTTPS_ADMIN_API_PASSWD, + OMConfigKeys.OZONE_THREAD_NUMBER_DIR_DELETION, ScmConfigKeys.OZONE_SCM_PIPELINE_PLACEMENT_IMPL_KEY, ScmConfigKeys.OZONE_SCM_HA_PREFIX, S3GatewayConfigKeys.OZONE_S3G_FSO_DIRECTORY_CREATION_ENABLED, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java index 4f41d516153..637e8bd9e4f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java @@ -64,6 +64,7 @@ import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; import org.apache.hadoop.hdds.security.SecurityConfig; +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; import org.apache.hadoop.hdds.security.symmetric.SecretKeyManager; import org.apache.hadoop.hdds.security.x509.certificate.authority.CAType; import org.apache.hadoop.hdds.security.x509.certificate.authority.DefaultApprover; @@ -100,6 +101,7 @@ import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; import org.apache.hadoop.ozone.security.OMCertificateClient; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; +import org.apache.hadoop.ozone.security.SecretKeyTestClient; import org.apache.hadoop.security.KerberosAuthException; import org.apache.hadoop.security.SaslRpcServer.AuthMethod; import org.apache.hadoop.security.SecurityUtil; @@ -152,7 +154,6 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.USER_MISMATCH; import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS; -import org.apache.ozone.test.LambdaTestUtils; import 
org.apache.ozone.test.tag.Flaky; import org.apache.ozone.test.tag.Unhealthy; import org.apache.ratis.protocol.ClientId; @@ -1182,10 +1183,10 @@ public String renewAndStoreKeyAndCertificate(boolean force) throws CertificateEx } /** - * Tests delegation token renewal after a certificate renew. + * Tests delegation token renewal after a secret key rotation. */ @Test - void testDelegationTokenRenewCrossCertificateRenew() throws Exception { + void testDelegationTokenRenewCrossSecretKeyRotation() throws Exception { initSCM(); try { scm = HddsTestUtils.getScmSimple(conf); @@ -1206,11 +1207,12 @@ void testDelegationTokenRenewCrossCertificateRenew() throws Exception { CertificateClientTestImpl certClient = new CertificateClientTestImpl(newConf, true); - X509Certificate omCert = certClient.getCertificate(); - String omCertId1 = omCert.getSerialNumber().toString(); // Start OM om.setCertClient(certClient); om.setScmTopologyClient(new ScmTopologyClient(scmBlockClient)); + SecretKeyTestClient secretKeyClient = new SecretKeyTestClient(); + ManagedSecretKey secretKey1 = secretKeyClient.getCurrentSecretKey(); + om.setSecretKeyClient(secretKeyClient); om.start(); GenericTestUtils.waitFor(() -> om.isLeaderReady(), 100, 10000); @@ -1231,30 +1233,26 @@ void testDelegationTokenRenewCrossCertificateRenew() throws Exception { assertEquals(SecurityUtil.buildTokenService( om.getNodeDetails().getRpcAddress()).toString(), token1.getService().toString()); - assertEquals(omCertId1, token1.decodeIdentifier().getOmCertSerialId()); + assertEquals(secretKey1.getId().toString(), token1.decodeIdentifier().getSecretKeyId()); // Renew delegation token long expiryTime = omClient.renewDelegationToken(token1); assertThat(expiryTime).isGreaterThan(0); - // Wait for OM certificate to renew - LambdaTestUtils.await(certLifetime, 100, () -> - !StringUtils.equals(token1.decodeIdentifier().getOmCertSerialId(), - omClient.getDelegationToken(new Text("om")) - .decodeIdentifier().getOmCertSerialId())); - String omCertId2 = - certClient.getCertificate().getSerialNumber().toString(); - assertNotEquals(omCertId1, omCertId2); + // Rotate secret key + secretKeyClient.rotate(); + ManagedSecretKey secretKey2 = secretKeyClient.getCurrentSecretKey(); + assertNotEquals(secretKey1.getId(), secretKey2.getId()); // Get a new delegation token Token token2 = omClient.getDelegationToken( new Text("om")); - assertEquals(omCertId2, token2.decodeIdentifier().getOmCertSerialId()); + assertEquals(secretKey2.getId().toString(), token2.decodeIdentifier().getSecretKeyId()); - // Because old certificate is still valid, so renew old token will succeed + // Because old secret key is still valid, so renew old token will succeed expiryTime = omClient.renewDelegationToken(token1); assertThat(expiryTime) .isGreaterThan(0) - .isLessThan(omCert.getNotAfter().getTime()); + .isLessThan(secretKey2.getExpiryTime().toEpochMilli()); } finally { if (scm != null) { scm.stop(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java deleted file mode 100644 index 3063e2587e4..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java +++ /dev/null @@ -1,492 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
    - * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.client.rpc; - -import org.apache.commons.lang3.NotImplementedException; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.client.DefaultReplicationConfig; -import org.apache.hadoop.hdds.client.ECReplicationConfig; -import org.apache.hadoop.hdds.client.RatisReplicationConfig; -import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.conf.StorageUnit; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.OzoneClientConfig; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.utils.IOUtils; -import org.apache.hadoop.ozone.ClientConfigForTesting; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.client.BucketArgs; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.client.OzoneKeyDetails; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.io.ECKeyOutputStream; -import org.apache.hadoop.ozone.client.io.KeyOutputStream; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.container.TestHelper; -import org.apache.ozone.test.GenericTestUtils; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; - -import java.io.IOException; -import java.util.Arrays; -import java.util.HashMap; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; -import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertArrayEquals; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertInstanceOf; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.fail; - -/** - * Tests key output stream. 
- */ -abstract class AbstractTestECKeyOutputStream { - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf = new OzoneConfiguration(); - private static OzoneClient client; - private static ObjectStore objectStore; - private static int chunkSize; - private static int flushSize; - private static int maxFlushSize; - private static int blockSize; - private static String volumeName; - private static String bucketName; - private static String keyString; - private static int dataBlocks = 3; - private static int inputSize = dataBlocks * chunkSize; - private static byte[][] inputChunks = new byte[dataBlocks][chunkSize]; - - /** - * Create a MiniDFSCluster for testing. - */ - protected static void init(boolean zeroCopyEnabled) throws Exception { - chunkSize = 1024 * 1024; - flushSize = 2 * chunkSize; - maxFlushSize = 2 * flushSize; - blockSize = 2 * maxFlushSize; - - OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); - clientConfig.setChecksumType(ContainerProtos.ChecksumType.NONE); - clientConfig.setStreamBufferFlushDelay(false); - conf.setFromObject(clientConfig); - - // If SCM detects dead node too quickly, then container would be moved to - // closed state and all in progress writes will get exception. To avoid - // that, we are just keeping higher timeout and none of the tests depending - // on deadnode detection timeout currently. - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS); - conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 60, TimeUnit.SECONDS); - conf.setTimeDuration("hdds.ratis.raft.server.rpc.slowness.timeout", 300, - TimeUnit.SECONDS); - conf.setTimeDuration( - "hdds.ratis.raft.server.notification.no-leader.timeout", 300, - TimeUnit.SECONDS); - conf.setQuietMode(false); - conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4, - StorageUnit.MB); - conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, 500, - TimeUnit.MILLISECONDS); - conf.setTimeDuration(HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL, 1, - TimeUnit.SECONDS); - conf.setBoolean(OzoneConfigKeys.OZONE_EC_GRPC_ZERO_COPY_ENABLED, - zeroCopyEnabled); - conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); - // "Enable" hsync to verify that hsync would be blocked by ECKeyOutputStream - conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); - conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); - conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); - - ClientConfigForTesting.newBuilder(StorageUnit.BYTES) - .setBlockSize(blockSize) - .setChunkSize(chunkSize) - .setStreamBufferFlushSize(flushSize) - .setStreamBufferMaxSize(maxFlushSize) - .applyTo(conf); - - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(10) - .build(); - cluster.waitForClusterToBeReady(); - client = OzoneClientFactory.getRpcClient(conf); - objectStore = client.getObjectStore(); - keyString = UUID.randomUUID().toString(); - volumeName = "testeckeyoutputstream"; - bucketName = volumeName; - objectStore.createVolume(volumeName); - objectStore.getVolume(volumeName).createBucket(bucketName); - initInputChunks(); - } - - @BeforeAll - public static void init() throws Exception { - init(false); - } - - /** - * Shutdown MiniDFSCluster. 
- */ - @AfterAll - public static void shutdown() { - IOUtils.closeQuietly(client); - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testCreateKeyWithECReplicationConfig() throws Exception { - try (OzoneOutputStream key = TestHelper - .createKey(keyString, new ECReplicationConfig(3, 2, - ECReplicationConfig.EcCodec.RS, chunkSize), inputSize, - objectStore, volumeName, bucketName)) { - assertInstanceOf(ECKeyOutputStream.class, key.getOutputStream()); - } - } - - @Test - public void testCreateKeyWithOutBucketDefaults() throws Exception { - OzoneVolume volume = objectStore.getVolume(volumeName); - OzoneBucket bucket = volume.getBucket(bucketName); - try (OzoneOutputStream out = bucket.createKey("myKey", inputSize)) { - assertInstanceOf(KeyOutputStream.class, out.getOutputStream()); - for (byte[] inputChunk : inputChunks) { - out.write(inputChunk); - } - } - } - - @Test - public void testCreateKeyWithBucketDefaults() throws Exception { - String myBucket = UUID.randomUUID().toString(); - OzoneVolume volume = objectStore.getVolume(volumeName); - final BucketArgs.Builder bucketArgs = BucketArgs.newBuilder(); - bucketArgs.setDefaultReplicationConfig( - new DefaultReplicationConfig( - new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, - chunkSize))); - - volume.createBucket(myBucket, bucketArgs.build()); - OzoneBucket bucket = volume.getBucket(myBucket); - - try (OzoneOutputStream out = bucket.createKey(keyString, inputSize)) { - assertInstanceOf(ECKeyOutputStream.class, out.getOutputStream()); - for (byte[] inputChunk : inputChunks) { - out.write(inputChunk); - } - } - byte[] buf = new byte[chunkSize]; - try (OzoneInputStream in = bucket.readKey(keyString)) { - for (byte[] inputChunk : inputChunks) { - int read = in.read(buf, 0, chunkSize); - assertEquals(chunkSize, read); - assertArrayEquals(buf, inputChunk); - } - } - } - - @Test - public void testOverwriteECKeyWithRatisKey() throws Exception { - String myBucket = UUID.randomUUID().toString(); - OzoneVolume volume = objectStore.getVolume(volumeName); - final BucketArgs.Builder bucketArgs = BucketArgs.newBuilder(); - volume.createBucket(myBucket, bucketArgs.build()); - OzoneBucket bucket = volume.getBucket(myBucket); - createKeyAndCheckReplicationConfig(keyString, bucket, - new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, - chunkSize)); - - //Overwrite with RATIS/THREE - createKeyAndCheckReplicationConfig(keyString, bucket, - RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)); - - //Overwrite with RATIS/ONE - createKeyAndCheckReplicationConfig(keyString, bucket, - RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)); - } - - @Test - public void testOverwriteRatisKeyWithECKey() throws Exception { - String myBucket = UUID.randomUUID().toString(); - OzoneVolume volume = objectStore.getVolume(volumeName); - final BucketArgs.Builder bucketArgs = BucketArgs.newBuilder(); - volume.createBucket(myBucket, bucketArgs.build()); - OzoneBucket bucket = volume.getBucket(myBucket); - - createKeyAndCheckReplicationConfig(keyString, bucket, - RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)); - // Overwrite with EC key - createKeyAndCheckReplicationConfig(keyString, bucket, - new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, - chunkSize)); - } - - private void createKeyAndCheckReplicationConfig(String keyName, - OzoneBucket bucket, ReplicationConfig replicationConfig) - throws IOException { - try (OzoneOutputStream out = bucket - 
.createKey(keyName, inputSize, replicationConfig, new HashMap<>())) { - for (byte[] inputChunk : inputChunks) { - out.write(inputChunk); - } - } - OzoneKeyDetails key = bucket.getKey(keyName); - assertEquals(replicationConfig, key.getReplicationConfig()); - } - - @Test - public void testCreateRatisKeyAndWithECBucketDefaults() throws Exception { - OzoneBucket bucket = getOzoneBucket(); - try (OzoneOutputStream out = bucket.createKey( - "testCreateRatisKeyAndWithECBucketDefaults", 2000, - RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), - new HashMap<>())) { - assertInstanceOf(KeyOutputStream.class, out.getOutputStream()); - for (byte[] inputChunk : inputChunks) { - out.write(inputChunk); - } - } - } - - @Test - public void test13ChunksInSingleWriteOp() throws IOException { - testMultipleChunksInSingleWriteOp(13); - } - - @Test - public void testChunksInSingleWriteOpWithOffset() throws IOException { - testMultipleChunksInSingleWriteOp(11, 25, 19); - } - - @Test - public void test15ChunksInSingleWriteOp() throws IOException { - testMultipleChunksInSingleWriteOp(15); - } - - @Test - public void test20ChunksInSingleWriteOp() throws IOException { - testMultipleChunksInSingleWriteOp(20); - } - - @Test - public void test21ChunksInSingleWriteOp() throws IOException { - testMultipleChunksInSingleWriteOp(21); - } - - private void testMultipleChunksInSingleWriteOp(int offset, - int bufferChunks, int numChunks) - throws IOException { - byte[] inputData = getInputBytes(offset, bufferChunks, numChunks); - final OzoneBucket bucket = getOzoneBucket(); - String keyName = - String.format("testMultipleChunksInSingleWriteOpOffset" + - "%dBufferChunks%dNumChunks", offset, bufferChunks, - numChunks); - try (OzoneOutputStream out = bucket.createKey(keyName, 4096, - new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, - chunkSize), new HashMap<>())) { - out.write(inputData, offset, numChunks * chunkSize); - } - - validateContent(offset, numChunks * chunkSize, inputData, bucket, - bucket.getKey(keyName)); - } - - private void testMultipleChunksInSingleWriteOp(int numChunks) - throws IOException { - testMultipleChunksInSingleWriteOp(0, numChunks, numChunks); - } - - @Test - public void testECContainerKeysCountAndNumContainerReplicas() - throws IOException, InterruptedException, TimeoutException { - byte[] inputData = getInputBytes(1); - final OzoneBucket bucket = getOzoneBucket(); - ContainerOperationClient containerOperationClient = - new ContainerOperationClient(conf); - - ECReplicationConfig repConfig = new ECReplicationConfig( - 3, 2, ECReplicationConfig.EcCodec.RS, chunkSize); - // Close all EC pipelines so we must get a fresh pipeline and hence - // container for this test. 
- PipelineManager pm = - cluster.getStorageContainerManager().getPipelineManager(); - for (Pipeline p : pm.getPipelines(repConfig)) { - pm.closePipeline(p, true); - } - - String keyName = UUID.randomUUID().toString(); - try (OzoneOutputStream out = bucket.createKey(keyName, 4096, - repConfig, new HashMap<>())) { - out.write(inputData); - } - OzoneKeyDetails key = bucket.getKey(keyName); - long currentKeyContainerID = - key.getOzoneKeyLocations().get(0).getContainerID(); - - GenericTestUtils.waitFor(() -> { - try { - return (containerOperationClient.getContainer(currentKeyContainerID) - .getNumberOfKeys() == 1) && (containerOperationClient - .getContainerReplicas(currentKeyContainerID).size() == 5); - } catch (IOException exception) { - fail("Unexpected exception " + exception); - return false; - } - }, 100, 10000); - validateContent(inputData, bucket, key); - } - - private void validateContent(byte[] inputData, OzoneBucket bucket, - OzoneKey key) throws IOException { - validateContent(0, inputData.length, inputData, bucket, key); - } - - private void validateContent(int offset, int length, byte[] inputData, - OzoneBucket bucket, - OzoneKey key) throws IOException { - try (OzoneInputStream is = bucket.readKey(key.getName())) { - byte[] fileContent = new byte[length]; - assertEquals(length, is.read(fileContent)); - assertEquals(new String(Arrays.copyOfRange(inputData, offset, - offset + length), UTF_8), - new String(fileContent, UTF_8)); - } - } - - private OzoneBucket getOzoneBucket() throws IOException { - String myBucket = UUID.randomUUID().toString(); - OzoneVolume volume = objectStore.getVolume(volumeName); - final BucketArgs.Builder bucketArgs = BucketArgs.newBuilder(); - bucketArgs.setDefaultReplicationConfig( - new DefaultReplicationConfig( - new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, - chunkSize))); - - volume.createBucket(myBucket, bucketArgs.build()); - return volume.getBucket(myBucket); - } - - private static void initInputChunks() { - for (int i = 0; i < dataBlocks; i++) { - inputChunks[i] = getBytesWith(i + 1, chunkSize); - } - } - - private static byte[] getBytesWith(int singleDigitNumber, int total) { - StringBuilder builder = new StringBuilder(singleDigitNumber); - for (int i = 1; i <= total; i++) { - builder.append(singleDigitNumber); - } - return builder.toString().getBytes(UTF_8); - } - - @Test - public void testWriteShouldSucceedWhenDNKilled() throws Exception { - int numChunks = 3; - byte[] inputData = getInputBytes(numChunks); - final OzoneBucket bucket = getOzoneBucket(); - String keyName = "testWriteShouldSucceedWhenDNKilled" + numChunks; - DatanodeDetails nodeToKill = null; - try { - try (OzoneOutputStream out = bucket.createKey(keyName, 1024, - new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, - chunkSize), new HashMap<>())) { - ECKeyOutputStream ecOut = (ECKeyOutputStream) out.getOutputStream(); - out.write(inputData); - // Kill a node from first pipeline - nodeToKill = ecOut.getStreamEntries() - .get(0).getPipeline().getFirstNode(); - cluster.shutdownHddsDatanode(nodeToKill); - - out.write(inputData); - - // Wait for flushing thread to finish its work. - final long checkpoint = System.currentTimeMillis(); - ecOut.insertFlushCheckpoint(checkpoint); - GenericTestUtils.waitFor(() -> ecOut.getFlushCheckpoint() == checkpoint, - 100, 10000); - - // Check the second blockGroup pipeline to make sure that the failed - // node is not selected. 
- assertThat(ecOut.getStreamEntries().get(1).getPipeline().getNodes()) - .doesNotContain(nodeToKill); - } - - try (OzoneInputStream is = bucket.readKey(keyName)) { - // We wrote "inputData" twice, so do two reads and ensure the correct - // data comes back. - for (int i = 0; i < 2; i++) { - byte[] fileContent = new byte[inputData.length]; - assertEquals(inputData.length, is.read(fileContent)); - assertEquals(new String(inputData, UTF_8), - new String(fileContent, UTF_8)); - } - } - } finally { - cluster.restartHddsDatanode(nodeToKill, true); - } - } - - private byte[] getInputBytes(int numChunks) { - return getInputBytes(0, numChunks, numChunks); - } - - private byte[] getInputBytes(int offset, int bufferChunks, int numChunks) { - byte[] inputData = new byte[offset + bufferChunks * chunkSize]; - for (int i = 0; i < numChunks; i++) { - int start = offset + (i * chunkSize); - Arrays.fill(inputData, start, start + chunkSize - 1, - String.valueOf(i % 9).getBytes(UTF_8)[0]); - } - return inputData; - } - - @Test - public void testBlockedHflushAndHsync() throws Exception { - // Expect ECKeyOutputStream hflush and hsync calls to throw exception - try (OzoneOutputStream oOut = TestHelper.createKey( - keyString, new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, chunkSize), - inputSize, objectStore, volumeName, bucketName)) { - assertInstanceOf(ECKeyOutputStream.class, oOut.getOutputStream()); - KeyOutputStream kOut = (KeyOutputStream) oOut.getOutputStream(); - - assertThrows(NotImplementedException.class, () -> kOut.hflush()); - assertThrows(NotImplementedException.class, () -> kOut.hsync()); - } - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java index eb9f35f518c..7e518687bea 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java @@ -24,6 +24,7 @@ import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; +import java.nio.file.Path; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.security.PrivilegedExceptionAction; @@ -32,12 +33,14 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Optional; +import java.util.Set; import java.util.TreeMap; import java.util.UUID; import java.util.concurrent.CountDownLatch; @@ -88,7 +91,6 @@ import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.OzoneKey; import org.apache.hadoop.ozone.client.OzoneKeyDetails; @@ -170,6 +172,8 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PARTIAL_RENAME; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; +import static 
org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.LIST; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; @@ -188,10 +192,12 @@ import static org.junit.jupiter.api.Assertions.fail; import static org.slf4j.event.Level.DEBUG; +import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.MethodOrderer; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestMethodOrder; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.CsvSource; import org.junit.jupiter.params.provider.EnumSource; @@ -221,6 +227,7 @@ abstract class OzoneRpcClientTests extends OzoneTestBase { private static OzoneAcl inheritedGroupAcl = new OzoneAcl(GROUP, remoteGroupName, ACCESS, READ); private static MessageDigest eTagProvider; + private static Set ozoneClients = new HashSet<>(); @BeforeAll public static void initialize() throws NoSuchAlgorithmException { @@ -250,6 +257,7 @@ static void startCluster(OzoneConfiguration conf, MiniOzoneCluster.Builder build .build(); cluster.waitForClusterToBeReady(); ozClient = OzoneClientFactory.getRpcClient(conf); + ozoneClients.add(ozClient); store = ozClient.getObjectStore(); storageContainerLocationClient = cluster.getStorageContainerLocationClient(); @@ -259,10 +267,9 @@ static void startCluster(OzoneConfiguration conf, MiniOzoneCluster.Builder build /** * Close OzoneClient and shutdown MiniOzoneCluster. */ - static void shutdownCluster() throws IOException { - if (ozClient != null) { - ozClient.close(); - } + static void shutdownCluster() { + org.apache.hadoop.hdds.utils.IOUtils.closeQuietly(ozoneClients); + ozoneClients.clear(); if (storageContainerLocationClient != null) { storageContainerLocationClient.close(); @@ -274,6 +281,7 @@ static void shutdownCluster() throws IOException { } private static void setOzClient(OzoneClient ozClient) { + ozoneClients.add(ozClient); OzoneRpcClientTests.ozClient = ozClient; } @@ -388,10 +396,10 @@ public void testBucketSetOwner() throws IOException { .setVolumeName(volumeName).setBucketName(bucketName) .setStoreType(OzoneObj.StoreType.OZONE) .setResType(OzoneObj.ResourceType.BUCKET).build(); - store.addAcl(volumeObj, new OzoneAcl(USER, "user1", ACCESS, ACLType.ALL)); - store.addAcl(volumeObj, new OzoneAcl(USER, "user2", ACCESS, ACLType.ALL)); - store.addAcl(bucketObj, new OzoneAcl(USER, "user1", ACCESS, ACLType.ALL)); - store.addAcl(bucketObj, new OzoneAcl(USER, "user2", ACCESS, ACLType.ALL)); + store.addAcl(volumeObj, new OzoneAcl(USER, "user1", ACCESS, ALL)); + store.addAcl(volumeObj, new OzoneAcl(USER, "user2", ACCESS, ALL)); + store.addAcl(bucketObj, new OzoneAcl(USER, "user1", ACCESS, ALL)); + store.addAcl(bucketObj, new OzoneAcl(USER, "user2", ACCESS, ALL)); createKeyForUser(volumeName, bucketName, key1, content, user1); createKeyForUser(volumeName, bucketName, key2, content, user2); @@ -619,7 +627,7 @@ public void testDeleteVolume() @Test public void testCreateVolumeWithMetadata() - throws IOException, OzoneClientException { + throws IOException { String volumeName = UUID.randomUUID().toString(); VolumeArgs volumeArgs = VolumeArgs.newBuilder() .addMetadata("key1", "val1") @@ -634,7 +642,7 @@ public void testCreateVolumeWithMetadata() @Test public void testCreateBucketWithMetadata() - 
throws IOException, OzoneClientException { + throws IOException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); store.createVolume(volumeName); @@ -773,7 +781,7 @@ public void testCreateBucketWithAllArgument() String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(USER, "test", - ACCESS, ACLType.ALL); + ACCESS, ALL); ReplicationConfig repConfig = new ECReplicationConfig(3, 2); store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); @@ -812,7 +820,7 @@ public void testAddBucketAcl() OzoneVolume volume = store.getVolume(volumeName); volume.createBucket(bucketName); List acls = new ArrayList<>(); - acls.add(new OzoneAcl(USER, "test", ACCESS, ACLType.ALL)); + acls.add(new OzoneAcl(USER, "test", ACCESS, ALL)); OzoneBucket bucket = volume.getBucket(bucketName); for (OzoneAcl acl : acls) { assertTrue(bucket.addAcl(acl)); @@ -828,7 +836,7 @@ public void testRemoveBucketAcl() String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(USER, "test", - ACCESS, ACLType.ALL); + ACCESS, ALL); store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); BucketArgs.Builder builder = BucketArgs.newBuilder() @@ -847,9 +855,9 @@ public void testRemoveBucketAclUsingRpcClientRemoveAcl() String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(USER, "test", - ACCESS, ACLType.ALL); + ACCESS, ALL); OzoneAcl acl2 = new OzoneAcl(USER, "test1", - ACCESS, ACLType.ALL); + ACCESS, ALL); store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); BucketArgs.Builder builder = BucketArgs.newBuilder() @@ -907,6 +915,64 @@ public void testAclsAfterCallingSetBucketProperty() throws Exception { } + @Test + public void testAclDeDuplication() + throws IOException { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + OzoneAcl userAcl1 = new OzoneAcl(USER, "test", DEFAULT, READ); + UserGroupInformation currentUser = UserGroupInformation.getCurrentUser(); + OzoneAcl currentUserAcl = new OzoneAcl(USER, currentUser.getShortUserName(), ACCESS, ALL); + OzoneAcl currentUserPrimaryGroupAcl = new OzoneAcl(GROUP, currentUser.getPrimaryGroupName(), ACCESS, READ, LIST); + VolumeArgs createVolumeArgs = VolumeArgs.newBuilder() + .setOwner(currentUser.getShortUserName()) + .setAdmin(currentUser.getShortUserName()) + .addAcl(userAcl1) + .addAcl(currentUserAcl) + .addAcl(currentUserPrimaryGroupAcl) + .build(); + + store.createVolume(volumeName, createVolumeArgs); + OzoneVolume volume = store.getVolume(volumeName); + List volumeAcls = volume.getAcls(); + assertEquals(3, volumeAcls.size()); + assertTrue(volumeAcls.contains(userAcl1)); + assertTrue(volumeAcls.contains(currentUserAcl)); + assertTrue(volumeAcls.contains(currentUserPrimaryGroupAcl)); + + // normal bucket + BucketArgs.Builder builder = BucketArgs.newBuilder() + .addAcl(currentUserAcl).addAcl(currentUserPrimaryGroupAcl); + volume.createBucket(bucketName, builder.build()); + OzoneBucket bucket = volume.getBucket(bucketName); + List bucketAcls = bucket.getAcls(); + assertEquals(bucketName, bucket.getName()); + assertEquals(3, bucketAcls.size()); + assertTrue(bucketAcls.contains(currentUserAcl)); + assertTrue(bucketAcls.contains(currentUserPrimaryGroupAcl)); + 
assertTrue(bucketAcls.get(2).getName().equals(userAcl1.getName())); + assertTrue(bucketAcls.get(2).getAclList().equals(userAcl1.getAclList())); + assertTrue(bucketAcls.get(2).getAclScope().equals(ACCESS)); + + // link bucket + OzoneAcl userAcl2 = new OzoneAcl(USER, "test-link", DEFAULT, READ); + String linkBucketName = "link-" + bucketName; + builder = BucketArgs.newBuilder().setSourceVolume(volumeName).setSourceBucket(bucketName) + .addAcl(currentUserAcl).addAcl(currentUserPrimaryGroupAcl).addAcl(userAcl2); + volume.createBucket(linkBucketName, builder.build()); + OzoneBucket linkBucket = volume.getBucket(linkBucketName); + List linkBucketAcls = linkBucket.getAcls(); + assertEquals(linkBucketName, linkBucket.getName()); + assertEquals(5, linkBucketAcls.size()); + assertTrue(linkBucketAcls.contains(currentUserAcl)); + assertTrue(linkBucketAcls.contains(currentUserPrimaryGroupAcl)); + assertTrue(linkBucketAcls.contains(userAcl2)); + assertTrue(linkBucketAcls.contains(OzoneAcl.LINK_BUCKET_DEFAULT_ACL)); + assertTrue(linkBucketAcls.get(4).getName().equals(userAcl1.getName())); + assertTrue(linkBucketAcls.get(4).getAclList().equals(userAcl1.getAclList())); + assertTrue(linkBucketAcls.get(4).getAclScope().equals(ACCESS)); + } + @Test public void testSetBucketStorageType() throws IOException { @@ -3023,10 +3089,10 @@ public void testMultipartUploadWithACL() throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); // Add ACL on Bucket - OzoneAcl acl1 = new OzoneAcl(USER, "Monday", DEFAULT, ACLType.ALL); - OzoneAcl acl2 = new OzoneAcl(USER, "Friday", DEFAULT, ACLType.ALL); - OzoneAcl acl3 = new OzoneAcl(USER, "Jan", ACCESS, ACLType.ALL); - OzoneAcl acl4 = new OzoneAcl(USER, "Feb", ACCESS, ACLType.ALL); + OzoneAcl acl1 = new OzoneAcl(USER, "Monday", DEFAULT, ALL); + OzoneAcl acl2 = new OzoneAcl(USER, "Friday", DEFAULT, ALL); + OzoneAcl acl3 = new OzoneAcl(USER, "Jan", ACCESS, ALL); + OzoneAcl acl4 = new OzoneAcl(USER, "Feb", ACCESS, ALL); bucket.addAcl(acl1); bucket.addAcl(acl2); bucket.addAcl(acl3); @@ -3140,6 +3206,37 @@ void testMultipartUploadOverride(ReplicationConfig replication) doMultipartUpload(bucket, keyName, (byte)97, replication); } + + /** + * This test prints out that there is a memory leak in the test logs + * which during post-processing is caught by the CI thereby failing the + * CI run. Hence, disabling this for CI. 
+ */ + @Unhealthy + public void testClientLeakDetector() throws Exception { + OzoneClient client = OzoneClientFactory.getRpcClient(cluster.getConf()); + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName = UUID.randomUUID().toString(); + GenericTestUtils.LogCapturer ozoneClientFactoryLogCapturer = + GenericTestUtils.LogCapturer.captureLogs( + OzoneClientFactory.getLogger()); + + client.getObjectStore().createVolume(volumeName); + OzoneVolume volume = client.getObjectStore().getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); + byte[] data = new byte[10]; + Arrays.fill(data, (byte) 1); + try (OzoneOutputStream out = bucket.createKey(keyName, 10, + ReplicationConfig.fromTypeAndFactor(RATIS, ONE), new HashMap<>())) { + out.write(data); + } + client = null; + System.gc(); + GenericTestUtils.waitFor(() -> ozoneClientFactoryLogCapturer.getOutput() + .contains("is not closed properly"), 100, 2000); + } @Test public void testMultipartUploadOwner() throws Exception { // Save the old user, and switch to the old user after test @@ -3166,10 +3263,10 @@ public void testMultipartUploadOwner() throws Exception { .setVolumeName(volumeName).setBucketName(bucketName) .setStoreType(OzoneObj.StoreType.OZONE) .setResType(OzoneObj.ResourceType.BUCKET).build(); - store.addAcl(volumeObj, new OzoneAcl(USER, "user1", ACCESS, ACLType.ALL)); - store.addAcl(volumeObj, new OzoneAcl(USER, "awsUser1", ACCESS, ACLType.ALL)); - store.addAcl(bucketObj, new OzoneAcl(USER, "user1", ACCESS, ACLType.ALL)); - store.addAcl(bucketObj, new OzoneAcl(USER, "awsUser1", ACCESS, ACLType.ALL)); + store.addAcl(volumeObj, new OzoneAcl(USER, "user1", ACCESS, ALL)); + store.addAcl(volumeObj, new OzoneAcl(USER, "awsUser1", ACCESS, ALL)); + store.addAcl(bucketObj, new OzoneAcl(USER, "user1", ACCESS, ALL)); + store.addAcl(bucketObj, new OzoneAcl(USER, "awsUser1", ACCESS, ALL)); // user1 MultipartUpload a key UserGroupInformation.setLoginUser(user1); @@ -3906,7 +4003,7 @@ public void testNativeAclsForPrefix() throws Exception { aclsGet = store.getAcl(prefixObj); assertEquals(0, aclsGet.size()); - OzoneAcl group1Acl = new OzoneAcl(GROUP, "group1", ACCESS, ACLType.ALL); + OzoneAcl group1Acl = new OzoneAcl(GROUP, "group1", ACCESS, ALL); List acls = new ArrayList<>(); acls.add(user1Acl); acls.add(group1Acl); @@ -3943,14 +4040,12 @@ private List getAclList(OzoneConfiguration conf) //User ACL UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class); - ACLType userRights = aclConfig.getUserDefaultRights(); - ACLType groupRights = aclConfig.getGroupDefaultRights(); - - listOfAcls.add(new OzoneAcl(USER, ugi.getUserName(), ACCESS, userRights)); - //Group ACLs of the User - List userGroups = Arrays.asList(ugi.getGroupNames()); - userGroups.stream().forEach((group) -> listOfAcls.add( - new OzoneAcl(GROUP, group, ACCESS, groupRights))); + ACLType[] userRights = aclConfig.getUserDefaultRights(); + ACLType[] groupRights = aclConfig.getGroupDefaultRights(); + + listOfAcls.add(new OzoneAcl(USER, ugi.getShortUserName(), ACCESS, userRights)); + //Group ACL of the User + listOfAcls.add(new OzoneAcl(GROUP, ugi.getPrimaryGroupName(), ACCESS, groupRights)); return listOfAcls; } @@ -4019,7 +4114,7 @@ private void validateOzoneAccessAcl(OzoneObj ozObj) throws IOException { OzoneAcl ua = new OzoneAcl(USER, "userx", ACCESS, ACLType.READ_ACL); OzoneAcl ug = new OzoneAcl(GROUP, 
"userx", - ACCESS, ACLType.ALL); + ACCESS, ALL); store.setAcl(ozObj, Arrays.asList(ua, ug)); newAcls = store.getAcl(ozObj); assertEquals(2, newAcls.size()); @@ -4774,17 +4869,13 @@ void testMultiPartUploadWithStream(ReplicationConfig replicationConfig) } @Test - public void testUploadWithStreamAndMemoryMappedBuffer() throws IOException { - // create a local dir - final String dir = GenericTestUtils.getTempPath( - getClass().getSimpleName()); - GenericTestUtils.assertDirCreation(new File(dir)); + public void testUploadWithStreamAndMemoryMappedBuffer(@TempDir Path dir) throws IOException { // create a local file final int chunkSize = 1024; final byte[] data = new byte[8 * chunkSize]; ThreadLocalRandom.current().nextBytes(data); - final File file = new File(dir, "data"); + final File file = new File(dir.toString(), "data"); try (FileOutputStream out = new FileOutputStream(file)) { out.write(data); } @@ -4955,4 +5046,136 @@ public void reset() throws IOException { init(); } } + + @ParameterizedTest + @MethodSource("bucketLayouts") + public void testPutObjectTagging(BucketLayout bucketLayout) throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + + String value = "sample value"; + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + BucketArgs bucketArgs = + BucketArgs.newBuilder().setBucketLayout(bucketLayout).build(); + volume.createBucket(bucketName, bucketArgs); + OzoneBucket bucket = volume.getBucket(bucketName); + + String keyName = UUID.randomUUID().toString(); + + OzoneOutputStream out = bucket.createKey(keyName, + value.getBytes(UTF_8).length, anyReplication(), new HashMap<>()); + out.write(value.getBytes(UTF_8)); + out.close(); + + OzoneKey key = bucket.getKey(keyName); + assertTrue(key.getTags().isEmpty()); + + Map tags = new HashMap<>(); + tags.put("tag-key-1", "tag-value-1"); + tags.put("tag-key-2", "tag-value-2"); + + bucket.putObjectTagging(keyName, tags); + + OzoneKey updatedKey = bucket.getKey(keyName); + assertEquals(tags.size(), updatedKey.getTags().size()); + assertEquals(key.getModificationTime(), updatedKey.getModificationTime()); + assertThat(updatedKey.getTags()).containsAllEntriesOf(tags); + + // Do another putObjectTagging, it should override the previous one + Map secondTags = new HashMap<>(); + secondTags.put("tag-key-3", "tag-value-3"); + + bucket.putObjectTagging(keyName, secondTags); + + updatedKey = bucket.getKey(keyName); + assertEquals(secondTags.size(), updatedKey.getTags().size()); + assertThat(updatedKey.getTags()).containsAllEntriesOf(secondTags); + assertThat(updatedKey.getTags()).doesNotContainKeys("tag-key-1", "tag-key-2"); + + if (bucketLayout.equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) { + String dirKey = "dir1/"; + bucket.createDirectory(dirKey); + OMException exception = assertThrows(OMException.class, + () -> bucket.putObjectTagging(dirKey, tags)); + assertThat(exception.getResult()).isEqualTo(ResultCodes.NOT_SUPPORTED_OPERATION); + } + } + + @ParameterizedTest + @MethodSource("bucketLayouts") + public void testDeleteObjectTagging(BucketLayout bucketLayout) throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + + String value = "sample value"; + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + BucketArgs bucketArgs = + BucketArgs.newBuilder().setBucketLayout(bucketLayout).build(); + volume.createBucket(bucketName, bucketArgs); + OzoneBucket 
bucket = volume.getBucket(bucketName); + + String keyName = UUID.randomUUID().toString(); + + Map tags = new HashMap<>(); + tags.put("tag-key-1", "tag-value-1"); + tags.put("tag-key-2", "tag-value-2"); + + OzoneOutputStream out = bucket.createKey(keyName, + value.getBytes(UTF_8).length, anyReplication(), new HashMap<>(), tags); + out.write(value.getBytes(UTF_8)); + out.close(); + + OzoneKey key = bucket.getKey(keyName); + assertFalse(key.getTags().isEmpty()); + + bucket.deleteObjectTagging(keyName); + + OzoneKey updatedKey = bucket.getKey(keyName); + assertEquals(0, updatedKey.getTags().size()); + assertEquals(key.getModificationTime(), updatedKey.getModificationTime()); + + if (bucketLayout.equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) { + String dirKey = "dir1/"; + bucket.createDirectory(dirKey); + OMException exception = assertThrows(OMException.class, + () -> bucket.deleteObjectTagging(dirKey)); + assertThat(exception.getResult()).isEqualTo(ResultCodes.NOT_SUPPORTED_OPERATION); + } + } + + @ParameterizedTest + @MethodSource("bucketLayouts") + public void testGetObjectTagging(BucketLayout bucketLayout) throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + + String value = "sample value"; + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + BucketArgs bucketArgs = + BucketArgs.newBuilder().setBucketLayout(bucketLayout).build(); + volume.createBucket(bucketName, bucketArgs); + OzoneBucket bucket = volume.getBucket(bucketName); + + String keyName = UUID.randomUUID().toString(); + + Map tags = new HashMap<>(); + tags.put("tag-key-1", "tag-value-1"); + tags.put("tag-key-2", "tag-value-2"); + + OzoneOutputStream out = bucket.createKey(keyName, + value.getBytes(UTF_8).length, anyReplication(), new HashMap<>(), tags); + out.write(value.getBytes(UTF_8)); + out.close(); + + OzoneKey key = bucket.getKey(keyName); + assertEquals(tags.size(), key.getTags().size()); + + Map tagsRetrieved = bucket.getObjectTagging(keyName); + + assertEquals(tags.size(), tagsRetrieved.size()); + assertThat(tagsRetrieved).containsAllEntriesOf(tags); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java index d668bb4b652..c66ca2931bd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java @@ -32,13 +32,11 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import java.io.File; import java.io.IOException; import java.util.HashMap; import java.util.List; @@ -75,10 +73,6 @@ public class TestBCSID { */ @BeforeAll public static void init() throws Exception { - String path = GenericTestUtils - .getTempPath(TestBCSID.class.getSimpleName()); - File baseDir = new File(path); - baseDir.mkdirs(); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java index eb3709c9a85..63692c0dfc7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java @@ -49,6 +49,7 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.container.TestHelper; +import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.TestInstance; @@ -274,6 +275,7 @@ void testWriteLessThanChunkSize(boolean flushDelay, boolean enablePiggybacking) @ParameterizedTest @MethodSource("clientParameters") + @Flaky("HDDS-11564") void testWriteExactlyFlushSize(boolean flushDelay, boolean enablePiggybacking) throws Exception { OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { @@ -481,6 +483,7 @@ void testWriteMoreThanChunkSize(boolean flushDelay, boolean enablePiggybacking) @ParameterizedTest @MethodSource("clientParameters") + @Flaky("HDDS-11564") void testWriteMoreThanFlushSize(boolean flushDelay, boolean enablePiggybacking) throws Exception { OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { @@ -571,6 +574,7 @@ void testWriteMoreThanFlushSize(boolean flushDelay, boolean enablePiggybacking) @ParameterizedTest @MethodSource("clientParameters") + @Flaky("HDDS-11564") void testWriteExactlyMaxFlushSize(boolean flushDelay, boolean enablePiggybacking) throws Exception { OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { @@ -665,6 +669,7 @@ void testWriteExactlyMaxFlushSize(boolean flushDelay, boolean enablePiggybacking @ParameterizedTest @MethodSource("clientParameters") + @Flaky("HDDS-11564") void testWriteMoreThanMaxFlushSize(boolean flushDelay, boolean enablePiggybacking) throws Exception { OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java index 5e5461634c0..010bd93834b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java @@ -272,6 +272,7 @@ void testWatchForCommitDatanodeFailure(boolean flushDelay, boolean enablePiggyba @ParameterizedTest @MethodSource("clientParameters") + @Flaky("HDDS-11849") void test2DatanodesFailure(boolean flushDelay, boolean enablePiggybacking) throws Exception { OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java index 78a4e78647e..8d894471289 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java @@ -49,7 +49,6 @@ import org.junit.jupiter.api.Test; import org.slf4j.LoggerFactory; -import java.io.File; import java.io.IOException; import java.time.Duration; import java.util.HashMap; @@ -80,7 +79,6 @@ public class TestContainerReplicationEndToEnd { private static ObjectStore objectStore; private static String volumeName; private static String bucketName; - private static String path; private static XceiverClientManager xceiverClientManager; private static long containerReportInterval; @@ -92,10 +90,7 @@ public class TestContainerReplicationEndToEnd { @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); - path = GenericTestUtils - .getTempPath(TestContainerStateMachineFailures.class.getSimpleName()); - File baseDir = new File(path); - baseDir.mkdirs(); + containerReportInterval = 2000; conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java index 3f1c31edfe7..dc00b0acc55 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java @@ -47,7 +47,6 @@ import org.apache.hadoop.ozone.container.common.transport.server.ratis.RatisServerConfiguration; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.ozone.test.GenericTestUtils; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; @@ -78,7 +77,6 @@ public class TestContainerStateMachine { private ObjectStore objectStore; private String volumeName; private String bucketName; - private String path; /** * Create a MiniDFSCluster for testing. 
@@ -87,10 +85,6 @@ public class TestContainerStateMachine { */ @BeforeEach public void setup() throws Exception { - path = GenericTestUtils - .getTempPath(TestContainerStateMachine.class.getSimpleName()); - File baseDir = new File(path); - baseDir.mkdirs(); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java index eea068a8742..f351ad8927a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java @@ -46,7 +46,6 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.ozone.test.GenericTestUtils; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; @@ -80,10 +79,6 @@ public class TestContainerStateMachineFailureOnRead { @BeforeEach public void setup() throws Exception { conf = new OzoneConfiguration(); - String path = GenericTestUtils - .getTempPath(TestContainerStateMachineFailures.class.getSimpleName()); - File baseDir = new File(path); - baseDir.mkdirs(); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java index b6eaca8e80d..e3759521c82 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java @@ -19,6 +19,7 @@ import java.io.File; import java.io.IOException; +import java.io.UncheckedIOException; import java.nio.charset.StandardCharsets; import java.nio.file.Path; import java.time.Duration; @@ -32,6 +33,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.HddsUtils; @@ -40,6 +42,7 @@ import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; @@ -50,6 +53,7 @@ import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.utils.HddsServerUtil; import org.apache.hadoop.hdds.utils.IOUtils; import 
org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -264,6 +268,57 @@ public void testContainerStateMachineCloseOnMissingPipeline() key.close(); } + + @Test + public void testContainerStateMachineRestartWithDNChangePipeline() + throws Exception { + try (OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName) + .createKey("testDNRestart", 1024, ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, + ReplicationFactor.THREE), new HashMap<>())) { + key.write("ratis".getBytes(UTF_8)); + key.flush(); + + KeyOutputStream groupOutputStream = (KeyOutputStream) key. + getOutputStream(); + List locationInfoList = + groupOutputStream.getLocationInfoList(); + assertEquals(1, locationInfoList.size()); + + OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); + Pipeline pipeline = omKeyLocationInfo.getPipeline(); + List datanodes = + new ArrayList<>(TestHelper.getDatanodeServices(cluster, + pipeline)); + + DatanodeDetails dn = datanodes.get(0).getDatanodeDetails(); + + // Delete all data volumes. + cluster.getHddsDatanode(dn).getDatanodeStateMachine().getContainer().getVolumeSet().getVolumesList() + .stream().forEach(v -> { + try { + FileUtils.deleteDirectory(v.getStorageDir()); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + + // Delete datanode.id datanodeIdFile. + File datanodeIdFile = new File(HddsServerUtil.getDatanodeIdFilePath(cluster.getHddsDatanode(dn).getConf())); + boolean deleted = datanodeIdFile.delete(); + assertTrue(deleted); + cluster.restartHddsDatanode(dn, false); + GenericTestUtils.waitFor(() -> { + try { + key.write("ratis".getBytes(UTF_8)); + key.flush(); + return groupOutputStream.getLocationInfoList().size() > 1; + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }, 1000, 30000); + } + } + @Test public void testContainerStateMachineFailures() throws Exception { OzoneOutputStream key = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java index 229059d84ad..b59d4885d71 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java @@ -39,7 +39,6 @@ import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -72,7 +71,6 @@ public class TestContainerStateMachineFlushDelay { private ObjectStore objectStore; private String volumeName; private String bucketName; - private String path; private int chunkSize; private int flushSize; private int maxFlushSize; @@ -91,10 +89,6 @@ public void setup() throws Exception { maxFlushSize = 2 * flushSize; blockSize = 2 * maxFlushSize; keyString = UUID.randomUUID().toString(); - path = GenericTestUtils - .getTempPath(TestContainerStateMachineFlushDelay.class.getSimpleName()); - File baseDir = new File(path); - baseDir.mkdirs(); conf.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true); // conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java index d4ff8573627..bb42d8a0f57 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java @@ -17,7 +17,6 @@ package org.apache.hadoop.ozone.client.rpc; -import java.io.File; import java.io.IOException; import java.time.Duration; import java.util.HashMap; @@ -60,7 +59,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.ozone.test.GenericTestUtils; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; @@ -91,7 +89,6 @@ public class TestDeleteWithInAdequateDN { private static ObjectStore objectStore; private static String volumeName; private static String bucketName; - private static String path; private static XceiverClientManager xceiverClientManager; private static final int FACTOR_THREE_PIPELINE_COUNT = 1; @@ -105,10 +102,6 @@ public static void init() throws Exception { final int numOfDatanodes = 3; conf = new OzoneConfiguration(); - path = GenericTestUtils - .getTempPath(TestContainerStateMachineFailures.class.getSimpleName()); - File baseDir = new File(path); - baseDir.mkdirs(); conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 100, TimeUnit.MILLISECONDS); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java index c5147ecfb01..bd1fbe7382a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java @@ -17,15 +17,575 @@ package org.apache.hadoop.ozone.client.rpc; +import org.apache.commons.lang3.NotImplementedException; +import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.DefaultReplicationConfig; +import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; +import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.ozone.ClientConfigForTesting; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import 
org.apache.hadoop.ozone.client.BucketArgs; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientFactory; +import org.apache.hadoop.ozone.client.OzoneKey; +import org.apache.hadoop.ozone.client.OzoneKeyDetails; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.io.ECKeyOutputStream; +import org.apache.hadoop.ozone.client.io.KeyOutputStream; +import org.apache.hadoop.ozone.client.io.OzoneInputStream; +import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.container.TestHelper; +import org.apache.hadoop.ozone.container.common.interfaces.Handler; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.ozone.test.GenericTestUtils; +import org.apache.ozone.test.tag.Unhealthy; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.mockito.MockedStatic; +import org.mockito.Mockito; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicReference; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.ArgumentMatchers.any; /** - * Tests key output stream without zero-copy enabled. + * Tests key output stream. */ -public class TestECKeyOutputStream extends - AbstractTestECKeyOutputStream { +public class TestECKeyOutputStream { + private static MiniOzoneCluster cluster; + private static OzoneConfiguration conf = new OzoneConfiguration(); + private static OzoneClient client; + private static ObjectStore objectStore; + private static int chunkSize; + private static int flushSize; + private static int maxFlushSize; + private static int blockSize; + private static String volumeName; + private static String bucketName; + private static String keyString; + private static int dataBlocks = 3; + private static int inputSize = dataBlocks * chunkSize; + private static byte[][] inputChunks = new byte[dataBlocks][chunkSize]; + + private static void initConf(OzoneConfiguration configuration) { + OzoneClientConfig clientConfig = configuration.getObject(OzoneClientConfig.class); + clientConfig.setChecksumType(ContainerProtos.ChecksumType.NONE); + clientConfig.setStreamBufferFlushDelay(false); + configuration.setFromObject(clientConfig); + + // If SCM detects dead node too quickly, then container would be moved to + // closed state and all in progress writes will get exception. 
To avoid + that, we keep a higher timeout, and none of the tests currently depend + on dead-node detection timing. + configuration.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS); + configuration.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 60, TimeUnit.SECONDS); + configuration.setTimeDuration("hdds.ratis.raft.server.rpc.slowness.timeout", 300, + TimeUnit.SECONDS); + configuration.set("ozone.replication.allowed-configs", "(^((STANDALONE|RATIS)/(ONE|THREE))|(EC/(3-2|6-3|10-4)-" + + "(512|1024|2048|4096|1)k)$)"); + configuration.setTimeDuration( + "hdds.ratis.raft.server.notification.no-leader.timeout", 300, + TimeUnit.SECONDS); + configuration.setQuietMode(false); + configuration.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4, + StorageUnit.MB); + configuration.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, 500, + TimeUnit.MILLISECONDS); + configuration.setTimeDuration(HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL, 1, + TimeUnit.SECONDS); + configuration.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); + // "Enable" hsync to verify that hsync would be blocked by ECKeyOutputStream + configuration.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); + configuration.setBoolean("ozone.client.hbase.enhancements.allowed", true); + configuration.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); + + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) + .setBlockSize(blockSize) + .setChunkSize(chunkSize) + .setStreamBufferFlushSize(flushSize) + .setStreamBufferMaxSize(maxFlushSize) + .applyTo(configuration); + } + + /** + * Create a MiniOzoneCluster for testing. + */ @BeforeAll - public static void init() throws Exception { - init(false); + protected static void init() throws Exception { + chunkSize = 1024 * 1024; + flushSize = 2 * chunkSize; + maxFlushSize = 2 * flushSize; + blockSize = 2 * maxFlushSize; + initConf(conf); + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(10) + .build(); + cluster.waitForClusterToBeReady(); + client = OzoneClientFactory.getRpcClient(conf); + objectStore = client.getObjectStore(); + keyString = UUID.randomUUID().toString(); + volumeName = "testeckeyoutputstream"; + bucketName = volumeName; + objectStore.createVolume(volumeName); + objectStore.getVolume(volumeName).createBucket(bucketName); + initInputChunks(); + } + + /** + * Shutdown the MiniOzoneCluster.
+ */ + @AfterAll + public static void shutdown() { + IOUtils.closeQuietly(client); + if (cluster != null) { + cluster.shutdown(); + } + } + + @Test + public void testCreateKeyWithECReplicationConfig() throws Exception { + try (OzoneOutputStream key = TestHelper + .createKey(keyString, new ECReplicationConfig(3, 2, + ECReplicationConfig.EcCodec.RS, chunkSize), inputSize, + objectStore, volumeName, bucketName)) { + assertInstanceOf(ECKeyOutputStream.class, key.getOutputStream()); + } + } + + @Test + @Unhealthy("HDDS-11821") + public void testECKeyCreatetWithDatanodeIdChange() + throws Exception { + AtomicReference failed = new AtomicReference<>(false); + AtomicReference miniOzoneCluster = new AtomicReference<>(); + OzoneClient client1 = null; + try (MockedStatic mockedHandler = Mockito.mockStatic(Handler.class, Mockito.CALLS_REAL_METHODS)) { + Map handlers = new HashMap<>(); + mockedHandler.when(() -> Handler + .getHandlerForContainerType(any(), any(), any(), any(), any(), any(), any(), any())) + .thenAnswer(i -> { + Handler handler = Mockito.spy((Handler) i.callRealMethod()); + handlers.put(handler.getDatanodeId(), handler); + return handler; + }); + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + initConf(ozoneConfiguration); + miniOzoneCluster.set(MiniOzoneCluster.newBuilder(ozoneConfiguration).setNumDatanodes(10).build()); + miniOzoneCluster.get().waitForClusterToBeReady(); + client1 = miniOzoneCluster.get().newClient(); + ObjectStore store = client1.getObjectStore(); + store.createVolume(volumeName); + store.getVolume(volumeName).createBucket(bucketName); + OzoneOutputStream key = TestHelper.createKey(keyString, new ECReplicationConfig(3, 2, + ECReplicationConfig.EcCodec.RS, 1024), inputSize, store, volumeName, bucketName); + byte[] b = new byte[6 * 1024]; + ECKeyOutputStream groupOutputStream = (ECKeyOutputStream) key.getOutputStream(); + List locationInfoList = groupOutputStream.getLocationInfoList(); + while (locationInfoList.isEmpty()) { + locationInfoList = groupOutputStream.getLocationInfoList(); + Random random = new Random(); + random.nextBytes(b); + assertInstanceOf(ECKeyOutputStream.class, key.getOutputStream()); + key.write(b); + key.flush(); + } + + assertEquals(1, locationInfoList.size()); + + OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); + long containerId = omKeyLocationInfo.getContainerID(); + Pipeline pipeline = omKeyLocationInfo.getPipeline(); + DatanodeDetails dnWithReplicaIndex1 = + pipeline.getReplicaIndexes().entrySet().stream().filter(e -> e.getValue() == 1).map(Map.Entry::getKey) + .findFirst().get(); + Mockito.when(handlers.get(dnWithReplicaIndex1.getUuidString()).getDatanodeId()) + .thenAnswer(i -> { + if (!failed.get()) { + // Change dnId for one write chunk request. 
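The fault injection in this test combines Mockito's static mocking (with CALLS_REAL_METHODS as the default answer) and per-instance spies captured in a map, so that a single method on one handler can be re-stubbed to misbehave exactly once. A minimal, self-contained sketch of that pattern, using hypothetical Widget/Factory classes instead of Ozone's Handler, might look like this:

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.ArgumentMatchers.anyString;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import org.junit.jupiter.api.Test;
import org.mockito.MockedStatic;
import org.mockito.Mockito;

class StaticSpySketchTest {

  /** Hypothetical stand-ins for Handler and its static factory method. */
  static class Widget {
    String id() {
      return "real-id";
    }
  }

  static final class Factory {
    static Widget create(String name) {
      return new Widget();
    }
  }

  @Test
  void reportsChangedIdExactlyOnce() {
    AtomicBoolean injected = new AtomicBoolean(false);
    Map<String, Widget> spies = new HashMap<>();

    try (MockedStatic<Factory> mocked =
             Mockito.mockStatic(Factory.class, Mockito.CALLS_REAL_METHODS)) {
      // Wrap every Widget produced by the real factory in a spy and remember it,
      // mirroring how the test above captures each Handler by datanode id.
      mocked.when(() -> Factory.create(anyString())).thenAnswer(invocation -> {
        Widget spy = Mockito.spy((Widget) invocation.callRealMethod());
        spies.put(invocation.getArgument(0), spy);
        return spy;
      });

      Widget widget = Factory.create("w1");

      // Re-stub one method on the captured spy: the first call reports a
      // changed id, later calls return the normal value again.
      Mockito.when(spies.get("w1").id()).thenAnswer(i ->
          injected.compareAndSet(false, true) ? "changed-id" : "real-id");

      assertEquals("changed-id", widget.id());
      assertEquals("real-id", widget.id());
    }
  }
}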
+ failed.set(true); + return dnWithReplicaIndex1.getUuidString() + "_failed"; + } else { + return dnWithReplicaIndex1.getUuidString(); + } + }); + locationInfoList = groupOutputStream.getLocationInfoList(); + while (locationInfoList.size() == 1) { + locationInfoList = groupOutputStream.getLocationInfoList(); + Random random = new Random(); + random.nextBytes(b); + assertInstanceOf(ECKeyOutputStream.class, key.getOutputStream()); + key.write(b); + key.flush(); + } + assertEquals(2, locationInfoList.size()); + assertNotEquals(locationInfoList.get(1).getPipeline().getId(), pipeline.getId()); + GenericTestUtils.waitFor(() -> { + try { + return miniOzoneCluster.get().getStorageContainerManager().getContainerManager() + .getContainer(ContainerID.valueOf(containerId)).getState().equals( + HddsProtos.LifeCycleState.CLOSED); + } catch (ContainerNotFoundException e) { + throw new RuntimeException(e); + } + }, 1000, 30000); + key.close(); + Assertions.assertTrue(failed.get()); + } finally { + IOUtils.closeQuietly(client1); + if (miniOzoneCluster.get() != null) { + miniOzoneCluster.get().shutdown(); + } + } } + + @Test + public void testCreateKeyWithOutBucketDefaults() throws Exception { + OzoneVolume volume = objectStore.getVolume(volumeName); + OzoneBucket bucket = volume.getBucket(bucketName); + try (OzoneOutputStream out = bucket.createKey("myKey", inputSize)) { + assertInstanceOf(KeyOutputStream.class, out.getOutputStream()); + for (byte[] inputChunk : inputChunks) { + out.write(inputChunk); + } + } + } + + @Test + public void testCreateKeyWithBucketDefaults() throws Exception { + String myBucket = UUID.randomUUID().toString(); + OzoneVolume volume = objectStore.getVolume(volumeName); + final BucketArgs.Builder bucketArgs = BucketArgs.newBuilder(); + bucketArgs.setDefaultReplicationConfig( + new DefaultReplicationConfig( + new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, + chunkSize))); + + volume.createBucket(myBucket, bucketArgs.build()); + OzoneBucket bucket = volume.getBucket(myBucket); + + try (OzoneOutputStream out = bucket.createKey(keyString, inputSize)) { + assertInstanceOf(ECKeyOutputStream.class, out.getOutputStream()); + for (byte[] inputChunk : inputChunks) { + out.write(inputChunk); + } + } + byte[] buf = new byte[chunkSize]; + try (OzoneInputStream in = bucket.readKey(keyString)) { + for (byte[] inputChunk : inputChunks) { + int read = in.read(buf, 0, chunkSize); + assertEquals(chunkSize, read); + assertArrayEquals(buf, inputChunk); + } + } + } + + @Test + public void testOverwriteECKeyWithRatisKey() throws Exception { + String myBucket = UUID.randomUUID().toString(); + OzoneVolume volume = objectStore.getVolume(volumeName); + final BucketArgs.Builder bucketArgs = BucketArgs.newBuilder(); + volume.createBucket(myBucket, bucketArgs.build()); + OzoneBucket bucket = volume.getBucket(myBucket); + createKeyAndCheckReplicationConfig(keyString, bucket, + new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, + chunkSize)); + + //Overwrite with RATIS/THREE + createKeyAndCheckReplicationConfig(keyString, bucket, + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)); + + //Overwrite with RATIS/ONE + createKeyAndCheckReplicationConfig(keyString, bucket, + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)); + } + + @Test + public void testOverwriteRatisKeyWithECKey() throws Exception { + String myBucket = UUID.randomUUID().toString(); + OzoneVolume volume = objectStore.getVolume(volumeName); + final BucketArgs.Builder bucketArgs = 
BucketArgs.newBuilder(); + volume.createBucket(myBucket, bucketArgs.build()); + OzoneBucket bucket = volume.getBucket(myBucket); + + createKeyAndCheckReplicationConfig(keyString, bucket, + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)); + // Overwrite with EC key + createKeyAndCheckReplicationConfig(keyString, bucket, + new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, + chunkSize)); + } + + private void createKeyAndCheckReplicationConfig(String keyName, + OzoneBucket bucket, ReplicationConfig replicationConfig) + throws IOException { + try (OzoneOutputStream out = bucket + .createKey(keyName, inputSize, replicationConfig, new HashMap<>())) { + for (byte[] inputChunk : inputChunks) { + out.write(inputChunk); + } + } + OzoneKeyDetails key = bucket.getKey(keyName); + assertEquals(replicationConfig, key.getReplicationConfig()); + } + + @Test + public void testCreateRatisKeyAndWithECBucketDefaults() throws Exception { + OzoneBucket bucket = getOzoneBucket(); + try (OzoneOutputStream out = bucket.createKey( + "testCreateRatisKeyAndWithECBucketDefaults", 2000, + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), + new HashMap<>())) { + assertInstanceOf(KeyOutputStream.class, out.getOutputStream()); + for (byte[] inputChunk : inputChunks) { + out.write(inputChunk); + } + } + } + + @Test + public void test13ChunksInSingleWriteOp() throws IOException { + testMultipleChunksInSingleWriteOp(13); + } + + @Test + public void testChunksInSingleWriteOpWithOffset() throws IOException { + testMultipleChunksInSingleWriteOp(11, 25, 19); + } + + @Test + public void test15ChunksInSingleWriteOp() throws IOException { + testMultipleChunksInSingleWriteOp(15); + } + + @Test + public void test20ChunksInSingleWriteOp() throws IOException { + testMultipleChunksInSingleWriteOp(20); + } + + @Test + public void test21ChunksInSingleWriteOp() throws IOException { + testMultipleChunksInSingleWriteOp(21); + } + + private void testMultipleChunksInSingleWriteOp(int offset, + int bufferChunks, int numChunks) + throws IOException { + byte[] inputData = getInputBytes(offset, bufferChunks, numChunks); + final OzoneBucket bucket = getOzoneBucket(); + String keyName = + String.format("testMultipleChunksInSingleWriteOpOffset" + + "%dBufferChunks%dNumChunks", offset, bufferChunks, + numChunks); + try (OzoneOutputStream out = bucket.createKey(keyName, 4096, + new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, + chunkSize), new HashMap<>())) { + out.write(inputData, offset, numChunks * chunkSize); + } + + validateContent(offset, numChunks * chunkSize, inputData, bucket, + bucket.getKey(keyName)); + } + + private void testMultipleChunksInSingleWriteOp(int numChunks) + throws IOException { + testMultipleChunksInSingleWriteOp(0, numChunks, numChunks); + } + + @Test + public void testECContainerKeysCountAndNumContainerReplicas() + throws IOException, InterruptedException, TimeoutException { + byte[] inputData = getInputBytes(1); + final OzoneBucket bucket = getOzoneBucket(); + ContainerOperationClient containerOperationClient = + new ContainerOperationClient(conf); + + ECReplicationConfig repConfig = new ECReplicationConfig( + 3, 2, ECReplicationConfig.EcCodec.RS, chunkSize); + // Close all EC pipelines so we must get a fresh pipeline and hence + // container for this test. 
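Most of the cases in this class reduce to the same round trip: create a bucket whose default replication config is EC, write a key (which yields an ECKeyOutputStream), and read the bytes back. A condensed sketch of that flow, reusing only the client calls shown in this file and assuming the class-level objectStore, volumeName and chunkSize fields set up in init():

  private static void writeAndReadBackEcKey(String bktName, String keyName)
      throws IOException {
    // Bucket whose default replication is RS(3,2) EC, so a plain createKey()
    // call produces an ECKeyOutputStream.
    ECReplicationConfig ec =
        new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, chunkSize);
    OzoneVolume volume = objectStore.getVolume(volumeName);
    volume.createBucket(bktName, BucketArgs.newBuilder()
        .setDefaultReplicationConfig(new DefaultReplicationConfig(ec))
        .build());
    OzoneBucket bucket = volume.getBucket(bktName);

    byte[] payload = new byte[chunkSize];
    new Random().nextBytes(payload);

    try (OzoneOutputStream out = bucket.createKey(keyName, payload.length)) {
      assertInstanceOf(ECKeyOutputStream.class, out.getOutputStream());
      out.write(payload);
    }

    try (OzoneInputStream in = bucket.readKey(keyName)) {
      byte[] read = new byte[payload.length];
      // Mirrors the validateContent() assertion used by these tests: a full
      // chunk is expected back in a single read.
      assertEquals(payload.length, in.read(read));
      assertArrayEquals(payload, read);
    }
  }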
+ PipelineManager pm = + cluster.getStorageContainerManager().getPipelineManager(); + for (Pipeline p : pm.getPipelines(repConfig)) { + pm.closePipeline(p, true); + } + + String keyName = UUID.randomUUID().toString(); + try (OzoneOutputStream out = bucket.createKey(keyName, 4096, + repConfig, new HashMap<>())) { + out.write(inputData); + } + OzoneKeyDetails key = bucket.getKey(keyName); + long currentKeyContainerID = + key.getOzoneKeyLocations().get(0).getContainerID(); + + GenericTestUtils.waitFor(() -> { + try { + return (containerOperationClient.getContainer(currentKeyContainerID) + .getNumberOfKeys() == 1) && (containerOperationClient + .getContainerReplicas(currentKeyContainerID).size() == 5); + } catch (IOException exception) { + fail("Unexpected exception " + exception); + return false; + } + }, 100, 10000); + validateContent(inputData, bucket, key); + } + + private void validateContent(byte[] inputData, OzoneBucket bucket, + OzoneKey key) throws IOException { + validateContent(0, inputData.length, inputData, bucket, key); + } + + private void validateContent(int offset, int length, byte[] inputData, + OzoneBucket bucket, + OzoneKey key) throws IOException { + try (OzoneInputStream is = bucket.readKey(key.getName())) { + byte[] fileContent = new byte[length]; + assertEquals(length, is.read(fileContent)); + assertEquals(new String(Arrays.copyOfRange(inputData, offset, + offset + length), UTF_8), + new String(fileContent, UTF_8)); + } + } + + private OzoneBucket getOzoneBucket() throws IOException { + String myBucket = UUID.randomUUID().toString(); + OzoneVolume volume = objectStore.getVolume(volumeName); + final BucketArgs.Builder bucketArgs = BucketArgs.newBuilder(); + bucketArgs.setDefaultReplicationConfig( + new DefaultReplicationConfig( + new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, + chunkSize))); + + volume.createBucket(myBucket, bucketArgs.build()); + return volume.getBucket(myBucket); + } + + private static void initInputChunks() { + for (int i = 0; i < dataBlocks; i++) { + inputChunks[i] = getBytesWith(i + 1, chunkSize); + } + } + + private static byte[] getBytesWith(int singleDigitNumber, int total) { + StringBuilder builder = new StringBuilder(singleDigitNumber); + for (int i = 1; i <= total; i++) { + builder.append(singleDigitNumber); + } + return builder.toString().getBytes(UTF_8); + } + + @Test + public void testWriteShouldSucceedWhenDNKilled() throws Exception { + int numChunks = 3; + byte[] inputData = getInputBytes(numChunks); + final OzoneBucket bucket = getOzoneBucket(); + String keyName = "testWriteShouldSucceedWhenDNKilled" + numChunks; + DatanodeDetails nodeToKill = null; + try { + try (OzoneOutputStream out = bucket.createKey(keyName, 1024, + new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, + chunkSize), new HashMap<>())) { + ECKeyOutputStream ecOut = (ECKeyOutputStream) out.getOutputStream(); + out.write(inputData); + // Kill a node from first pipeline + nodeToKill = ecOut.getStreamEntries() + .get(0).getPipeline().getFirstNode(); + cluster.shutdownHddsDatanode(nodeToKill); + + out.write(inputData); + + // Wait for flushing thread to finish its work. + final long checkpoint = System.currentTimeMillis(); + ecOut.insertFlushCheckpoint(checkpoint); + GenericTestUtils.waitFor(() -> ecOut.getFlushCheckpoint() == checkpoint, + 100, 10000); + + // Check the second blockGroup pipeline to make sure that the failed + // node is not selected. 
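Both the container-close check above and the flush-checkpoint wait rely on org.apache.ozone.test.GenericTestUtils.waitFor, which polls a boolean condition at a fixed interval until it holds or the timeout elapses (throwing TimeoutException). A standalone illustration of the idiom, with an AtomicLong standing in for cluster state:

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.ozone.test.GenericTestUtils;

public final class WaitForSketch {
  public static void main(String[] args) throws Exception {
    AtomicLong progress = new AtomicLong();

    // Background work that eventually satisfies the condition.
    Thread worker = new Thread(() -> {
      for (int i = 0; i < 5; i++) {
        try {
          TimeUnit.MILLISECONDS.sleep(200);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          return;
        }
        progress.incrementAndGet();
      }
    });
    worker.start();

    // Poll every 100 ms and give up after 10 s -- the same interval/timeout
    // pair used by the waitFor calls in this patch.
    GenericTestUtils.waitFor(() -> progress.get() >= 5, 100, 10_000);
    System.out.println("condition reached: " + progress.get());
  }
}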
+ assertThat(ecOut.getStreamEntries().get(1).getPipeline().getNodes()) + .doesNotContain(nodeToKill); + } + + try (OzoneInputStream is = bucket.readKey(keyName)) { + // We wrote "inputData" twice, so do two reads and ensure the correct + // data comes back. + for (int i = 0; i < 2; i++) { + byte[] fileContent = new byte[inputData.length]; + assertEquals(inputData.length, is.read(fileContent)); + assertEquals(new String(inputData, UTF_8), + new String(fileContent, UTF_8)); + } + } + } finally { + cluster.restartHddsDatanode(nodeToKill, true); + } + } + + private byte[] getInputBytes(int numChunks) { + return getInputBytes(0, numChunks, numChunks); + } + + private byte[] getInputBytes(int offset, int bufferChunks, int numChunks) { + byte[] inputData = new byte[offset + bufferChunks * chunkSize]; + for (int i = 0; i < numChunks; i++) { + int start = offset + (i * chunkSize); + Arrays.fill(inputData, start, start + chunkSize - 1, + String.valueOf(i % 9).getBytes(UTF_8)[0]); + } + return inputData; + } + + @Test + public void testBlockedHflushAndHsync() throws Exception { + // Expect ECKeyOutputStream hflush and hsync calls to throw exception + try (OzoneOutputStream oOut = TestHelper.createKey( + keyString, new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, chunkSize), + inputSize, objectStore, volumeName, bucketName)) { + assertInstanceOf(ECKeyOutputStream.class, oOut.getOutputStream()); + KeyOutputStream kOut = (KeyOutputStream) oOut.getOutputStream(); + + assertThrows(NotImplementedException.class, () -> kOut.hflush()); + assertThrows(NotImplementedException.class, () -> kOut.hsync()); + } + } + } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java index b3d38fe8bc2..5252f49daa3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java @@ -112,6 +112,7 @@ import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; @@ -124,7 +125,7 @@ class TestOzoneAtRestEncryption { private static OzoneManager ozoneManager; private static StorageContainerLocationProtocolClientSideTranslatorPB storageContainerLocationClient; - + @TempDir private static File testDir; private static OzoneConfiguration conf; private static final String TEST_KEY = "key1"; @@ -140,9 +141,6 @@ class TestOzoneAtRestEncryption { @BeforeAll static void init() throws Exception { - testDir = GenericTestUtils.getTestDir( - TestSecureOzoneRpcClient.class.getSimpleName()); - File kmsDir = new File(testDir, UUID.randomUUID().toString()); assertTrue(kmsDir.mkdirs()); MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java index fd32698eec2..62429368690 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java @@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.TestInstance; @@ -151,5 +152,11 @@ private void assertListStatus(OzoneBucket bucket, String keyName, List versions = files.get(0).getKeyInfo().getKeyLocationVersions(); assertEquals(expectedVersionCount, versions.size()); + + List lightFiles = bucket.listStatusLight(keyName, false, "", 1); + + assertNotNull(lightFiles); + assertEquals(1, lightFiles.size()); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java index b2766599ae4..fc29e031548 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java @@ -68,6 +68,7 @@ import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; @@ -99,15 +100,15 @@ */ class TestSecureOzoneRpcClient extends OzoneRpcClientTests { + @TempDir + private static File testDir; private static String keyProviderUri = "kms://http@kms:9600/kms"; @BeforeAll public static void init() throws Exception { - File testDir = GenericTestUtils.getTestDir( - TestSecureOzoneRpcClient.class.getSimpleName()); OzoneManager.setTestSecureOmFlag(true); OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); + conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath()); conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1); conf.setBoolean(HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED, true); conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index 5ff8d713649..719715ac8b3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -17,11 +17,18 @@ */ package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; +import java.io.IOException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.TimeUnit; import java.util.stream.Stream; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import 
org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; -import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -31,20 +38,23 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.block.DeletedBlockLogImpl; import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService; import org.apache.hadoop.hdds.scm.block.ScmBlockDeletingServiceMetrics; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; import org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.scm.container.ContainerStateManager; import org.apache.hadoop.hdds.scm.container.replication.LegacyReplicationManager; +import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.server.events.EventQueue; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -80,17 +90,6 @@ import org.slf4j.LoggerFactory; import org.slf4j.event.Level; -import java.io.IOException; -import java.time.Duration; -import java.util.HashMap; -import java.util.Map; -import java.util.Set; -import java.util.List; -import java.util.HashSet; -import java.util.ArrayList; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - import static java.lang.Math.max; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; @@ -610,7 +609,7 @@ public void testContainerDeleteWithInvalidKeyCount() final int valueSize = value.getBytes(UTF_8).length; final int keyCount = 1; List containerIdList = new ArrayList<>(); - containerInfos.stream().forEach(container -> { + containerInfos.forEach(container -> { assertEquals(valueSize, container.getUsedBytes()); assertEquals(keyCount, container.getNumberOfKeys()); containerIdList.add(container.getContainerID()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java index e68831b494f..40df94858e6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java @@ -22,8 +22,8 @@ import static org.apache.ozone.test.MetricsAsserts.getDoubleGauge; import static org.apache.ozone.test.MetricsAsserts.getMetrics; -import java.io.File; import java.io.IOException; +import java.nio.file.Path; import 
java.util.List; import java.util.ArrayList; @@ -49,7 +49,6 @@ import org.apache.hadoop.ozone.container.common.transport.server .XceiverServerSpi; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.apache.ozone.test.GenericTestUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import static org.apache.ratis.rpc.SupportedRpcType.GRPC; @@ -66,14 +65,14 @@ import org.apache.ratis.util.function.CheckedBiFunction; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; /** * This class tests the metrics of ContainerStateMachine. */ public class TestCSMMetrics { - private static final String TEST_DIR = - GenericTestUtils.getTestDir("dfs").getAbsolutePath() - + File.separator; + @TempDir + private static Path testDir; @BeforeAll public static void setup() { @@ -154,6 +153,14 @@ static void runContainerStateMachineMetrics( assertCounter("NumContainerNotOpenVerifyFailures", 0L, metric); assertCounter("WriteChunkMsNumOps", 1L, metric); + applyTransactionLatency = getDoubleGauge( + "ApplyTransactionNsAvgTime", metric); + assertThat(applyTransactionLatency).isGreaterThan(0.0); + writeStateMachineLatency = getDoubleGauge( + "WriteStateMachineDataNsAvgTime", metric); + assertThat(writeStateMachineLatency).isGreaterThan(0.0); + + //Read Chunk ContainerProtos.ContainerCommandRequestProto readChunkRequest = ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest @@ -166,12 +173,6 @@ static void runContainerStateMachineMetrics( RaftGroupId.valueOf(pipeline.getId().getId())); assertCounter("NumQueryStateMachineOps", 1L, metric); assertCounter("NumApplyTransactionOps", 1L, metric); - applyTransactionLatency = getDoubleGauge( - "ApplyTransactionNsAvgTime", metric); - assertThat(applyTransactionLatency).isGreaterThan(0.0); - writeStateMachineLatency = getDoubleGauge( - "WriteStateMachineDataNsAvgTime", metric); - assertThat(writeStateMachineLatency).isGreaterThan(0.0); } finally { if (client != null) { @@ -184,8 +185,8 @@ static void runContainerStateMachineMetrics( static XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, - dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); - final String dir = TEST_DIR + dn.getUuid(); + dn.getRatisPort().getValue()); + final String dir = testDir.resolve(dn.getUuidString()).toString(); conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final ContainerDispatcher dispatcher = new TestContainerDispatcher(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java index f55912b26b0..e0c0fde6fe2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java @@ -24,7 +24,6 @@ import java.util.UUID; import org.apache.commons.io.FileUtils; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.DFSConfigKeysLegacy; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -57,7 +56,6 @@ import org.apache.hadoop.ozone.container.common.volume.StorageVolume; import 
org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.apache.ozone.test.GenericTestUtils; import com.google.common.collect.Maps; import static org.apache.ozone.test.MetricsAsserts.assertCounter; @@ -70,7 +68,6 @@ import org.apache.ratis.util.function.CheckedBiFunction; import org.apache.ratis.util.function.CheckedConsumer; import org.apache.ratis.util.function.CheckedFunction; -import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -82,7 +79,8 @@ */ @Timeout(300) public class TestContainerMetrics { - static final String TEST_DIR = GenericTestUtils.getRandomizedTempPath() + File.separator; + @TempDir + private static Path testDir; @TempDir private Path tempDir; private static final OzoneConfiguration CONF = new OzoneConfiguration(); @@ -94,17 +92,8 @@ public static void setup() { CONF.setInt(DFSConfigKeysLegacy.DFS_METRICS_PERCENTILES_INTERVALS_KEY, DFS_METRICS_PERCENTILES_INTERVALS); CONF.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, false); - CONF.set(OzoneConfigKeys.OZONE_METADATA_DIRS, TEST_DIR); - - } + CONF.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir.toString()); - @AfterAll - public static void cleanup() { - // clean up volume dir - File file = new File(TEST_DIR); - if (file.exists()) { - FileUtil.fullyDelete(file); - } } @AfterEach @@ -119,7 +108,7 @@ public void testContainerMetrics() throws Exception { runTestClientServer(pipeline -> CONF .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() - .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), + .getStandalonePort().getValue()), pipeline -> new XceiverClientGrpc(pipeline, CONF), (dn, volumeSet) -> new XceiverServerGrpc(dn, CONF, createDispatcher(dn, volumeSet), null), (dn, p) -> { @@ -172,7 +161,7 @@ static void runTestClientServer( initConf.accept(pipeline); DatanodeDetails dn = pipeline.getFirstNode(); - volumeSet = createVolumeSet(dn, TEST_DIR + dn.getUuidString()); + volumeSet = createVolumeSet(dn, testDir.resolve(dn.getUuidString()).toString()); server = createServer.apply(dn, volumeSet); server.start(); initServer.accept(dn, pipeline); @@ -235,8 +224,8 @@ static void runTestClientServer( private XceiverServerSpi newXceiverServerRatis(DatanodeDetails dn, MutableVolumeSet volumeSet) throws IOException { CONF.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, - dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); - final String dir = TEST_DIR + dn.getUuid(); + dn.getRatisPort().getValue()); + final String dir = testDir.resolve(dn.getUuidString()).toString(); CONF.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final ContainerDispatcher dispatcher = createDispatcher(dn, volumeSet); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 1c5da04c0a3..553ea03f1fa 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.container.ozoneimpl; +import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.client.BlockID; import 
org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; @@ -31,11 +32,13 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; +import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; import java.io.File; +import java.io.IOException; import java.nio.file.Path; import java.util.HashMap; import java.util.LinkedList; @@ -73,8 +76,7 @@ public void testCreateOzoneContainer( conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath()); conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath()); conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, - pipeline.getFirstNode() - .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()); + pipeline.getFirstNode().getStandalonePort().getValue()); DatanodeDetails datanodeDetails = randomDatanodeDetails(); container = ContainerTestUtils @@ -106,8 +108,7 @@ void testOzoneContainerStart( conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath()); conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath()); conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, - pipeline.getFirstNode() - .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()); + pipeline.getFirstNode().getStandalonePort().getValue()); DatanodeDetails datanodeDetails = randomDatanodeDetails(); container = ContainerTestUtils @@ -160,6 +161,159 @@ public void testOzoneContainerViaDataNode() throws Exception { } } + @Test + public void testOzoneContainerWithMissingContainer() throws Exception { + MiniOzoneCluster cluster = null; + try { + long containerID = + ContainerTestHelper.getTestContainerID(); + OzoneConfiguration conf = newOzoneConfiguration(); + + // Start ozone container Via Datanode create. + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(1) + .build(); + cluster.waitForClusterToBeReady(); + + runTestOzoneContainerWithMissingContainer(cluster, containerID); + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } + + private void runTestOzoneContainerWithMissingContainer( + MiniOzoneCluster cluster, long testContainerID) throws Exception { + ContainerProtos.ContainerCommandRequestProto + request, writeChunkRequest, putBlockRequest, + updateRequest1, updateRequest2; + ContainerProtos.ContainerCommandResponseProto response, + updateResponse1, updateResponse2; + XceiverClientGrpc client = null; + try { + // This client talks to ozone container via datanode. + client = createClientForTesting(cluster); + client.connect(); + Pipeline pipeline = client.getPipeline(); + createContainerForTesting(client, testContainerID); + writeChunkRequest = writeChunkForContainer(client, testContainerID, + 1024); + + DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0).getDatanodeDetails(); + File containerPath = + new File(cluster.getHddsDatanode(datanodeDetails).getDatanodeStateMachine() + .getContainer().getContainerSet().getContainer(testContainerID) + .getContainerData().getContainerPath()); + cluster.getHddsDatanode(datanodeDetails).stop(); + FileUtils.deleteDirectory(containerPath); + + // Restart & Check if the container has been marked as missing, since the container directory has been deleted. 
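The checks that follow all repeat one pattern: send a container command and assert the result code the datanode returns once the container data is gone. A small hypothetical helper (sendAndExpect is not part of the patch) makes that pattern explicit, reusing the same sendCommand call the test already exercises:

  // Hypothetical helper, not part of this patch.
  private static ContainerProtos.ContainerCommandResponseProto sendAndExpect(
      XceiverClientSpi client,
      ContainerProtos.ContainerCommandRequestProto request,
      ContainerProtos.Result expectedResult) throws Exception {
    ContainerProtos.ContainerCommandResponseProto response = client.sendCommand(request);
    assertNotNull(response);
    assertEquals(expectedResult, response.getResult());
    return response;
  }

  // Usage, mirroring the assertions below (with the request variables of this test):
  //   sendAndExpect(client, request, ContainerProtos.Result.CONTAINER_NOT_FOUND);       // read chunk
  //   sendAndExpect(client, putBlockRequest, ContainerProtos.Result.CONTAINER_MISSING);
  //   sendAndExpect(client, writeChunkRequest, ContainerProtos.Result.CONTAINER_MISSING);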
+ cluster.restartHddsDatanode(datanodeDetails, false); + GenericTestUtils.waitFor(() -> { + try { + return cluster.getHddsDatanode(datanodeDetails).getDatanodeStateMachine() + .getContainer().getContainerSet() + .getMissingContainerSet().contains(testContainerID); + } catch (IOException e) { + return false; + } + }, 1000, 30000); + + // Read Chunk + request = ContainerTestHelper.getReadChunkRequest( + pipeline, writeChunkRequest.getWriteChunk()); + + response = client.sendCommand(request); + assertNotNull(response); + assertEquals(ContainerProtos.Result.CONTAINER_NOT_FOUND, response.getResult()); + + response = createContainerForTesting(client, testContainerID); + assertEquals(ContainerProtos.Result.CONTAINER_MISSING, response.getResult()); + + // Put Block + putBlockRequest = ContainerTestHelper.getPutBlockRequest( + pipeline, writeChunkRequest.getWriteChunk()); + + response = client.sendCommand(putBlockRequest); + assertNotNull(response); + assertEquals(ContainerProtos.Result.CONTAINER_MISSING, response.getResult()); + + // Write chunk + response = client.sendCommand(writeChunkRequest); + assertNotNull(response); + assertEquals(ContainerProtos.Result.CONTAINER_MISSING, response.getResult()); + + // Get Block + request = ContainerTestHelper. + getBlockRequest(pipeline, putBlockRequest.getPutBlock()); + response = client.sendCommand(request); + assertEquals(ContainerProtos.Result.CONTAINER_NOT_FOUND, response.getResult()); + + // Create Container + request = ContainerTestHelper.getCreateContainerRequest(testContainerID, pipeline); + response = client.sendCommand(request); + assertEquals(ContainerProtos.Result.CONTAINER_MISSING, response.getResult()); + + // Delete Block and Delete Chunk are handled by BlockDeletingService + // ContainerCommandRequestProto DeleteBlock and DeleteChunk requests + // are deprecated + + //Update an existing container + Map containerUpdate = new HashMap(); + containerUpdate.put("container_updated_key", "container_updated_value"); + updateRequest1 = ContainerTestHelper.getUpdateContainerRequest( + testContainerID, containerUpdate); + updateResponse1 = client.sendCommand(updateRequest1); + assertNotNull(updateResponse1); + assertEquals(ContainerProtos.Result.CONTAINER_MISSING, updateResponse1.getResult()); + + //Update an non-existing container + long nonExistingContinerID = + ContainerTestHelper.getTestContainerID(); + updateRequest2 = ContainerTestHelper.getUpdateContainerRequest( + nonExistingContinerID, containerUpdate); + updateResponse2 = client.sendCommand(updateRequest2); + assertEquals(ContainerProtos.Result.CONTAINER_NOT_FOUND, + updateResponse2.getResult()); + + // Restarting again & checking if the container is still not present on disk and marked as missing, this is to + // ensure the previous write request didn't inadvertently create the container data. 
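Before the recovering-container phase below, the assertions above pin down a consistent mapping from request type to result code for a container whose data directory was wiped. A hypothetical table-driven condensation of those expectations (the request variable names here are placeholders for the requests already built in this test):

  Map<ContainerProtos.ContainerCommandRequestProto, ContainerProtos.Result> expected =
      new LinkedHashMap<>();
  expected.put(readChunkRequest,  ContainerProtos.Result.CONTAINER_NOT_FOUND); // read path
  expected.put(getBlockRequest,   ContainerProtos.Result.CONTAINER_NOT_FOUND); // read path
  expected.put(writeChunkRequest, ContainerProtos.Result.CONTAINER_MISSING);   // write path
  expected.put(putBlockRequest,   ContainerProtos.Result.CONTAINER_MISSING);   // write path
  expected.put(createRequest,     ContainerProtos.Result.CONTAINER_MISSING);   // plain re-create

  for (Map.Entry<ContainerProtos.ContainerCommandRequestProto, ContainerProtos.Result> e
      : expected.entrySet()) {
    assertEquals(e.getValue(), client.sendCommand(e.getKey()).getResult());
  }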
+ cluster.restartHddsDatanode(datanodeDetails, false); + GenericTestUtils.waitFor(() -> { + try { + return cluster.getHddsDatanode(datanodeDetails).getDatanodeStateMachine() + .getContainer().getContainerSet() + .getMissingContainerSet().contains(testContainerID); + } catch (IOException e) { + return false; + } + }, 1000, 30000); + // Create Recovering Container + request = ContainerTestHelper.getCreateContainerRequest(testContainerID, pipeline, + ContainerProtos.ContainerDataProto.State.RECOVERING); + response = client.sendCommand(request); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + //write chunk on recovering container + response = client.sendCommand(writeChunkRequest); + assertNotNull(response); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + //write chunk on recovering container + response = client.sendCommand(putBlockRequest); + assertNotNull(response); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + //Get block on the recovering container should succeed now. + request = ContainerTestHelper.getBlockRequest(pipeline, putBlockRequest.getPutBlock()); + response = client.sendCommand(request); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + + } finally { + if (client != null) { + client.close(); + } + } + } + public static void runTestOzoneContainerViaDataNode( long testContainerID, XceiverClientSpi client) throws Exception { ContainerProtos.ContainerCommandRequestProto @@ -506,10 +660,14 @@ private static XceiverClientGrpc createClientForTesting( MiniOzoneCluster cluster) { Pipeline pipeline = cluster.getStorageContainerManager() .getPipelineManager().getPipelines().iterator().next(); + return createClientForTesting(pipeline, cluster); + } + + private static XceiverClientGrpc createClientForTesting(Pipeline pipeline, MiniOzoneCluster cluster) { return new XceiverClientGrpc(pipeline, cluster.getConf()); } - public static void createContainerForTesting(XceiverClientSpi client, + public static ContainerProtos.ContainerCommandResponseProto createContainerForTesting(XceiverClientSpi client, long containerID) throws Exception { // Create container ContainerProtos.ContainerCommandRequestProto request = @@ -518,6 +676,7 @@ public static void createContainerForTesting(XceiverClientSpi client, ContainerProtos.ContainerCommandResponseProto response = client.sendCommand(request); assertNotNull(response); + return response; } public static ContainerProtos.ContainerCommandRequestProto diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java index 92d716f7a40..262d3026e78 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java @@ -42,7 +42,6 @@ import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; -import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.util.ExitUtils; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; @@ -82,6 +81,8 @@ class TestSecureOzoneContainer { @TempDir private Path tempFolder; + @TempDir + private Path ozoneMetaPath; private 
OzoneConfiguration conf; private CertificateClientTestImpl caClient; @@ -107,9 +108,7 @@ static void init() { @BeforeEach void setup() throws Exception { conf = new OzoneConfiguration(); - String ozoneMetaPath = - GenericTestUtils.getTempPath("ozoneMeta"); - conf.set(OZONE_METADATA_DIRS, ozoneMetaPath); + conf.set(OZONE_METADATA_DIRS, ozoneMetaPath.toString()); caClient = new CertificateClientTestImpl(conf); secretKeyClient = new SecretKeyTestClient(); secretManager = new ContainerTokenSecretManager( @@ -132,8 +131,7 @@ void testCreateOzoneContainer(boolean requireToken, boolean hasToken, Pipeline pipeline = MockPipeline.createSingleNodePipeline(); conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.toString()); conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline - .getFirstNode().getPort(DatanodeDetails.Port.Name.STANDALONE) - .getValue()); + .getFirstNode().getStandalonePort().getValue()); conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, false); DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerReplication.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerReplication.java index 08932aa4e37..4e7aeaeef79 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerReplication.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerReplication.java @@ -137,7 +137,7 @@ void targetPullsFromWrongService() throws Exception { long containerID = createNewClosedContainer(source); DatanodeDetails invalidPort = new DatanodeDetails(source); invalidPort.setPort(Port.Name.REPLICATION, - source.getPort(Port.Name.STANDALONE).getValue()); + source.getStandalonePort().getValue()); ReplicateContainerCommand cmd = ReplicateContainerCommand.fromSources(containerID, ImmutableList.of(invalidPort)); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java index 27e85501662..32e7a0e9e66 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.container.server; -import java.io.File; import java.io.IOException; import java.nio.file.Path; import java.nio.file.Paths; @@ -62,7 +61,6 @@ import org.apache.hadoop.ozone.container.common.volume.StorageVolume; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.apache.ozone.test.GenericTestUtils; import com.google.common.collect.Maps; import org.apache.ratis.rpc.RpcType; import org.apache.ratis.util.function.CheckedBiConsumer; @@ -82,8 +80,8 @@ * Test Containers. 
*/ public class TestContainerServer { - static final String TEST_DIR = GenericTestUtils.getTestDir("dfs") - .getAbsolutePath() + File.separator; + @TempDir + private static Path testDir; private static final OzoneConfiguration CONF = new OzoneConfiguration(); private static CertificateClient caClient; @TempDir @@ -92,7 +90,7 @@ public class TestContainerServer { @BeforeAll public static void setup() { DefaultMetricsSystem.setMiniClusterMode(true); - CONF.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, TEST_DIR); + CONF.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, testDir.toString()); CONF.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, false); DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); caClient = new DNCertificateClient(new SecurityConfig(CONF), null, @@ -110,7 +108,7 @@ public void testClientServer() throws Exception { runTestClientServer(1, (pipeline, conf) -> conf .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() - .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), + .getStandalonePort().getValue()), XceiverClientGrpc::new, (dn, conf) -> new XceiverServerGrpc(datanodeDetails, conf, new TestContainerDispatcher(), caClient), (dn, p) -> { @@ -126,8 +124,8 @@ public void testClientServerRatisGrpc() throws Exception { static XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, - dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); - final String dir = TEST_DIR + dn.getUuid(); + dn.getRatisPort().getValue()); + final String dir = testDir.resolve(dn.getUuid().toString()).toString(); conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final ContainerDispatcher dispatcher = new TestContainerDispatcher(); @@ -191,9 +189,9 @@ private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, throws IOException { ContainerSet containerSet = new ContainerSet(1000); conf.set(HDDS_DATANODE_DIR_KEY, - Paths.get(TEST_DIR, "dfs", "data", "hdds", + Paths.get(testDir.toString(), "dfs", "data", "hdds", RandomStringUtils.randomAlphabetic(4)).toString()); - conf.set(OZONE_METADATA_DIRS, TEST_DIR); + conf.set(OZONE_METADATA_DIRS, testDir.toString()); VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) @@ -211,8 +209,7 @@ public void testClientServerWithContainerDispatcher() throws Exception { UUID.randomUUID(), CONF); runTestClientServer(1, (pipeline, conf) -> conf .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, - pipeline.getFirstNode() - .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), + pipeline.getFirstNode().getStandalonePort().getValue()), XceiverClientGrpc::new, (dn, conf) -> new XceiverServerGrpc(dd, conf, hddsDispatcher, caClient), (dn, p) -> { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java index cae7f6bb59e..8be3549f67e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java @@ -70,7 +70,6 @@ import 
org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; import org.apache.hadoop.security.token.Token; -import org.apache.ozone.test.GenericTestUtils; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; @@ -98,7 +97,6 @@ import org.apache.ratis.util.function.CheckedBiConsumer; import org.apache.ratis.util.function.CheckedBiFunction; -import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -117,8 +115,8 @@ public class TestSecureContainerServer { @TempDir private Path tempDir; - private static final String TEST_DIR - = GenericTestUtils.getTestDir("dfs").getAbsolutePath() + File.separator; + @TempDir + private static Path testDir; private static final OzoneConfiguration CONF = new OzoneConfiguration(); private static CertificateClientTestImpl caClient; private static SecretKeyClient secretKeyClient; @@ -129,7 +127,7 @@ public class TestSecureContainerServer { public static void setup() throws Exception { DefaultMetricsSystem.setMiniClusterMode(true); ExitUtils.disableSystemExit(); - CONF.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, TEST_DIR); + CONF.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, testDir.toString()); CONF.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); CONF.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true); caClient = new CertificateClientTestImpl(CONF); @@ -144,11 +142,6 @@ public static void setup() throws Exception { tokenLifetime, secretKeyClient); } - @AfterAll - public static void deleteTestDir() { - FileUtils.deleteQuietly(new File(TEST_DIR)); - } - @AfterEach public void cleanUp() throws IOException { FileUtils.deleteQuietly(new File(CONF.get(HDDS_DATANODE_DIR_KEY))); @@ -162,7 +155,7 @@ public void testClientServer() throws Exception { runTestClientServer(1, (pipeline, conf) -> conf .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() - .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), + .getStandalonePort().getValue()), XceiverClientGrpc::new, (dn, conf) -> new XceiverServerGrpc(dd, conf, hddsDispatcher, caClient), (dn, p) -> { }, (p) -> { }); @@ -172,9 +165,9 @@ private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf) throws IOException { ContainerSet containerSet = new ContainerSet(1000); conf.set(HDDS_DATANODE_DIR_KEY, - Paths.get(TEST_DIR, "dfs", "data", "hdds", + Paths.get(testDir.toString(), "dfs", "data", "hdds", RandomStringUtils.randomAlphabetic(4)).toString()); - conf.set(OZONE_METADATA_DIRS, TEST_DIR); + conf.set(OZONE_METADATA_DIRS, testDir.toString()); VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) @@ -195,12 +188,12 @@ public void testClientServerRatisGrpc() throws Exception { XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, - dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); + dn.getRatisPort().getValue()); conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); conf.setBoolean( OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); - final String dir = TEST_DIR + dn.getUuid(); + final String dir = 
testDir.resolve(dn.getUuidString()).toString(); conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final ContainerDispatcher dispatcher = createDispatcher(dn, UUID.randomUUID(), conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java index a4327a49bfa..aac55367adc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.debug; import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectWriter; import org.apache.commons.lang3.tuple.Pair; @@ -35,6 +36,8 @@ import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition; +import org.apache.hadoop.ozone.debug.ldb.DBScanner; +import org.apache.hadoop.ozone.debug.ldb.RDBParser; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import jakarta.annotation.Nonnull; @@ -68,6 +71,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertNotNull; /** * This class tests `ozone debug ldb` CLI that reads from a RocksDB directory. @@ -98,8 +102,6 @@ public void setup() throws IOException { pstderr = new PrintWriter(stderr); cmd = new CommandLine(new RDBParser()) - .addSubcommand(new DBScanner()) - .addSubcommand(new ValueSchema()) .setOut(pstdout) .setErr(pstderr); @@ -120,6 +122,7 @@ public void shutdown() throws IOException { /** * Defines ldb tool test cases. 
*/ + @SuppressWarnings({"methodlength"}) private static Stream scanTestCases() { return Stream.of( Arguments.of( @@ -182,6 +185,43 @@ private static Stream scanTestCases() { Named.of("Filter invalid key", Arrays.asList("--filter", "keyName:equals:key9")), Named.of("Expect key1-key3", null) ), + Arguments.of( + Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)), + Named.of("Default", Pair.of(0, "")), + Named.of("Filter dataSize<2000", Arrays.asList("--filter", "dataSize:lesser:2000")), + Named.of("Expect key1-key5", Pair.of("key1", "key6")) + ), + Arguments.of( + Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)), + Named.of("Default", Pair.of(0, "")), + Named.of("Filter dataSize<500", Arrays.asList("--filter", "dataSize:lesser:500")), + Named.of("Expect empty result", null) + ), + Arguments.of( + Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)), + Named.of("Default", Pair.of(0, "")), + Named.of("Filter dataSize>500", Arrays.asList("--filter", "dataSize:greater:500")), + Named.of("Expect key1-key5", Pair.of("key1", "key6")) + ), + Arguments.of( + Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)), + Named.of("Default", Pair.of(0, "")), + Named.of("Filter dataSize>2000", Arrays.asList("--filter", "dataSize:greater:2000")), + Named.of("Expect empty result", null) + ), + Arguments.of( + Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)), + Named.of("Default", Pair.of(0, "")), + Named.of("Filter key3 regex", Arrays.asList("--filter", "keyName:regex:^.*3$")), + Named.of("Expect key3", Pair.of("key3", "key4")) + ), + Arguments.of( + Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)), + Named.of("Default", Pair.of(0, "")), + Named.of("Filter keys whose dataSize digits start with 5 using regex", + Arrays.asList("--filter", "dataSize:regex:^5.*$")), + Named.of("Expect empty result", null) + ), Arguments.of( Named.of(BLOCK_DATA + " V3", Pair.of(BLOCK_DATA, true)), Named.of("Default", Pair.of(0, "")), @@ -303,6 +343,50 @@ void testScanOfPipelinesWhenNoData() throws IOException { assertEquals("", stderr.toString()); } + @Test + void testScanWithRecordsPerFile() throws IOException { + // Prepare dummy table + int recordsCount = 5; + prepareKeyTable(recordsCount); + + String scanDir1 = tempDir.getAbsolutePath() + "/scandir1"; + // Prepare scan args + int maxRecordsPerFile = 2; + List completeScanArgs1 = new ArrayList<>(Arrays.asList( + "--db", dbStore.getDbLocation().getAbsolutePath(), + "scan", + "--column-family", KEY_TABLE, "--out", scanDir1 + File.separator + "keytable", + "--max-records-per-file", String.valueOf(maxRecordsPerFile))); + File tmpDir1 = new File(scanDir1); + tmpDir1.deleteOnExit(); + + int exitCode1 = cmd.execute(completeScanArgs1.toArray(new String[0])); + assertEquals(0, exitCode1); + assertTrue(tmpDir1.isDirectory()); + File[] subFiles = tmpDir1.listFiles(); + assertNotNull(subFiles); + assertEquals(Math.ceil(recordsCount / (maxRecordsPerFile * 1.0)), subFiles.length); + for (File subFile : subFiles) { + JsonNode jsonNode = MAPPER.readTree(subFile); + assertNotNull(jsonNode); + } + + String scanDir2 = tempDir.getAbsolutePath() + "/scandir2"; + // Used with parameter '-l' + List completeScanArgs2 = new ArrayList<>(Arrays.asList( + "--db", dbStore.getDbLocation().getAbsolutePath(), + "scan", + "--column-family", KEY_TABLE, "--out", scanDir2 + File.separator + "keytable", + "--max-records-per-file", String.valueOf(maxRecordsPerFile), "-l", "2")); + File tmpDir2 = new File(scanDir2); + tmpDir2.deleteOnExit(); + + int exitCode2 = cmd.execute(completeScanArgs2.toArray(new String[0])); + assertEquals(0, 
exitCode2); + assertTrue(tmpDir2.isDirectory()); + assertEquals(1, tmpDir2.listFiles().length); + } + @Test void testSchemaCommand() throws IOException { // Prepare dummy table @@ -351,22 +435,7 @@ private void prepareTable(String tableName, boolean schemaV3) switch (tableName) { case KEY_TABLE: - // Dummy om.db with only keyTable - dbStore = DBStoreBuilder.newBuilder(conf).setName("om.db") - .setPath(tempDir.toPath()).addTable(KEY_TABLE).build(); - - Table keyTable = dbStore.getTable(KEY_TABLE); - // Insert 5 keys - for (int i = 1; i <= 5; i++) { - String key = "key" + i; - OmKeyInfo value = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", - key, ReplicationConfig.fromProtoTypeAndFactor(STAND_ALONE, HddsProtos.ReplicationFactor.ONE)).build(); - keyTable.put(key.getBytes(UTF_8), - value.getProtobuf(ClientVersion.CURRENT_VERSION).toByteArray()); - - // Populate map - dbMap.put(key, toMap(value)); - } + prepareKeyTable(5); break; case BLOCK_DATA: @@ -414,6 +483,29 @@ private void prepareTable(String tableName, boolean schemaV3) } } + /** + * Prepare the keytable for testing. + * @param recordsCount prepare the number of keys + */ + private void prepareKeyTable(int recordsCount) throws IOException { + if (recordsCount < 1) { + throw new IllegalArgumentException("recordsCount must be greater than 1."); + } + // Dummy om.db with only keyTable + dbStore = DBStoreBuilder.newBuilder(conf).setName("om.db") + .setPath(tempDir.toPath()).addTable(KEY_TABLE).build(); + Table keyTable = dbStore.getTable(KEY_TABLE); + for (int i = 1; i <= recordsCount; i++) { + String key = "key" + i; + OmKeyInfo value = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", + key, ReplicationConfig.fromProtoTypeAndFactor(STAND_ALONE, + HddsProtos.ReplicationFactor.ONE)).build(); + keyTable.put(key.getBytes(UTF_8), value.getProtobuf(ClientVersion.CURRENT_VERSION).toByteArray()); + // Populate map + dbMap.put(key, toMap(value)); + } + } + private static Map toMap(Object obj) throws IOException { ObjectWriter objectWriter = DBScanner.JsonSerializationHelper.getWriter(); String json = objectWriter.writeValueAsString(obj); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java index 7c82633f113..c5a45da8c77 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.segmentparser.DatanodeRatisLogParser; +import org.apache.hadoop.ozone.debug.segmentparser.DatanodeRatisLogParser; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java index 9137eca6c0e..d5551ce6737 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java @@ -36,6 +36,7 @@ import 
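testScanWithRecordsPerFile above asserts that `--max-records-per-file` splits the scan output into ceil(recordsCount / maxRecordsPerFile) files, each of which parses as JSON, and that combining it with the record limit `-l 2` yields a single file because the limit is reached before the first split. The sketch below illustrates that splitting behaviour; the file-naming scheme and JSON layout are assumptions for illustration, not the tool's exact output format.

```java
// Illustration of splitting a record stream into capped JSON files:
// at most maxPerFile records per file, so ceil(total / maxPerFile) files result.
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.Map;

final class SplitWriterSketch {
  private SplitWriterSketch() { }

  static int writeSplit(List<Map<String, Object>> records, File outBase, int maxPerFile)
      throws IOException {
    ObjectMapper mapper = new ObjectMapper();
    int fileCount = 0;
    for (int from = 0; from < records.size(); from += maxPerFile) {
      int to = Math.min(from + maxPerFile, records.size());
      // assumed naming: <out>, <out>.1, <out>.2, ... purely for the sketch
      File out = fileCount == 0 ? outBase : new File(outBase.getPath() + "." + fileCount);
      mapper.writerWithDefaultPrettyPrinter().writeValue(out, records.subList(from, to));
      fileCount++;
    }
    return fileCount;   // equals (int) Math.ceil(records.size() / (double) maxPerFile)
  }
}
```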
org.apache.ratis.server.raftlog.RaftLog; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; @@ -45,6 +46,9 @@ import java.io.IOException; import java.net.URI; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -53,8 +57,8 @@ * Test for HadoopDirTreeGenerator. */ public class TestHadoopDirTreeGenerator { - - private String path; + @TempDir + private java.nio.file.Path path; private OzoneConfiguration conf = null; private MiniOzoneCluster cluster = null; private ObjectStore store = null; @@ -64,12 +68,8 @@ public class TestHadoopDirTreeGenerator { @BeforeEach public void setup() { - path = GenericTestUtils - .getTempPath(TestHadoopDirTreeGenerator.class.getSimpleName()); GenericTestUtils.setLogLevel(RaftLog.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(RaftServer.LOG, Level.DEBUG); - File baseDir = new File(path); - baseDir.mkdirs(); } /** @@ -79,7 +79,6 @@ private void shutdown() throws IOException { IOUtils.closeQuietly(client); if (cluster != null) { cluster.shutdown(); - FileUtils.deleteDirectory(new File(path)); } } @@ -108,8 +107,8 @@ protected OzoneConfiguration getOzoneConfiguration() { public void testNestedDirTreeGeneration() throws Exception { try { startCluster(); - FileOutputStream out = FileUtils.openOutputStream(new File(path, - "conf")); + FileOutputStream out = FileUtils.openOutputStream(new File(path.toString(), + "conf")); cluster.getConf().writeXml(out); out.getFD().sync(); out.close(); @@ -140,7 +139,7 @@ private void verifyDirTree(String volumeName, String bucketName, int depth, OzoneVolume volume = store.getVolume(volumeName); volume.createBucket(bucketName); String rootPath = "o3fs://" + bucketName + "." + volumeName; - String confPath = new File(path, "conf").getAbsolutePath(); + String confPath = new File(path.toString(), "conf").getAbsolutePath(); new Freon().execute( new String[]{"-conf", confPath, "dtsg", "-d", depth + "", "-c", fileCount + "", "-s", span + "", "-n", "1", "-r", rootPath, @@ -154,7 +153,7 @@ private void verifyDirTree(String volumeName, String bucketName, int depth, FileStatus[] fileStatuses = fileSystem.listStatus(rootDir); // verify the num of peer directories, expected span count is 1 // as it has only one dir at root. - verifyActualSpan(1, fileStatuses); + verifyActualSpan(1, Arrays.asList(fileStatuses)); for (FileStatus fileStatus : fileStatuses) { int actualDepth = traverseToLeaf(fileSystem, fileStatus.getPath(), 1, depth, span, @@ -168,14 +167,16 @@ private int traverseToLeaf(FileSystem fs, Path dirPath, int depth, int expectedFileCnt, StorageSize perFileSize) throws IOException { FileStatus[] fileStatuses = fs.listStatus(dirPath); + List fileStatusList = new ArrayList<>(); + Collections.addAll(fileStatusList, fileStatuses); // check the num of peer directories except root and leaf as both // has less dirs. 
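Several integration tests in this patch (TestHadoopDirTreeGenerator, TestHadoopNestedDirGenerator, the OmBucketReadWrite tests, TestContainerMapper, TestRangerBGSyncService and others) replace GenericTestUtils temp paths plus manual mkdirs()/deleteDirectory() with JUnit 5 `@TempDir` injection. A minimal sketch of that pattern, assuming JUnit Jupiter is on the classpath:

```java
// Minimal sketch of the @TempDir pattern adopted in these tests: JUnit creates
// a fresh directory before each test and deletes it afterwards, so the manual
// GenericTestUtils.getTempPath() + mkdirs() + deleteDirectory() code goes away.
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

class TempDirPatternTest {
  @TempDir
  Path path;   // injected by JUnit Jupiter

  @Test
  void writesConfIntoTempDir() throws IOException {
    File conf = new File(path.toString(), "conf");
    Files.write(conf.toPath(), "dummy".getBytes());
    assertTrue(conf.exists());
    // no explicit cleanup needed: JUnit removes the directory after the test
  }
}
```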
if (depth < expectedDepth - 1) { - verifyActualSpan(expectedSpanCnt, fileStatuses); + verifyActualSpan(expectedSpanCnt, fileStatusList); } int actualNumFiles = 0; ArrayList files = new ArrayList<>(); - for (FileStatus fileStatus : fileStatuses) { + for (FileStatus fileStatus : fileStatusList) { if (fileStatus.isDirectory()) { ++depth; return traverseToLeaf(fs, fileStatus.getPath(), depth, expectedDepth, @@ -196,7 +197,7 @@ private int traverseToLeaf(FileSystem fs, Path dirPath, int depth, } private int verifyActualSpan(int expectedSpanCnt, - FileStatus[] fileStatuses) { + List fileStatuses) { int actualSpan = 0; for (FileStatus fileStatus : fileStatuses) { if (fileStatus.isDirectory()) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java index 4411c0d2ea5..f4993d538ee 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java @@ -34,6 +34,7 @@ import java.util.LinkedList; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; @@ -42,7 +43,6 @@ import java.io.IOException; import java.net.URI; -import static org.apache.ozone.test.GenericTestUtils.getTempPath; import static org.junit.jupiter.api.Assertions.assertEquals; /** @@ -50,8 +50,8 @@ */ public class TestHadoopNestedDirGenerator { - - private String path; + @TempDir + private java.nio.file.Path path; private OzoneConfiguration conf = null; private MiniOzoneCluster cluster = null; private ObjectStore store = null; @@ -61,11 +61,8 @@ public class TestHadoopNestedDirGenerator { @BeforeEach public void setup() { - path = getTempPath(TestHadoopNestedDirGenerator.class.getSimpleName()); GenericTestUtils.setLogLevel(RaftLog.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(RaftServer.LOG, Level.DEBUG); - File baseDir = new File(path); - baseDir.mkdirs(); } /** @@ -76,7 +73,6 @@ private void shutdown() throws IOException { IOUtils.closeQuietly(client); if (cluster != null) { cluster.shutdown(); - FileUtils.deleteDirectory(new File(path)); } } @@ -101,8 +97,7 @@ private void startCluster() throws Exception { public void testNestedDirTreeGeneration() throws Exception { try { startCluster(); - FileOutputStream out = FileUtils.openOutputStream(new File(path, - "conf")); + FileOutputStream out = FileUtils.openOutputStream(new File(path.toString(), "conf")); cluster.getConf().writeXml(out); out.getFD().sync(); out.close(); @@ -128,7 +123,7 @@ private void verifyDirTree(String volumeName, String bucketName, OzoneVolume volume = store.getVolume(volumeName); volume.createBucket(bucketName); String rootPath = "o3fs://" + bucketName + "." 
+ volumeName; - String confPath = new File(path, "conf").getAbsolutePath(); + String confPath = new File(path.toString(), "conf").getAbsolutePath(); new Freon().execute(new String[]{"-conf", confPath, "ddsg", "-d", actualDepth + "", "-s", span + "", "-n", "1", "-r", rootPath}); // verify the directory structure diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHsyncGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHsyncGenerator.java index 7026f32d8b3..66714e58bad 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHsyncGenerator.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHsyncGenerator.java @@ -94,14 +94,13 @@ public void test() throws IOException { OzoneVolume volume = store.getVolume(volumeName); volume.createBucket(bucketName); - String rootPath = String.format("%s://%s/%s/%s/", - OZONE_OFS_URI_SCHEME, cluster.getConf().get(OZONE_OM_ADDRESS_KEY), - volumeName, bucketName); + String rootPath = String.format("%s://%s/%s/%s/", OZONE_OFS_URI_SCHEME, + cluster.getConf().get(OZONE_OM_ADDRESS_KEY), volumeName, bucketName); int exitCode = cmd.execute( "--path", rootPath, - "--bytes-per-write", "1024", - "--number-of-files", "2", + "--bytes-per-write", "8", + "--writes-per-transaction", "64", "-t", "5", "-n", "100"); assertEquals(0, exitCode); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java index c566cae414f..ecb493ecf8f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java @@ -56,6 +56,7 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.time.Duration; +import java.util.Collections; import java.util.List; import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; @@ -231,7 +232,7 @@ public void testDAGReconstruction() final File checkpointSnap2 = new File(snap2.getDbPath()); GenericTestUtils.waitFor(checkpointSnap2::exists, 2000, 20000); - List sstDiffList21 = differ.getSSTDiffList(snap2, snap1); + List sstDiffList21 = differ.getSSTDiffList(snap2, snap1).orElse(Collections.emptyList()); LOG.debug("Got diff list: {}", sstDiffList21); // Delete 1000 keys, take a 3rd snapshot, and do another diff @@ -250,13 +251,13 @@ public void testDAGReconstruction() final File checkpointSnap3 = new File(snap3.getDbPath()); GenericTestUtils.waitFor(checkpointSnap3::exists, 2000, 20000); - List sstDiffList32 = differ.getSSTDiffList(snap3, snap2); + List sstDiffList32 = differ.getSSTDiffList(snap3, snap2).orElse(Collections.emptyList()); // snap3-snap1 diff result is a combination of snap3-snap2 and snap2-snap1 - List sstDiffList31 = differ.getSSTDiffList(snap3, snap1); + List sstDiffList31 = differ.getSSTDiffList(snap3, snap1).orElse(Collections.emptyList()); // Same snapshot. 
Result should be empty list - List sstDiffList22 = differ.getSSTDiffList(snap2, snap2); + List sstDiffList22 = differ.getSSTDiffList(snap2, snap2).orElse(Collections.emptyList()); assertThat(sstDiffList22).isEmpty(); snapDB1.close(); snapDB2.close(); @@ -282,13 +283,13 @@ public void testDAGReconstruction() volumeName, bucketName, "snap3", ((RDBStore) snapDB3.get() .getMetadataManager().getStore()).getDb().getManagedRocksDb()); - List sstDiffList21Run2 = differ.getSSTDiffList(snap2, snap1); + List sstDiffList21Run2 = differ.getSSTDiffList(snap2, snap1).orElse(Collections.emptyList()); assertEquals(sstDiffList21, sstDiffList21Run2); - List sstDiffList32Run2 = differ.getSSTDiffList(snap3, snap2); + List sstDiffList32Run2 = differ.getSSTDiffList(snap3, snap2).orElse(Collections.emptyList()); assertEquals(sstDiffList32, sstDiffList32Run2); - List sstDiffList31Run2 = differ.getSSTDiffList(snap3, snap1); + List sstDiffList31Run2 = differ.getSSTDiffList(snap3, snap1).orElse(Collections.emptyList()); assertEquals(sstDiffList31, sstDiffList31Run2); snapDB1.close(); snapDB2.close(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteFileOps.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteFileOps.java index 5244bb85790..8eb83b91356 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteFileOps.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteFileOps.java @@ -36,6 +36,7 @@ import org.apache.ratis.server.raftlog.RaftLog; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; @@ -52,8 +53,8 @@ * Test for OmBucketReadWriteFileOps. */ public class TestOmBucketReadWriteFileOps { - - private String path; + @TempDir + private java.nio.file.Path path; private OzoneConfiguration conf = null; private MiniOzoneCluster cluster = null; private ObjectStore store = null; @@ -63,12 +64,8 @@ public class TestOmBucketReadWriteFileOps { @BeforeEach public void setup() { - path = GenericTestUtils - .getTempPath(TestOmBucketReadWriteFileOps.class.getSimpleName()); GenericTestUtils.setLogLevel(RaftLog.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(RaftServer.LOG, Level.DEBUG); - File baseDir = new File(path); - baseDir.mkdirs(); } /** @@ -78,7 +75,6 @@ private void shutdown() throws IOException { IOUtils.closeQuietly(client); if (cluster != null) { cluster.shutdown(); - FileUtils.deleteDirectory(new File(path)); } } @@ -107,8 +103,7 @@ protected OzoneConfiguration getOzoneConfiguration() { public void testOmBucketReadWriteFileOps() throws Exception { try { startCluster(); - FileOutputStream out = FileUtils.openOutputStream(new File(path, - "conf")); + FileOutputStream out = FileUtils.openOutputStream(new File(path.toString(), "conf")); cluster.getConf().writeXml(out); out.getFD().sync(); out.close(); @@ -154,7 +149,7 @@ private void verifyFreonCommand(ParameterBuilder parameterBuilder) volume.createBucket(parameterBuilder.bucketName); String rootPath = "o3fs://" + parameterBuilder.bucketName + "." 
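The TestOMSnapshotDAG changes above suggest the snapshot differ's getSSTDiffList(...) now returns an Optional list, which the test unwraps with orElse(Collections.emptyList()). A tiny sketch of that unwrapping pattern:

```java
// Sketch of the Optional-unwrapping pattern used with getSSTDiffList(...):
// an absent diff is treated as an empty list so callers never handle null.
import java.util.Collections;
import java.util.List;
import java.util.Optional;

final class DiffListSketch {
  private DiffListSketch() { }

  static List<String> orEmpty(Optional<List<String>> maybeDiff) {
    return maybeDiff.orElse(Collections.emptyList());
  }

  public static void main(String[] args) {
    System.out.println(orEmpty(Optional.empty()));                                   // []
    System.out.println(orEmpty(Optional.of(Collections.singletonList("000123.sst")))); // [000123.sst]
  }
}
```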
+ parameterBuilder.volumeName + parameterBuilder.prefixFilePath; - String confPath = new File(path, "conf").getAbsolutePath(); + String confPath = new File(path.toString(), "conf").getAbsolutePath(); new Freon().execute( new String[]{"-conf", confPath, "obrwf", "-P", rootPath, "-r", String.valueOf(parameterBuilder.fileCountForRead), diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java index 3c7a04071b3..5e24cfc4e0e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java @@ -34,6 +34,7 @@ import org.apache.ratis.server.RaftServer; import org.apache.ratis.server.raftlog.RaftLog; import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; import org.slf4j.Logger; @@ -43,6 +44,7 @@ import java.io.File; import java.io.FileOutputStream; import java.io.IOException; +import java.nio.file.Path; import java.util.Iterator; import static org.assertj.core.api.Assertions.assertThat; @@ -55,8 +57,8 @@ public class TestOmBucketReadWriteKeyOps { // TODO: Remove code duplication of TestOmBucketReadWriteKeyOps with // TestOmBucketReadWriteFileOps. - - private String path; + @TempDir + private Path path; private OzoneConfiguration conf = null; private MiniOzoneCluster cluster = null; private ObjectStore store = null; @@ -66,12 +68,8 @@ public class TestOmBucketReadWriteKeyOps { @BeforeEach public void setup() { - path = GenericTestUtils - .getTempPath(TestHadoopDirTreeGenerator.class.getSimpleName()); GenericTestUtils.setLogLevel(RaftLog.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(RaftServer.LOG, Level.DEBUG); - File baseDir = new File(path); - baseDir.mkdirs(); } /** @@ -111,7 +109,7 @@ private OzoneConfiguration getOzoneConfiguration() { public void testOmBucketReadWriteKeyOps(boolean fsPathsEnabled) throws Exception { try { startCluster(fsPathsEnabled); - FileOutputStream out = FileUtils.openOutputStream(new File(path, + FileOutputStream out = FileUtils.openOutputStream(new File(path.toString(), "conf")); cluster.getConf().writeXml(out); out.getFD().sync(); @@ -157,7 +155,7 @@ private void verifyFreonCommand(ParameterBuilder parameterBuilder) OzoneVolume volume = store.getVolume(parameterBuilder.volumeName); volume.createBucket(parameterBuilder.bucketName); OzoneBucket bucket = volume.getBucket(parameterBuilder.bucketName); - String confPath = new File(path, "conf").getAbsolutePath(); + String confPath = new File(path.toString(), "conf").getAbsolutePath(); long startTime = System.currentTimeMillis(); new Freon().execute( diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java index 7811470887d..63d2870e7d7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java @@ -35,15 +35,14 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.om.OzoneManager; import 
org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.ozone.test.GenericTestUtils; -import org.apache.ratis.util.FileUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; -import java.io.File; import java.io.IOException; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -61,6 +60,8 @@ */ @Timeout(value = 300, unit = TimeUnit.SECONDS) public class TestContainerMapper { + @TempDir + private static Path dbPath; private static MiniOzoneCluster cluster = null; private static OzoneClient ozClient = null; private static ObjectStore store = null; @@ -71,14 +72,12 @@ public class TestContainerMapper { private static String bucketName = UUID.randomUUID().toString(); private static OzoneConfiguration conf; private static List keyList = new ArrayList<>(); - private static String dbPath; @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); - dbPath = GenericTestUtils.getRandomizedTempPath(); - conf.set(OZONE_OM_DB_DIRS, dbPath); + conf.set(OZONE_OM_DB_DIRS, dbPath.toString()); conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, "100MB"); conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, 0, StorageUnit.MB); @@ -137,6 +136,5 @@ private static byte[] generateData(int size, byte val) { public static void shutdown() throws IOException { IOUtils.closeQuietly(ozClient); cluster.shutdown(); - FileUtils.deleteFully(new File(dbPath)); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketLayoutWithOlderClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketLayoutWithOlderClient.java index 73596781cc6..e1b2a59d78c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketLayoutWithOlderClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketLayoutWithOlderClient.java @@ -26,6 +26,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -96,6 +97,9 @@ public void testCreateBucketWithOlderClient() throws Exception { OzoneManagerProtocolProtos.StorageTypeProto.DISK) .build()) .build()).build(); + createBucketReq = createBucketReq.toBuilder() + .setUserInfo(OzoneManagerProtocolProtos.UserInfo.newBuilder() + .setUserName(UserGroupInformation.getCurrentUser().getShortUserName()).build()).build(); OzoneManagerProtocolProtos.OMResponse omResponse = cluster.getOzoneManager().getOmServerProtocol() diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index efa2963842d..e9c9b946c8e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -71,6 +71,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConfigKeys; 
+import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; @@ -92,11 +93,9 @@ import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; import org.apache.hadoop.ozone.security.acl.RequestContext; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.ozone.test.GenericTestUtils; import org.apache.hadoop.util.Time; import com.google.common.collect.Sets; -import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.RandomStringUtils; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; @@ -122,6 +121,7 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; @@ -154,7 +154,8 @@ */ @Timeout(300) public class TestKeyManagerImpl { - + @TempDir + private static File dir; private static PrefixManager prefixManager; private static KeyManagerImpl keyManager; private static NodeManager nodeManager; @@ -163,7 +164,6 @@ public class TestKeyManagerImpl { private static StorageContainerLocationProtocol mockScmContainerClient; private static OzoneConfiguration conf; private static OMMetadataManager metadataManager; - private static File dir; private static long scmBlockSize; private static final String KEY_NAME = "key1"; private static final String BUCKET_NAME = "bucket1"; @@ -171,13 +171,13 @@ public class TestKeyManagerImpl { private static final String VERSIONED_BUCKET_NAME = "versionedbucket1"; private static final String VOLUME_NAME = "vol1"; private static OzoneManagerProtocol writeClient; + private static OzoneClient rpcClient; private static OzoneManager om; @BeforeAll public static void setUp() throws Exception { ExitUtils.disableSystemExit(); conf = new OzoneConfiguration(); - dir = GenericTestUtils.getRandomizedTestDir(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString()); conf.set(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, "true"); final String rootPath = String.format("%s://%s/", OZONE_OFS_URI_SCHEME, @@ -219,6 +219,7 @@ public static void setUp() throws Exception { keyManager = (KeyManagerImpl)omTestManagers.getKeyManager(); prefixManager = omTestManagers.getPrefixManager(); writeClient = omTestManagers.getWriteClient(); + rpcClient = omTestManagers.getRpcClient(); mockContainerClient(); @@ -235,10 +236,11 @@ public static void setUp() throws Exception { @AfterAll public static void cleanup() throws Exception { + writeClient.close(); + rpcClient.close(); scm.stop(); scm.join(); om.stop(); - FileUtils.deleteDirectory(dir); } @BeforeEach @@ -252,10 +254,11 @@ public void init() throws Exception { public void cleanupTest() throws IOException { mockContainerClient(); org.apache.hadoop.fs.Path volumePath = new org.apache.hadoop.fs.Path(OZONE_URI_DELIMITER, VOLUME_NAME); - FileSystem fs = FileSystem.get(conf); - fs.delete(new org.apache.hadoop.fs.Path(volumePath, BUCKET_NAME), true); - fs.delete(new org.apache.hadoop.fs.Path(volumePath, BUCKET2_NAME), true); - fs.delete(new org.apache.hadoop.fs.Path(volumePath, VERSIONED_BUCKET_NAME), true); + try (FileSystem fs = FileSystem.get(conf)) { + fs.delete(new org.apache.hadoop.fs.Path(volumePath, BUCKET_NAME), true); + fs.delete(new 
org.apache.hadoop.fs.Path(volumePath, BUCKET2_NAME), true); + fs.delete(new org.apache.hadoop.fs.Path(volumePath, VERSIONED_BUCKET_NAME), true); + } } private static void mockContainerClient() { @@ -334,8 +337,7 @@ public void openKeyFailureInSafeMode() throws Exception { .setKeyName(KEY_NAME) .setDataSize(1000) .setReplicationConfig(RatisReplicationConfig.getInstance(THREE)) - .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroupNames(), - ALL, ALL)) + .setAcls(OzoneAclUtil.getAclList(ugi, ALL, ALL)) .build(); OMException omException = assertThrows(OMException.class, () -> writeClient.openKey(keyArgs)); @@ -1692,8 +1694,7 @@ private OmKeyArgs.Builder createBuilder(String bucketName) .setDataSize(0) .setReplicationConfig( StandaloneReplicationConfig.getInstance(ONE)) - .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroupNames(), - ALL, ALL)) + .setAcls(OzoneAclUtil.getAclList(ugi, ALL, ALL)) .setVolumeName(VOLUME_NAME) .setOwnerName(ugi.getShortUserName()); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java index 204c0ee6681..7e9744d0123 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java @@ -48,6 +48,7 @@ import static com.google.common.collect.Lists.newLinkedList; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE; import static org.junit.jupiter.params.provider.Arguments.of; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -80,6 +81,7 @@ public static void init() throws Exception { // Set the number of keys to be processed during batch operate. conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 3); conf.setInt(OZONE_CLIENT_LIST_CACHE_SIZE, 3); + conf.setInt(OZONE_OM_SERVER_LIST_MAX_SIZE, 2); cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java index 11594f3ef11..0829c8fc19a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java @@ -47,6 +47,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE; import static org.junit.jupiter.api.Assertions.assertEquals; /** @@ -81,6 +82,7 @@ public static void init() throws Exception { // Set the number of keys to be processed during batch operate. 
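TestKeyManagerImpl's cleanupTest() above now opens the FileSystem in a try-with-resources block, so the client is released even when one of the deletes throws. A standalone sketch of that cleanup pattern:

```java
// Sketch of the try-with-resources cleanup adopted in cleanupTest():
// FileSystem implements Closeable, so the client is closed automatically.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class FsCleanupSketch {
  private FsCleanupSketch() { }

  static void deleteRecursively(Configuration conf, Path... paths) throws IOException {
    try (FileSystem fs = FileSystem.get(conf)) {
      for (Path p : paths) {
        fs.delete(p, true);   // recursive delete; fs is closed even on failure
      }
    }
  }
}
```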
conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 3); conf.setInt(OZONE_CLIENT_LIST_CACHE_SIZE, 3); + conf.setInt(OZONE_OM_SERVER_LIST_MAX_SIZE, 2); cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java index bd5046bfc0b..f7394729898 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java @@ -409,9 +409,9 @@ public void testInstallIncrementalSnapshot(@TempDir Path tempDir) // Do some transactions so that the log index increases List firstKeys = writeKeysToIncreaseLogIndex(leaderRatisServer, - 80); + 100); - SnapshotInfo snapshotInfo2 = createOzoneSnapshot(leaderOM, "snap80"); + SnapshotInfo snapshotInfo2 = createOzoneSnapshot(leaderOM, "snap100"); followerOM.getConfiguration().setInt( OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, KeyManagerImpl.DISABLE_VALUE); @@ -424,9 +424,9 @@ public void testInstallIncrementalSnapshot(@TempDir Path tempDir) }, 1000, 30_000); // Get two incremental tarballs, adding new keys/snapshot for each. - IncrementData firstIncrement = getNextIncrementalTarball(160, 2, leaderOM, + IncrementData firstIncrement = getNextIncrementalTarball(200, 2, leaderOM, leaderRatisServer, faultInjector, followerOM, tempDir); - IncrementData secondIncrement = getNextIncrementalTarball(240, 3, leaderOM, + IncrementData secondIncrement = getNextIncrementalTarball(300, 3, leaderOM, leaderRatisServer, faultInjector, followerOM, tempDir); // Resume the follower thread, it would download the incremental snapshot. @@ -501,10 +501,10 @@ public void testInstallIncrementalSnapshot(@TempDir Path tempDir) assertNotNull(filesInCandidate); assertEquals(0, filesInCandidate.length); - checkSnapshot(leaderOM, followerOM, "snap80", firstKeys, snapshotInfo2); - checkSnapshot(leaderOM, followerOM, "snap160", firstIncrement.getKeys(), + checkSnapshot(leaderOM, followerOM, "snap100", firstKeys, snapshotInfo2); + checkSnapshot(leaderOM, followerOM, "snap200", firstIncrement.getKeys(), firstIncrement.getSnapshotInfo()); - checkSnapshot(leaderOM, followerOM, "snap240", secondIncrement.getKeys(), + checkSnapshot(leaderOM, followerOM, "snap300", secondIncrement.getKeys(), secondIncrement.getSnapshotInfo()); assertEquals( followerOM.getOmSnapshotProvider().getInitCount(), 2, @@ -618,7 +618,7 @@ public void testInstallIncrementalSnapshotWithFailure() throws Exception { // Do some transactions so that the log index increases List firstKeys = writeKeysToIncreaseLogIndex(leaderRatisServer, - 80); + 100); // Start the inactive OM. Checkpoint installation will happen spontaneously. 
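Both TestListKeys and TestListKeysWithFSO now cap the server-side listing batch with OZONE_OM_SERVER_LIST_MAX_SIZE = 2, so a single list call returns at most that many entries and the caller keeps paging. The conceptual sketch below only illustrates that idea; the KeyLister interface is hypothetical, not an Ozone API.

```java
// Conceptual sketch only: with a server-side cap, one list call returns at most
// pageSize entries and the caller continues from the last key it saw.
import java.util.ArrayList;
import java.util.List;

final class ListPagingSketch {
  interface KeyLister {
    List<String> listKeys(String startAfter, int pageSize);   // hypothetical
  }

  static List<String> listAll(KeyLister server, int pageSize) {
    List<String> all = new ArrayList<>();
    String startAfter = "";
    List<String> page;
    do {
      page = server.listKeys(startAfter, pageSize);
      all.addAll(page);
      if (!page.isEmpty()) {
        startAfter = page.get(page.size() - 1);
      }
    } while (page.size() == pageSize);   // a short page means the listing is done
    return all;
  }
}
```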
cluster.startInactiveOM(followerNodeId); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java index f25bb47f0db..b48a7067cad 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.om; import com.google.common.collect.ImmutableMap; -import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.ContainerBlockID; @@ -63,6 +62,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.client.rpc.RpcClient; @@ -71,7 +71,6 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.thirdparty.io.grpc.Status; import org.apache.ratis.thirdparty.io.grpc.StatusException; @@ -83,6 +82,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; @@ -136,12 +136,12 @@ */ @Timeout(300) public class TestOmContainerLocationCache { - + @TempDir + private static File dir; private static ScmBlockLocationProtocol mockScmBlockLocationProtocol; private static StorageContainerLocationProtocol mockScmContainerClient; private static OzoneConfiguration conf; private static OMMetadataManager metadataManager; - private static File dir; private static final String BUCKET_NAME = "bucket1"; private static final String VERSIONED_BUCKET_NAME = "versionedBucket1"; private static final String VOLUME_NAME = "vol1"; @@ -162,14 +162,13 @@ public class TestOmContainerLocationCache { private static final DatanodeDetails DN5 = MockDatanodeDetails.createDatanodeDetails(UUID.randomUUID()); private static final AtomicLong CONTAINER_ID = new AtomicLong(1); - + private static OzoneClient ozoneClient; @BeforeAll public static void setUp() throws Exception { ExitUtils.disableSystemExit(); conf = new OzoneConfiguration(); - dir = GenericTestUtils.getRandomizedTestDir(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString()); conf.set(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, "true"); conf.setLong(OZONE_KEY_PREALLOCATION_BLOCKS_MAX, 10); @@ -184,6 +183,7 @@ public static void setUp() throws Exception { OmTestManagers omTestManagers = new OmTestManagers(conf, mockScmBlockLocationProtocol, mockScmContainerClient); om = omTestManagers.getOzoneManager(); + ozoneClient = omTestManagers.getRpcClient(); metadataManager = omTestManagers.getMetadataManager(); rpcClient = new RpcClient(conf, null) { @@ -204,8 +204,8 @@ protected XceiverClientFactory 
createXceiverClientFactory( @AfterAll public static void cleanup() throws Exception { + ozoneClient.close(); om.stop(); - FileUtils.deleteDirectory(dir); } private static XceiverClientManager mockDataNodeClientFactory() diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java index 0481ee4a867..eafa193ae2b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java @@ -340,6 +340,9 @@ public void testKeyOps() throws Exception { long initialNumKeyLists = getLongCounter("NumKeyLists", omMetrics); long initialNumKeys = getLongCounter("NumKeys", omMetrics); long initialNumInitiateMultipartUploads = getLongCounter("NumInitiateMultipartUploads", omMetrics); + long initialNumGetObjectTagging = getLongCounter("NumGetObjectTagging", omMetrics); + long initialNumPutObjectTagging = getLongCounter("NumPutObjectTagging", omMetrics); + long initialNumDeleteObjectTagging = getLongCounter("NumDeleteObjectTagging", omMetrics); long initialEcKeyCreateTotal = getLongCounter("EcKeyCreateTotal", omMetrics); long initialNumKeyAllocateFails = getLongCounter("NumKeyAllocateFails", omMetrics); @@ -349,6 +352,9 @@ public void testKeyOps() throws Exception { long initialNumBlockAllocationFails = getLongCounter("NumBlockAllocationFails", omMetrics); long initialNumKeyListFails = getLongCounter("NumKeyListFails", omMetrics); long initialEcKeyCreateFailsTotal = getLongCounter("EcKeyCreateFailsTotal", omMetrics); + long initialNumGetObjectTaggingFails = getLongCounter("NumGetObjectTaggingFails", omMetrics); + long initialNumPutObjectTaggingFails = getLongCounter("NumPutObjectTaggingFails", omMetrics); + long initialNumDeleteObjectTaggingFails = getLongCounter("NumDeleteObjectTaggingFails", omMetrics); // see HDDS-10078 for making this work with FILE_SYSTEM_OPTIMIZED layout TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY); @@ -358,13 +364,16 @@ public void testKeyOps() throws Exception { omMetrics = getMetrics("OMMetrics"); - assertEquals(initialNumKeyOps + 7, getLongCounter("NumKeyOps", omMetrics)); + assertEquals(initialNumKeyOps + 10, getLongCounter("NumKeyOps", omMetrics)); assertEquals(initialNumKeyAllocate + 1, getLongCounter("NumKeyAllocate", omMetrics)); assertEquals(initialNumKeyLookup + 1, getLongCounter("NumKeyLookup", omMetrics)); assertEquals(initialNumKeyDeletes + 1, getLongCounter("NumKeyDeletes", omMetrics)); assertEquals(initialNumKeyLists + 1, getLongCounter("NumKeyLists", omMetrics)); assertEquals(initialNumKeys, getLongCounter("NumKeys", omMetrics)); assertEquals(initialNumInitiateMultipartUploads + 1, getLongCounter("NumInitiateMultipartUploads", omMetrics)); + assertEquals(initialNumGetObjectTagging + 1, getLongCounter("NumGetObjectTagging", omMetrics)); + assertEquals(initialNumPutObjectTagging + 1, getLongCounter("NumPutObjectTagging", omMetrics)); + assertEquals(initialNumDeleteObjectTagging + 1, getLongCounter("NumDeleteObjectTagging", omMetrics)); keyArgs = createKeyArgs(volumeName, bucketName, new ECReplicationConfig("rs-3-2-1024K")); @@ -406,6 +415,7 @@ public void testKeyOps() throws Exception { doThrow(exception).when(mockKm).lookupKey(any(), any(), any()); doThrow(exception).when(mockKm).listKeys( any(), any(), any(), any(), anyInt()); + 
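TestOmContainerLocationCache (like TestKeyManagerImpl earlier in this patch) now closes the Ozone client in @AfterAll before stopping the OM, and leaves directory removal to @TempDir. A small sketch of that teardown ordering; ManagedService is a hypothetical stand-in for OM/SCM used only for illustration.

```java
// Sketch of the teardown ordering adopted above: close the client first, then
// stop the services it talks to, so no in-flight call hits a stopped server.
import java.io.Closeable;
import java.io.IOException;

final class TeardownSketch {
  interface ManagedService {   // hypothetical stand-in for OM/SCM
    void stop();
  }

  static void shutdown(Closeable client, ManagedService... services) throws IOException {
    client.close();            // release RPC resources first
    for (ManagedService s : services) {
      s.stop();                // then stop the servers the client talked to
    }
  }
}
```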
doThrow(exception).when(mockKm).getObjectTagging(any(), any()); OmMetadataReader omMetadataReader = (OmMetadataReader) ozoneManager.getOmMetadataReader().get(); HddsWhiteboxTestUtils.setInternalState( @@ -421,7 +431,7 @@ public void testKeyOps() throws Exception { doKeyOps(keyArgs); omMetrics = getMetrics("OMMetrics"); - assertEquals(initialNumKeyOps + 28, getLongCounter("NumKeyOps", omMetrics)); + assertEquals(initialNumKeyOps + 37, getLongCounter("NumKeyOps", omMetrics)); assertEquals(initialNumKeyAllocate + 6, getLongCounter("NumKeyAllocate", omMetrics)); assertEquals(initialNumKeyLookup + 3, getLongCounter("NumKeyLookup", omMetrics)); assertEquals(initialNumKeyDeletes + 4, getLongCounter("NumKeyDeletes", omMetrics)); @@ -435,6 +445,9 @@ public void testKeyOps() throws Exception { assertEquals(initialNumInitiateMultipartUploadFails + 1, getLongCounter( "NumInitiateMultipartUploadFails", omMetrics)); assertEquals(initialNumKeys + 2, getLongCounter("NumKeys", omMetrics)); + assertEquals(initialNumGetObjectTaggingFails + 1, getLongCounter("NumGetObjectTaggingFails", omMetrics)); + assertEquals(initialNumPutObjectTaggingFails + 1, getLongCounter("NumPutObjectTaggingFails", omMetrics)); + assertEquals(initialNumDeleteObjectTaggingFails + 1, getLongCounter("NumDeleteObjectTaggingFails", omMetrics)); keyArgs = createKeyArgs(volumeName, bucketName, new ECReplicationConfig("rs-3-2-1024K")); @@ -836,6 +849,21 @@ private void doKeyOps(OmKeyArgs keyArgs) { } catch (IOException ignored) { } + try { + writeClient.putObjectTagging(keyArgs); + } catch (IOException ignored) { + } + + try { + writeClient.getObjectTagging(keyArgs); + } catch (IOException ignored) { + } + + try { + writeClient.deleteObjectTagging(keyArgs); + } catch (IOException ignored) { + } + try { writeClient.deleteKey(keyArgs); } catch (IOException ignored) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java index 6937c52c712..80b97f92275 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java @@ -20,9 +20,12 @@ import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.hdfs.LogVerificationAppender; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; @@ -33,17 +36,24 @@ import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.ha.HadoopRpcOMFailoverProxyProvider; import org.apache.hadoop.ozone.om.ha.OMHAMetrics; import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import 
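doKeyOps() now also issues putObjectTagging, getObjectTagging and deleteObjectTagging, and the expected NumKeyOps delta rises from 7 to 10 (and from 28 to 37 on the failure path), consistent with each tagging call bumping both its own counter and the aggregate NumKeyOps. An illustrative accounting sketch, not OMMetrics itself:

```java
// Illustrative counter bookkeeping: every key operation increments its own
// counter plus the NumKeyOps aggregate, which is why three new tagging ops
// add 3 to the expected NumKeyOps delta per doKeyOps() pass.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.LongAdder;

final class KeyOpCounterSketch {
  private final Map<String, LongAdder> counters = new ConcurrentHashMap<>();

  void record(String opCounterName) {
    counters.computeIfAbsent(opCounterName, k -> new LongAdder()).increment();
    counters.computeIfAbsent("NumKeyOps", k -> new LongAdder()).increment();
  }

  long get(String name) {
    return counters.getOrDefault(name, new LongAdder()).sum();
  }

  public static void main(String[] args) {
    KeyOpCounterSketch m = new KeyOpCounterSketch();
    m.record("NumPutObjectTagging");
    m.record("NumGetObjectTagging");
    m.record("NumDeleteObjectTagging");
    System.out.println(m.get("NumKeyOps"));   // 3
  }
}
```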
org.apache.hadoop.ozone.om.service.KeyDeletingService; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.security.UserGroupInformation; import org.apache.log4j.Logger; import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.tag.Flaky; +import org.apache.ratis.client.RaftClient; import org.apache.ratis.conf.RaftProperties; +import org.apache.ratis.protocol.ClientId; +import org.apache.ratis.protocol.RaftClientReply; +import org.apache.ratis.retry.RetryPolicies; +import org.apache.ratis.rpc.SupportedRpcType; import org.apache.ratis.server.RaftServerConfigKeys; import org.apache.ratis.util.TimeDuration; import org.junit.jupiter.api.AfterEach; @@ -52,7 +62,9 @@ import org.junit.jupiter.api.Order; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestMethodOrder; +import org.slf4j.LoggerFactory; +import java.io.IOException; import java.net.ConnectException; import java.util.HashMap; import java.util.Iterator; @@ -61,6 +73,7 @@ import java.util.Set; import java.util.TreeSet; import java.util.UUID; +import java.util.concurrent.TimeUnit; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.MiniOzoneHAClusterImpl.NODE_FAILURE_TIMEOUT; @@ -71,16 +84,17 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; /** * Ozone Manager HA tests that stop/restart one or more OM nodes. * @see TestOzoneManagerHAWithAllRunning */ -@Flaky("HDDS-11352") @TestMethodOrder(MethodOrderer.OrderAnnotation.class) public class TestOzoneManagerHAWithStoppedNodes extends TestOzoneManagerHA { - + private static final org.slf4j.Logger LOG = LoggerFactory.getLogger( + TestOzoneManagerHAWithStoppedNodes.class); /** * After restarting OMs we need to wait * for a leader to be elected and ready. 
@@ -594,6 +608,97 @@ void testListVolumes() throws Exception { objectStore.listVolumesByUser(userName, prefix, "")); } + @Test + void testRetryCacheWithDownedOM() throws Exception { + // Create a volume, a bucket and a key + String userName = "user" + RandomStringUtils.randomNumeric(5); + String adminName = "admin" + RandomStringUtils.randomNumeric(5); + String volumeName = "volume" + RandomStringUtils.randomNumeric(5); + String bucketName = UUID.randomUUID().toString(); + String keyTo = UUID.randomUUID().toString(); + + VolumeArgs createVolumeArgs = VolumeArgs.newBuilder() + .setOwner(userName) + .setAdmin(adminName) + .build(); + getObjectStore().createVolume(volumeName, createVolumeArgs); + OzoneVolume ozoneVolume = getObjectStore().getVolume(volumeName); + ozoneVolume.createBucket(bucketName); + OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName); + String keyFrom = createKey(ozoneBucket); + + int callId = 10; + ClientId clientId = ClientId.randomId(); + MiniOzoneHAClusterImpl cluster = getCluster(); + OzoneManager omLeader = cluster.getOMLeader(); + + OzoneManagerProtocolProtos.KeyArgs keyArgs = + OzoneManagerProtocolProtos.KeyArgs.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyFrom) + .build(); + OzoneManagerProtocolProtos.RenameKeyRequest renameKeyRequest + = OzoneManagerProtocolProtos.RenameKeyRequest.newBuilder() + .setKeyArgs(keyArgs) + .setToKeyName(keyTo) + .build(); + OzoneManagerProtocolProtos.OMRequest omRequest = + OzoneManagerProtocolProtos.OMRequest.newBuilder() + .setCmdType(OzoneManagerProtocolProtos.Type.RenameKey) + .setRenameKeyRequest(renameKeyRequest) + .setClientId(clientId.toString()) + .build(); + // set up the current call so that OM Ratis Server doesn't complain. + Server.getCurCall().set(new Server.Call(callId, 0, null, null, + RPC.RpcKind.RPC_BUILTIN, clientId.toByteString().toByteArray())); + // Submit rename request to OM + OzoneManagerProtocolProtos.OMResponse omResponse = + omLeader.getOmServerProtocol().processRequest(omRequest); + assertTrue(omResponse.getSuccess()); + + // Make one of the follower OM the leader, and shutdown the current leader. + OzoneManager newLeader = cluster.getOzoneManagersList().stream().filter( + om -> !om.getOMNodeId().equals(omLeader.getOMNodeId())).findFirst().get(); + transferLeader(omLeader, newLeader); + cluster.shutdownOzoneManager(omLeader); + + // Once the rename completes, the source key should no longer exist + // and the destination key should exist. + OMException omException = assertThrows(OMException.class, + () -> ozoneBucket.getKey(keyFrom)); + assertEquals(omException.getResult(), OMException.ResultCodes.KEY_NOT_FOUND); + assertTrue(ozoneBucket.getKey(keyTo).isFile()); + + // Submit rename request to OM again. The request is cached so it will succeed. + omResponse = newLeader.getOmServerProtocol().processRequest(omRequest); + assertTrue(omResponse.getSuccess()); + } + + private void transferLeader(OzoneManager omLeader, OzoneManager newLeader) throws IOException { + LOG.info("Transfer leadership from {}(raft id {}) to {}(raft id {})", + omLeader.getOMNodeId(), omLeader.getOmRatisServer().getRaftPeerId(), + newLeader.getOMNodeId(), newLeader.getOmRatisServer().getRaftPeerId()); + + final SupportedRpcType rpc = SupportedRpcType.GRPC; + final RaftProperties properties = RatisHelper.newRaftProperties(rpc); + + // For now not making anything configurable, RaftClient is only used + // in SCM for DB updates of sub-ca certs go via Ratis. 
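testRetryCacheWithDownedOM above submits a RenameKey request, fails over to a new leader, and resubmits the identical request (same ClientId and call id), expecting success because the reply is served from the retry cache rather than the rename being applied twice. The sketch below only illustrates a retry cache keyed by (clientId, callId); it is not the Ratis implementation.

```java
// Conceptual retry-cache sketch: a request retried with the same
// (clientId, callId) pair returns the cached reply instead of being
// applied to the state machine a second time.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;

final class RetryCacheSketch<R> {
  private final Map<String, R> replies = new ConcurrentHashMap<>();

  R submit(String clientId, long callId, Supplier<R> applyToStateMachine) {
    // first submission applies the request; a retry with the same key
    // short-circuits to the stored reply
    return replies.computeIfAbsent(clientId + "#" + callId, k -> applyToStateMachine.get());
  }
}
```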
+ RaftClient.Builder builder = RaftClient.newBuilder() + .setRaftGroup(omLeader.getOmRatisServer().getRaftGroup()) + .setLeaderId(null) + .setProperties(properties) + .setRetryPolicy( + RetryPolicies.retryUpToMaximumCountWithFixedSleep(120, + TimeDuration.valueOf(500, TimeUnit.MILLISECONDS))); + try (RaftClient raftClient = builder.build()) { + RaftClientReply reply = raftClient.admin().transferLeadership(newLeader.getOmRatisServer() + .getRaftPeerId(), 10 * 1000); + assertTrue(reply.isSuccess()); + } + } + private void validateVolumesList(Set expectedVolumes, Iterator volumeIterator) { int expectedCount = 0; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java index 72f1c3374b2..6c7cd89109e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java @@ -50,6 +50,7 @@ import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClientTestImpl; import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.client.SecretKeyTestClient; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.protocolPB.OmTransportFactory; @@ -201,6 +202,7 @@ private void setupEnvironment(boolean aclEnabled, om.setScmTopologyClient(new ScmTopologyClient( new ScmBlockLocationTestingClient(null, null, 0))); om.setCertClient(new CertificateClientTestImpl(conf)); + om.setSecretKeyClient(new SecretKeyTestClient()); om.start(); // Get OM client diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java index 6f86fcba70e..1a2e61b8800 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java @@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.security.acl.OzoneAclConfig; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; import org.apache.hadoop.security.UserGroupInformation; @@ -45,11 +46,13 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.UUID; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS_NATIVE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -68,6 +71,8 @@ public class TestRecursiveAclWithFSO { .createUserForTesting("user1", new String[] {"test1"}); 
private final UserGroupInformation user2 = UserGroupInformation .createUserForTesting("user2", new String[] {"test2"}); + private final UserGroupInformation user3 = UserGroupInformation + .createUserForTesting("user3", new String[] {"test3, test4"}); @BeforeEach public void init() throws Exception { @@ -213,6 +218,70 @@ public void testKeyDeleteAndRenameWithoutPermission() throws Exception { } } + @Test + public void testKeyDefaultACL() throws Exception { + String volumeName = "vol1"; + try (OzoneClient client = cluster.newClient()) { + ObjectStore objectStore = client.getObjectStore(); + objectStore.createVolume(volumeName); + addVolumeAcl(objectStore, volumeName, "world::a"); + + // verify volume ACLs. This volume will have 2 default ACLs, plus above one added + OzoneObj obj = OzoneObjInfo.Builder.newBuilder().setVolumeName(volumeName) + .setResType(OzoneObj.ResourceType.VOLUME) + .setStoreType(OZONE).build(); + List acls = objectStore.getAcl(obj); + assertEquals(3, acls.size()); + assertEquals(adminUser.getShortUserName(), acls.get(0).getName()); + OzoneAclConfig aclConfig = cluster.getConf().getObject(OzoneAclConfig.class); + assertArrayEquals(aclConfig.getUserDefaultRights(), acls.get(0).getAclList().toArray()); + assertEquals(adminUser.getPrimaryGroupName(), acls.get(1).getName()); + assertArrayEquals(aclConfig.getGroupDefaultRights(), acls.get(1).getAclList().toArray()); + assertEquals("WORLD", acls.get(2).getName()); + assertArrayEquals(aclConfig.getUserDefaultRights(), acls.get(2).getAclList().toArray()); + } + + // set LoginUser as user3 + UserGroupInformation.setLoginUser(user3); + try (OzoneClient client = cluster.newClient()) { + ObjectStore objectStore = client.getObjectStore(); + OzoneVolume volume = objectStore.getVolume(volumeName); + BucketArgs omBucketArgs = + BucketArgs.newBuilder().setStorageType(StorageType.DISK).build(); + String bucketName = "bucket"; + volume.createBucket(bucketName, omBucketArgs); + OzoneBucket ozoneBucket = volume.getBucket(bucketName); + + // verify bucket default ACLs + OzoneObj obj = OzoneObjInfo.Builder.newBuilder().setVolumeName(volume.getName()) + .setBucketName(ozoneBucket.getName()).setResType(OzoneObj.ResourceType.BUCKET) + .setStoreType(OZONE).build(); + List acls = objectStore.getAcl(obj); + assertEquals(2, acls.size()); + assertEquals(user3.getShortUserName(), acls.get(0).getName()); + OzoneAclConfig aclConfig = cluster.getConf().getObject(OzoneAclConfig.class); + assertArrayEquals(aclConfig.getUserDefaultRights(), acls.get(0).getAclList().toArray()); + assertEquals(user3.getPrimaryGroupName(), acls.get(1).getName()); + assertArrayEquals(aclConfig.getGroupDefaultRights(), acls.get(1).getAclList().toArray()); + + // verify key default ACLs + int length = 10; + byte[] input = new byte[length]; + Arrays.fill(input, (byte) 96); + String keyName = UUID.randomUUID().toString(); + createKey(ozoneBucket, keyName, length, input); + obj = OzoneObjInfo.Builder.newBuilder().setVolumeName(volume.getName()) + .setBucketName(ozoneBucket.getName()).setKeyName(keyName) + .setResType(OzoneObj.ResourceType.KEY).setStoreType(OZONE).build(); + acls = objectStore.getAcl(obj); + assertEquals(2, acls.size()); + assertEquals(user3.getShortUserName(), acls.get(0).getName()); + assertArrayEquals(aclConfig.getUserDefaultRights(), acls.get(0).getAclList().toArray()); + assertEquals(user3.getPrimaryGroupName(), acls.get(1).getName()); + assertArrayEquals(aclConfig.getGroupDefaultRights(), acls.get(1).getAclList().toArray()); + } + } + private void 
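testKeyDefaultACL above asserts that a newly created volume, bucket and key each carry one ACL for the creating user (with the configured user default rights) and one for that user's primary group (with the group default rights), plus any explicitly added entry such as world::a on the volume. The plain-Java illustration below captures that expectation; the names are example values and the strings only approximate Ozone's ACL spec format.

```java
// Plain-Java illustration of the default-ACL expectation: creator user and
// primary group each receive the configured default rights. Not the OM ACL code.
import java.util.ArrayList;
import java.util.List;

final class DefaultAclSketch {
  private DefaultAclSketch() { }

  static List<String> defaultAcls(String user, String primaryGroup,
      String userDefaultRights, String groupDefaultRights) {
    List<String> acls = new ArrayList<>();
    acls.add("user:" + user + ":" + userDefaultRights);
    acls.add("group:" + primaryGroup + ":" + groupDefaultRights);
    return acls;
  }

  public static void main(String[] args) {
    // example values only, e.g. a bucket created by "alice" (primary group "staff")
    System.out.println(defaultAcls("alice", "staff", "a", "a"));
  }
}
```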
removeAclsFromKey(ObjectStore objectStore, OzoneBucket ozoneBucket, String key) throws IOException { OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder().setKeyName(key) @@ -271,6 +340,16 @@ private void setVolumeAcl(ObjectStore objectStore, String volumeName, assertTrue(objectStore.setAcl(obj, OzoneAcl.parseAcls(aclString))); } + /** + * Helper function to add volume ACL. + */ + private void addVolumeAcl(ObjectStore objectStore, String volumeName, + String aclString) throws IOException { + OzoneObj obj = OzoneObjInfo.Builder.newBuilder().setVolumeName(volumeName) + .setResType(OzoneObj.ResourceType.VOLUME).setStoreType(OZONE).build(); + assertTrue(objectStore.addAcl(obj, OzoneAcl.parseAcl(aclString))); + } + /** * Helper function to set bucket ACL. */ diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java index a173bd9222e..4bdc29d6146 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java @@ -65,7 +65,6 @@ import java.util.HashSet; import java.util.List; import java.util.Set; -import java.util.UUID; import java.util.concurrent.TimeUnit; import static org.apache.hadoop.hdds.scm.HddsTestUtils.mockRemoteUser; @@ -106,6 +105,8 @@ public class TestRangerBGSyncService { @TempDir private Path folder; + @TempDir + private String path; private MultiTenantAccessController accessController; private OMRangerBGSyncService bgSync; @@ -181,8 +182,6 @@ public void setUp() throws IOException { // Run as alice, so that Server.getRemoteUser() won't return null. 
mockRemoteUser(ugiAlice); - String omID = UUID.randomUUID().toString(); - final String path = GenericTestUtils.getTempPath(omID); Path metaDirPath = Paths.get(path, "om-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java index 9a6bca29b88..0d93436b0e5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java @@ -20,6 +20,7 @@ import java.net.URI; import java.net.URISyntaxException; +import java.util.HashMap; import java.util.List; import com.google.common.collect.Lists; @@ -100,6 +101,7 @@ import java.nio.charset.StandardCharsets; import java.util.Collections; import java.util.Iterator; +import java.util.Map; import java.util.Set; import java.util.UUID; import java.util.concurrent.TimeUnit; @@ -125,8 +127,10 @@ import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.CONTAINS_SNAPSHOT; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; import static org.apache.hadoop.ozone.snapshot.CancelSnapshotDiffResponse.CancelMessage.CANCEL_ALREADY_CANCELLED_JOB; @@ -182,17 +186,21 @@ public abstract class TestOmSnapshot { private final boolean forceFullSnapshotDiff; private final boolean disableNativeDiff; private final AtomicInteger counter; + private final boolean createLinkedBucket; + private final Map linkedBuckets = new HashMap<>(); public TestOmSnapshot(BucketLayout newBucketLayout, boolean newEnableFileSystemPaths, boolean forceFullSnapDiff, - boolean disableNativeDiff) + boolean disableNativeDiff, + boolean createLinkedBucket) throws Exception { this.enabledFileSystemPaths = newEnableFileSystemPaths; this.bucketLayout = newBucketLayout; this.forceFullSnapshotDiff = forceFullSnapDiff; this.disableNativeDiff = disableNativeDiff; this.counter = new AtomicInteger(); + this.createLinkedBucket = createLinkedBucket; init(); } @@ -218,7 +226,10 @@ private void init() throws Exception { cluster.waitForClusterToBeReady(); client = cluster.newClient(); // create a volume and a bucket to be used by OzoneFileSystem - ozoneBucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout); + ozoneBucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, createLinkedBucket); + if (createLinkedBucket) { + this.linkedBuckets.put(ozoneBucket.getName(), ozoneBucket.getSourceBucket()); + } volumeName = ozoneBucket.getVolumeName(); bucketName = ozoneBucket.getName(); ozoneManager = cluster.getOzoneManager(); @@ -232,6 +243,17 @@ private void init() throws Exception { 
finalizeOMUpgrade(); } + private void createBucket(OzoneVolume volume, String bucketVal) throws IOException { + if (createLinkedBucket) { + String sourceBucketName = linkedBuckets.computeIfAbsent(bucketVal, (k) -> bucketVal + counter.incrementAndGet()); + volume.createBucket(sourceBucketName); + TestDataUtil.createLinkedBucket(client, volume.getName(), sourceBucketName, bucketVal); + this.linkedBuckets.put(bucketVal, sourceBucketName); + } else { + volume.createBucket(bucketVal); + } + } + private void stopKeyManager() throws IOException { KeyManagerImpl keyManager = (KeyManagerImpl) HddsWhiteboxTestUtils .getInternalState(ozoneManager, "keyManager"); @@ -319,10 +341,10 @@ public void testListKey() throws Exception { store.createVolume(volumeB); OzoneVolume volA = store.getVolume(volumeA); OzoneVolume volB = store.getVolume(volumeB); - volA.createBucket(bucketA); - volA.createBucket(bucketB); - volB.createBucket(bucketA); - volB.createBucket(bucketB); + createBucket(volA, bucketA); + createBucket(volA, bucketB); + createBucket(volB, bucketA); + createBucket(volB, bucketB); OzoneBucket volAbucketA = volA.getBucket(bucketA); OzoneBucket volAbucketB = volA.getBucket(bucketB); OzoneBucket volBbucketA = volB.getBucket(bucketA); @@ -401,7 +423,7 @@ public void testListKeyOnEmptyBucket() String bucket = "buc-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume vol = store.getVolume(volume); - vol.createBucket(bucket); + createBucket(vol, bucket); String snapshotKeyPrefix = createSnapshot(volume, bucket); OzoneBucket buc = vol.getBucket(bucket); Iterator keys = buc.listKeys(snapshotKeyPrefix); @@ -478,7 +500,7 @@ public void testListDeleteKey() throws Exception { String bucket = "buc-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume vol = store.getVolume(volume); - vol.createBucket(bucket); + createBucket(vol, bucket); OzoneBucket volBucket = vol.getBucket(bucket); String key = "key-"; @@ -503,7 +525,7 @@ public void testListAddNewKey() throws Exception { String bucket = "buc-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume vol = store.getVolume(volume); - vol.createBucket(bucket); + createBucket(vol, bucket); OzoneBucket bucket1 = vol.getBucket(bucket); String key1 = "key-1-"; @@ -553,7 +575,7 @@ public void testCreateSnapshotMissingMandatoryParams() throws Exception { String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); - volume1.createBucket(bucket); + createBucket(volume1, bucket); OzoneBucket bucket1 = volume1.getBucket(bucket); // Create Key1 and take snapshot String key1 = "key-1-"; @@ -597,11 +619,11 @@ private Set getDeletedKeysFromRocksDb( private void getOmKeyInfo(String volume, String bucket, String key) throws IOException { ResolvedBucket resolvedBucket = new ResolvedBucket(volume, bucket, - volume, bucket, "", bucketLayout); + volume, this.linkedBuckets.getOrDefault(bucket, bucket), "", bucketLayout); cluster.getOzoneManager().getKeyManager() .getKeyInfo(new OmKeyArgs.Builder() .setVolumeName(volume) - .setBucketName(bucket) + .setBucketName(this.linkedBuckets.getOrDefault(bucket, bucket)) .setKeyName(key).build(), resolvedBucket, null); } @@ -621,7 +643,7 @@ public void testSnapDiffHandlingReclaimWithLatestUse() throws Exception { String testBucketName = "bucket1"; store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); OzoneBucket bucket 
= volume.getBucket(testBucketName); String key1 = "k1"; key1 = createFileKeyWithPrefix(bucket, key1); @@ -659,7 +681,7 @@ public void testSnapDiffHandlingReclaimWithPreviousUse() throws Exception { String testBucketName = "bucket1"; store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); OzoneBucket bucket = volume.getBucket(testBucketName); String key1 = "k1"; key1 = createFileKeyWithPrefix(bucket, key1); @@ -706,7 +728,7 @@ public void testSnapDiffReclaimWithKeyRecreation() throws Exception { String testBucketName = "bucket1"; store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); OzoneBucket bucket = volume.getBucket(testBucketName); String key1 = "k1"; key1 = createFileKeyWithPrefix(bucket, key1); @@ -760,7 +782,7 @@ public void testSnapDiffReclaimWithKeyRename() throws Exception { String testBucketName = "bucket1"; store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); OzoneBucket bucket = volume.getBucket(testBucketName); String key1 = "k1"; key1 = createFileKeyWithPrefix(bucket, key1); @@ -805,7 +827,7 @@ public void testSnapDiffWith2RenamesAndDelete() throws Exception { String testBucketName = "bucket" + counter.incrementAndGet(); store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); OzoneBucket bucket = volume.getBucket(testBucketName); String key1 = "k1"; key1 = createFileKeyWithPrefix(bucket, key1); @@ -864,7 +886,7 @@ public void testSnapDiffWithKeyRenamesRecreationAndDelete() String testBucketName = "bucket1"; store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); OzoneBucket bucket = volume.getBucket(testBucketName); String key1 = "k1"; key1 = createFileKeyWithPrefix(bucket, key1); @@ -908,7 +930,7 @@ public void testSnapDiffReclaimWithDeferredKeyDeletion() throws Exception { String testBucketName = "bucket1"; store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); OzoneBucket bucket = volume.getBucket(testBucketName); String snap1 = "snap1"; createSnapshot(testVolumeName, testBucketName, snap1); @@ -942,7 +964,7 @@ public void testSnapDiffWithNoEffectiveRename() throws Exception { String testBucketName = "bucket1"; store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); OzoneBucket bucket = volume.getBucket(testBucketName); String snap1 = "snap1"; String key1 = "k1"; @@ -984,7 +1006,7 @@ public void testSnapDiffWithDirectory() throws Exception { String testBucketName = "bucket1"; store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); OzoneBucket bucket = volume.getBucket(testBucketName); String snap1 = "snap1"; String key1 = "k1"; @@ -1042,7 +1064,7 @@ public void testSnapDiffWithDirectoryDelete() throws Exception { String testBucketName = "bucket1"; 
store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); OzoneBucket bucket = volume.getBucket(testBucketName); String snap1 = "snap1"; String key1 = "k1"; @@ -1078,7 +1100,7 @@ public void testSnapdiffWithObjectMetaModification() throws Exception { String testBucketName = "bucket1"; store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); OzoneBucket bucket = volume.getBucket(testBucketName); String snap1 = "snap1"; String key1 = "k1"; @@ -1110,7 +1132,7 @@ public void testSnapdiffWithFilesystemCreate() String testBucketName = "bucket" + counter.incrementAndGet(); store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, testBucketName, testVolumeName); try (FileSystem fs = FileSystem.get(new URI(rootPath), cluster.getConf())) { @@ -1153,7 +1175,7 @@ public void testSnapDiffWithFilesystemDirectoryRenameOperation() String testBucketName = "bucket" + counter.incrementAndGet(); store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, testBucketName, testVolumeName); try (FileSystem fs = FileSystem.get(new URI(rootPath), cluster.getConf())) { @@ -1196,7 +1218,7 @@ public void testSnapDiffWithFilesystemDirectoryMoveOperation() String testBucketName = "bucket" + counter.incrementAndGet(); store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, testBucketName, testVolumeName); try (FileSystem fs = FileSystem.get(new URI(rootPath), cluster.getConf())) { @@ -1239,8 +1261,8 @@ public void testBucketDeleteIfSnapshotExists() throws Exception { String bucket2 = "buc-" + counter.incrementAndGet(); store.createVolume(volume1); OzoneVolume volume = store.getVolume(volume1); - volume.createBucket(bucket1); - volume.createBucket(bucket2); + createBucket(volume, bucket1); + createBucket(volume, bucket2); OzoneBucket bucketWithSnapshot = volume.getBucket(bucket1); OzoneBucket bucketWithoutSnapshot = volume.getBucket(bucket2); String key = "key-"; @@ -1250,7 +1272,7 @@ public void testBucketDeleteIfSnapshotExists() throws Exception { deleteKeys(bucketWithSnapshot); deleteKeys(bucketWithoutSnapshot); OMException omException = assertThrows(OMException.class, - () -> volume.deleteBucket(bucket1)); + () -> volume.deleteBucket(linkedBuckets.getOrDefault(bucket1, bucket1))); assertEquals(CONTAINS_SNAPSHOT, omException.getResult()); // TODO: Delete snapshot then delete bucket1 when deletion is implemented // no exception for bucket without snapshot @@ -1263,7 +1285,7 @@ public void testGetSnapshotInfo() throws Exception { String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); - volume1.createBucket(bucket); + createBucket(volume1, bucket); OzoneBucket bucket1 = volume1.getBucket(bucket); createFileKey(bucket1, "key-1"); @@ -1278,12 +1300,12 @@ public void 
testGetSnapshotInfo() throws Exception { assertEquals(snap1, snapshot1.getName()); assertEquals(volume, snapshot1.getVolumeName()); - assertEquals(bucket, snapshot1.getBucketName()); + assertEquals(linkedBuckets.getOrDefault(bucket, bucket), snapshot1.getBucketName()); OzoneSnapshot snapshot2 = store.getSnapshotInfo(volume, bucket, snap2); assertEquals(snap2, snapshot2.getName()); assertEquals(volume, snapshot2.getVolumeName()); - assertEquals(bucket, snapshot2.getBucketName()); + assertEquals(linkedBuckets.getOrDefault(bucket, bucket), snapshot2.getBucketName()); testGetSnapshotInfoFailure(null, bucket, "snapshotName", "volume can't be null or empty."); @@ -1292,9 +1314,10 @@ public void testGetSnapshotInfo() throws Exception { testGetSnapshotInfoFailure(volume, bucket, null, "snapshot name can't be null or empty."); testGetSnapshotInfoFailure(volume, bucket, "snapshotName", - "Snapshot '/" + volume + "/" + bucket + "/snapshotName' is not found."); + "Snapshot '/" + volume + "/" + linkedBuckets.getOrDefault(bucket, bucket) + + "/snapshotName' is not found."); testGetSnapshotInfoFailure(volume, "bucketName", "snapshotName", - "Snapshot '/" + volume + "/bucketName/snapshotName' is not found."); + "Bucket not found: " + volume + "/bucketName"); } public void testGetSnapshotInfoFailure(String volName, @@ -1313,7 +1336,7 @@ public void testSnapDiffWithDirRename() throws Exception { String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); - volume1.createBucket(bucket); + createBucket(volume1, bucket); OzoneBucket bucket1 = volume1.getBucket(bucket); bucket1.createDirectory("dir1"); String snap1 = "snap1"; @@ -1335,7 +1358,7 @@ public void testSnapDiff() throws Exception { String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); - volume1.createBucket(bucket); + createBucket(volume1, bucket); OzoneBucket bucket1 = volume1.getBucket(bucket); // Create Key1 and take snapshot String key1 = "key-1-"; @@ -1473,9 +1496,9 @@ public void testSnapDiffCancel() throws Exception { assertEquals(CANCELLED, response.getJobStatus()); String fromSnapshotTableKey = - SnapshotInfo.getTableKey(volumeName, bucketName, fromSnapName); + SnapshotInfo.getTableKey(volumeName, linkedBuckets.getOrDefault(bucketName, bucketName), fromSnapName); String toSnapshotTableKey = - SnapshotInfo.getTableKey(volumeName, bucketName, toSnapName); + SnapshotInfo.getTableKey(volumeName, linkedBuckets.getOrDefault(bucketName, bucketName), toSnapName); UUID fromSnapshotID = SnapshotUtils.getSnapshotInfo(ozoneManager, fromSnapshotTableKey).getSnapshotId(); UUID toSnapshotID = SnapshotUtils.getSnapshotInfo(ozoneManager, toSnapshotTableKey).getSnapshotId(); @@ -1567,7 +1590,7 @@ public void testSnapDiffNoSnapshot() throws Exception { String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); - volume1.createBucket(bucket); + createBucket(volume1, bucket); OzoneBucket bucket1 = volume1.getBucket(bucket); // Create Key1 and take snapshot String key1 = "key-1-"; @@ -1580,13 +1603,13 @@ public void testSnapDiffNoSnapshot() throws Exception { OMException omException = assertThrows(OMException.class, () -> store.snapshotDiff(volume, bucket, snap1, snap2, null, 0, false, disableNativeDiff)); - assertEquals(KEY_NOT_FOUND, omException.getResult()); + assertEquals(FILE_NOT_FOUND, omException.getResult()); // From snapshot is invalid omException 
= assertThrows(OMException.class, () -> store.snapshotDiff(volume, bucket, snap2, snap1, null, 0, false, disableNativeDiff)); - assertEquals(KEY_NOT_FOUND, omException.getResult()); + assertEquals(FILE_NOT_FOUND, omException.getResult()); } @Test @@ -1599,7 +1622,7 @@ public void testSnapDiffNonExistentUrl() throws Exception { String bucketb = "buck-" + counter.incrementAndGet(); store.createVolume(volumea); OzoneVolume volume1 = store.getVolume(volumea); - volume1.createBucket(bucketa); + createBucket(volume1, bucketa); OzoneBucket bucket1 = volume1.getBucket(bucketa); // Create Key1 and take 2 snapshots String key1 = "key-1-"; @@ -1612,16 +1635,16 @@ public void testSnapDiffNonExistentUrl() throws Exception { OMException omException = assertThrows(OMException.class, () -> store.snapshotDiff(volumea, bucketb, snap1, snap2, null, 0, forceFullSnapshotDiff, disableNativeDiff)); - assertEquals(KEY_NOT_FOUND, omException.getResult()); + assertEquals(BUCKET_NOT_FOUND, omException.getResult()); // Volume is nonexistent omException = assertThrows(OMException.class, () -> store.snapshotDiff(volumeb, bucketa, snap2, snap1, null, 0, forceFullSnapshotDiff, disableNativeDiff)); - assertEquals(KEY_NOT_FOUND, omException.getResult()); + assertEquals(VOLUME_NOT_FOUND, omException.getResult()); omException = assertThrows(OMException.class, () -> store.snapshotDiff(volumeb, bucketb, snap2, snap1, null, 0, forceFullSnapshotDiff, disableNativeDiff)); - assertEquals(KEY_NOT_FOUND, omException.getResult()); + assertEquals(VOLUME_NOT_FOUND, omException.getResult()); } /** @@ -1638,7 +1661,7 @@ public void testSnapDiffWithKeyOverwrite() throws Exception { String testBucketName = "bucket1"; store.createVolume(testVolumeName); OzoneVolume volume = store.getVolume(testVolumeName); - volume.createBucket(testBucketName); + createBucket(volume, testBucketName); OzoneBucket bucket = volume.getBucket(testBucketName); String key1 = "k1"; key1 = createFileKeyWithPrefix(bucket, key1); @@ -1663,7 +1686,7 @@ public void testSnapDiffMissingMandatoryParams() throws Exception { String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); - volume1.createBucket(bucket); + createBucket(volume1, bucket); OzoneBucket bucket1 = volume1.getBucket(bucket); // Create Key1 and take snapshot String key1 = "key-1-"; @@ -1677,12 +1700,12 @@ public void testSnapDiffMissingMandatoryParams() throws Exception { OMException omException = assertThrows(OMException.class, () -> store.snapshotDiff(volume, bucket, snap1, nullstr, null, 0, forceFullSnapshotDiff, disableNativeDiff)); - assertEquals(KEY_NOT_FOUND, omException.getResult()); + assertEquals(FILE_NOT_FOUND, omException.getResult()); // From snapshot is empty omException = assertThrows(OMException.class, () -> store.snapshotDiff(volume, bucket, nullstr, snap1, null, 0, forceFullSnapshotDiff, disableNativeDiff)); - assertEquals(KEY_NOT_FOUND, omException.getResult()); + assertEquals(FILE_NOT_FOUND, omException.getResult()); // Bucket is empty assertThrows(IllegalArgumentException.class, () -> store.snapshotDiff(volume, nullstr, snap1, snap2, @@ -1700,8 +1723,8 @@ public void testSnapDiffMultipleBuckets() throws Exception { String bucketName2 = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); - volume1.createBucket(bucketName1); - volume1.createBucket(bucketName2); + createBucket(volume1, bucketName1); + createBucket(volume1, bucketName2); OzoneBucket bucket1 = 
volume1.getBucket(bucketName1); OzoneBucket bucket2 = volume1.getBucket(bucketName2); // Create Key1 and take snapshot @@ -1726,19 +1749,18 @@ public void testListSnapshotDiffWithInvalidParameters() String volume = "vol-" + RandomStringUtils.randomNumeric(5); String bucket = "buck-" + RandomStringUtils.randomNumeric(5); - String volBucketErrorMessage = "Provided volume name " + volume + - " or bucket name " + bucket + " doesn't exist"; + String volErrorMessage = "Volume not found: " + volume; Exception volBucketEx = assertThrows(OMException.class, () -> store.listSnapshotDiffJobs(volume, bucket, "", true)); - assertEquals(volBucketErrorMessage, + assertEquals(volErrorMessage, volBucketEx.getMessage()); // Create the volume and the bucket. store.createVolume(volume); OzoneVolume ozVolume = store.getVolume(volume); - ozVolume.createBucket(bucket); + createBucket(ozVolume, bucket); assertDoesNotThrow(() -> store.listSnapshotDiffJobs(volume, bucket, "", true)); @@ -1785,8 +1807,8 @@ public void testSnapDiffWithMultipleSSTs() throws Exception { String bucketName2 = "buck2"; store.createVolume(volumeName1); OzoneVolume volume1 = store.getVolume(volumeName1); - volume1.createBucket(bucketName1); - volume1.createBucket(bucketName2); + createBucket(volume1, bucketName1); + createBucket(volume1, bucketName2); OzoneBucket bucket1 = volume1.getBucket(bucketName1); OzoneBucket bucket2 = volume1.getBucket(bucketName2); String keyPrefix = "key-"; @@ -1822,7 +1844,7 @@ public void testDeleteSnapshotTwice() throws Exception { String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); - volume1.createBucket(bucket); + createBucket(volume1, bucket); OzoneBucket bucket1 = volume1.getBucket(bucket); // Create Key1 and take snapshot String key1 = "key-1-"; @@ -1843,7 +1865,7 @@ public void testDeleteSnapshotFailure() throws Exception { String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); - volume1.createBucket(bucket); + createBucket(volume1, bucket); OzoneBucket bucket1 = volume1.getBucket(bucket); // Create Key1 and take snapshot String key1 = "key-1-"; @@ -1870,7 +1892,7 @@ public void testDeleteSnapshotMissingMandatoryParams() throws Exception { String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); - volume1.createBucket(bucket); + createBucket(volume1, bucket); OzoneBucket bucket1 = volume1.getBucket(bucket); // Create Key1 and take snapshot String key1 = "key-1-"; @@ -1895,9 +1917,10 @@ public void testSnapshotQuotaHandling() throws Exception { String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); - volume1.createBucket(bucket); + createBucket(volume1, bucket); OzoneBucket bucket1 = volume1.getBucket(bucket); - bucket1.setQuota(OzoneQuota.parseQuota("102400000", "500")); + OzoneBucket originalBucket1 = volume1.getBucket(linkedBuckets.getOrDefault(bucket, bucket)); + originalBucket1.setQuota(OzoneQuota.parseQuota("102400000", "500")); volume1.setQuota(OzoneQuota.parseQuota("204800000", "1000")); long volUsedNamespaceInitial = volume1.getUsedNamespace(); @@ -1973,7 +1996,7 @@ private String createSnapshot(String volName, String buckName, OmSnapshotManager.getSnapshotPrefix(snapshotName); SnapshotInfo snapshotInfo = ozoneManager.getMetadataManager() .getSnapshotInfoTable() - .get(SnapshotInfo.getTableKey(volName, 
buckName, snapshotName)); + .get(SnapshotInfo.getTableKey(volName, linkedBuckets.getOrDefault(buckName, buckName), snapshotName)); String snapshotDirName = OmSnapshotManager.getSnapshotPath(ozoneManager.getConfiguration(), snapshotInfo) + OM_KEY_PREFIX + "CURRENT"; @@ -2182,7 +2205,7 @@ public void testDayWeekMonthSnapshotCreationAndExpiration() throws Exception { String bucketA = "buc-a-" + RandomStringUtils.randomNumeric(5); store.createVolume(volumeA); OzoneVolume volA = store.getVolume(volumeA); - volA.createBucket(bucketA); + createBucket(volA, bucketA); OzoneBucket volAbucketA = volA.getBucket(bucketA); int latestDayIndex = 0; @@ -2309,7 +2332,7 @@ private void checkDayWeekMonthSnapshotData(OzoneBucket ozoneBucketClient, // Validate keys metadata in active Ozone namespace OzoneKeyDetails ozoneKeyDetails = ozoneBucketClient.getKey(keyName); assertEquals(keyName, ozoneKeyDetails.getName()); - assertEquals(ozoneBucketClient.getName(), + assertEquals(linkedBuckets.getOrDefault(ozoneBucketClient.getName(), ozoneBucketClient.getName()), ozoneKeyDetails.getBucketName()); assertEquals(ozoneBucketClient.getVolumeName(), ozoneKeyDetails.getVolumeName()); @@ -2391,7 +2414,7 @@ public void testSnapshotCompactionDag() throws Exception { store.createVolume(volume1); OzoneVolume ozoneVolume = store.getVolume(volume1); - ozoneVolume.createBucket(bucket1); + createBucket(ozoneVolume, bucket1); OzoneBucket ozoneBucket1 = ozoneVolume.getBucket(bucket1); DBStore activeDbStore = ozoneManager.getMetadataManager().getStore(); @@ -2404,7 +2427,7 @@ public void testSnapshotCompactionDag() throws Exception { createSnapshot(volume1, bucket1, "bucket1-snap1"); activeDbStore.compactDB(); - ozoneVolume.createBucket(bucket2); + createBucket(ozoneVolume, bucket2); OzoneBucket ozoneBucket2 = ozoneVolume.getBucket(bucket2); for (int i = 100; i < 200; i++) { @@ -2417,7 +2440,7 @@ public void testSnapshotCompactionDag() throws Exception { createSnapshot(volume1, bucket2, "bucket2-snap1"); activeDbStore.compactDB(); - ozoneVolume.createBucket(bucket3); + createBucket(ozoneVolume, bucket3); OzoneBucket ozoneBucket3 = ozoneVolume.getBucket(bucket3); for (int i = 200; i < 300; i++) { @@ -2496,7 +2519,7 @@ public void testSnapshotReuseSnapName() throws Exception { String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); - volume1.createBucket(bucket); + createBucket(volume1, bucket); OzoneBucket bucket1 = volume1.getBucket(bucket); // Create Key1 and take snapshot String key1 = "key-1-"; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java index 0849b900781..c43ec9c33c1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java @@ -38,6 +38,7 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneKey; +import org.apache.hadoop.ozone.client.OzoneSnapshot; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; @@ -54,9 +55,9 @@ import org.apache.ozone.test.GenericTestUtils; import 
org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -70,9 +71,11 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.Iterator; import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.TreeSet; import java.util.UUID; @@ -99,6 +102,7 @@ * Abstract class for OmSnapshot file system tests. */ @Timeout(120) +@TestInstance(TestInstance.Lifecycle.PER_CLASS) public abstract class TestOmSnapshotFileSystem { protected static final String VOLUME_NAME = "volume" + RandomStringUtils.randomNumeric(5); @@ -107,26 +111,29 @@ public abstract class TestOmSnapshotFileSystem { protected static final String BUCKET_NAME_LEGACY = "bucket-legacy-" + RandomStringUtils.randomNumeric(5); - private static MiniOzoneCluster cluster = null; - private static OzoneClient client; - private static ObjectStore objectStore; - private static OzoneConfiguration conf; - private static OzoneManagerProtocol writeClient; - private static OzoneManager ozoneManager; - private static String keyPrefix; + private MiniOzoneCluster cluster = null; + private OzoneClient client; + private ObjectStore objectStore; + private OzoneConfiguration conf; + private OzoneManagerProtocol writeClient; + private OzoneManager ozoneManager; + private String keyPrefix; private final String bucketName; + private final boolean createLinkedBuckets; private FileSystem fs; private OzoneFileSystem o3fs; + private Map linkedBucketMaps = new HashMap<>(); private static final Logger LOG = LoggerFactory.getLogger(TestOmSnapshot.class); - public TestOmSnapshotFileSystem(String bucketName) { + public TestOmSnapshotFileSystem(String bucketName, boolean createLinkedBuckets) throws Exception { this.bucketName = bucketName; + this.createLinkedBuckets = createLinkedBuckets; + init(); } - @BeforeAll - public static void init() throws Exception { + private void init() throws Exception { conf = new OzoneConfiguration(); conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); cluster = MiniOzoneCluster.newBuilder(conf).build(); @@ -138,12 +145,20 @@ public static void init() throws Exception { ozoneManager = cluster.getOzoneManager(); TestDataUtil.createVolume(client, VOLUME_NAME); - TestDataUtil.createBucket(client, VOLUME_NAME, + OzoneBucket bucket = TestDataUtil.createBucket(client, VOLUME_NAME, new BucketArgs.Builder().setBucketLayout(FILE_SYSTEM_OPTIMIZED).build(), - BUCKET_NAME_FSO); - TestDataUtil.createBucket(client, VOLUME_NAME, + BUCKET_NAME_FSO, createLinkedBuckets); + if (createLinkedBuckets) { + linkedBucketMaps.put(bucket.getName(), bucket.getSourceBucket()); + } + bucket = TestDataUtil.createBucket(client, VOLUME_NAME, new BucketArgs.Builder().setBucketLayout(LEGACY).build(), - BUCKET_NAME_LEGACY); + BUCKET_NAME_LEGACY, createLinkedBuckets); + if (createLinkedBuckets) { + linkedBucketMaps.put(bucket.getName(), bucket.getSourceBucket()); + } + + // stop the deletion services so that keys can still be read KeyManagerImpl keyManager = (KeyManagerImpl) ozoneManager.getKeyManager(); @@ -163,7 +178,7 @@ public void setupFsClient() throws IOException { } @AfterAll - public static void tearDown() throws Exception { + void tearDown() { 
IOUtils.closeQuietly(client); if (cluster != null) { cluster.shutdown(); @@ -273,7 +288,7 @@ public void testListKeysAtDifferentLevels() throws Exception { deleteSnapshot(snapshotName); String expectedMessage = String.format("Unable to load snapshot. " + "Snapshot with table key '/%s/%s/%s' is no longer active", - VOLUME_NAME, bucketName, snapshotName); + VOLUME_NAME, linkedBucketMaps.getOrDefault(bucketName, bucketName), snapshotName); OMException exception = assertThrows(OMException.class, () -> ozoneBucket.listKeys(keyPrefix + "a/", null)); assertEquals(expectedMessage, exception.getMessage()); @@ -376,7 +391,7 @@ private void createKey(OzoneBucket ozoneBucket, String key, int length, assertEquals(inputString, new String(read, StandardCharsets.UTF_8)); } - private static void setKeyPrefix(String s) { + private void setKeyPrefix(String s) { keyPrefix = s; } @@ -493,21 +508,21 @@ public void testListStatus() throws Exception { () -> fs.listStatus(snapshotRoot1)); assertEquals(String.format("Unable to load snapshot. " + "Snapshot with table key '/%s/%s/%s' is no longer active", - VOLUME_NAME, bucketName, snapshotName1), exception1.getMessage()); + VOLUME_NAME, linkedBucketMaps.getOrDefault(bucketName, bucketName), snapshotName1), exception1.getMessage()); deleteSnapshot(snapshotName2); FileNotFoundException exception2 = assertThrows(FileNotFoundException.class, () -> fs.listStatus(snapshotRoot2)); assertEquals(String.format("Unable to load snapshot. " + "Snapshot with table key '/%s/%s/%s' is no longer active", - VOLUME_NAME, bucketName, snapshotName2), exception2.getMessage()); + VOLUME_NAME, linkedBucketMaps.getOrDefault(bucketName, bucketName), snapshotName2), exception2.getMessage()); deleteSnapshot(snapshotName3); FileNotFoundException exception3 = assertThrows(FileNotFoundException.class, () -> fs.listStatus(snapshotParent3)); assertEquals(String.format("Unable to load snapshot. " + "Snapshot with table key '/%s/%s/%s' is no longer active", - VOLUME_NAME, bucketName, snapshotName3), exception3.getMessage()); + VOLUME_NAME, linkedBucketMaps.getOrDefault(bucketName, bucketName), snapshotName3), exception3.getMessage()); } @Test @@ -542,7 +557,7 @@ public void testListStatusWithIntermediateDir() throws Exception { () -> fs.listStatus(snapshotParent)); assertEquals(String.format("Unable to load snapshot. " + "Snapshot with table key '/%s/%s/%s' is no longer active", - VOLUME_NAME, bucketName, snapshotName), exception.getMessage()); + VOLUME_NAME, linkedBucketMaps.getOrDefault(bucketName, bucketName), snapshotName), exception.getMessage()); } @Test @@ -578,7 +593,7 @@ public void testGetFileStatus() throws Exception { () -> fs.listStatus(snapshotParent)); assertEquals(String.format("Unable to load snapshot. " + "Snapshot with table key '/%s/%s/%s' is no longer active", - VOLUME_NAME, bucketName, snapshotName), exception.getMessage()); + VOLUME_NAME, linkedBucketMaps.getOrDefault(bucketName, bucketName), snapshotName), exception.getMessage()); } @Test @@ -619,7 +634,7 @@ void testReadFileFromSnapshot() throws Exception { () -> fs.open(fileInSnapshot)); assertEquals(String.format("FILE_NOT_FOUND: Unable to load snapshot. 
" + "Snapshot with table key '/%s/%s/%s' is no longer active", - VOLUME_NAME, bucketName, snapshotName), exception.getMessage()); + VOLUME_NAME, linkedBucketMaps.getOrDefault(bucketName, bucketName), snapshotName), exception.getMessage()); } private void createAndCommitKey(String keyName) throws IOException { @@ -669,7 +684,7 @@ public void testListStatusOnRoot() throws Exception { () -> fs.listStatus(snapshotRoot)); assertEquals(String.format("Unable to load snapshot. " + "Snapshot with table key '/%s/%s/%s' is no longer active", - VOLUME_NAME, bucketName, snapshotName), exception.getMessage()); + VOLUME_NAME, linkedBucketMaps.getOrDefault(bucketName, bucketName), snapshotName), exception.getMessage()); } /** @@ -726,7 +741,7 @@ public void testListStatusOnLargeDirectory() throws Exception { () -> fs.listStatus(snapshotRoot)); assertEquals(String.format("Unable to load snapshot. " + "Snapshot with table key '/%s/%s/%s' is no longer active", - VOLUME_NAME, bucketName, snapshotName), exception.getMessage()); + VOLUME_NAME, linkedBucketMaps.getOrDefault(bucketName, bucketName), snapshotName), exception.getMessage()); } private String createSnapshot(String snapshotName) @@ -736,9 +751,10 @@ private String createSnapshot(String snapshotName) writeClient.createSnapshot(VOLUME_NAME, bucketName, snapshotName); // wait till the snapshot directory exists + OzoneSnapshot snapshot = objectStore.getSnapshotInfo(VOLUME_NAME, bucketName, snapshotName); SnapshotInfo snapshotInfo = ozoneManager.getMetadataManager() .getSnapshotInfoTable() - .get(SnapshotInfo.getTableKey(VOLUME_NAME, bucketName, snapshotName)); + .get(SnapshotInfo.getTableKey(snapshot.getVolumeName(), snapshot.getBucketName(), snapshotName)); String snapshotDirName = getSnapshotPath(conf, snapshotInfo) + OM_KEY_PREFIX + "CURRENT"; GenericTestUtils.waitFor(() -> new File(snapshotDirName).exists(), diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemFso.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemFso.java index 47bdd8f3bd5..17adf6cce72 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemFso.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemFso.java @@ -25,7 +25,7 @@ */ @Timeout(120) public class TestOmSnapshotFileSystemFso extends TestOmSnapshotFileSystem { - TestOmSnapshotFileSystemFso() { - super(BUCKET_NAME_FSO); + TestOmSnapshotFileSystemFso() throws Exception { + super(BUCKET_NAME_FSO, false); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemFsoWithLinkedBuckets.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemFsoWithLinkedBuckets.java new file mode 100644 index 00000000000..e9d1017cddb --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemFsoWithLinkedBuckets.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot; + +import org.junit.jupiter.api.Timeout; + +/** + * OmSnapshot file system tests for FSO. + */ +@Timeout(120) +public class TestOmSnapshotFileSystemFsoWithLinkedBuckets extends TestOmSnapshotFileSystem { + TestOmSnapshotFileSystemFsoWithLinkedBuckets() throws Exception { + super(BUCKET_NAME_FSO, true); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemLegacy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemLegacy.java index b8d81c31cf5..effaaa5d4e7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemLegacy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemLegacy.java @@ -25,7 +25,7 @@ */ @Timeout(120) public class TestOmSnapshotFileSystemLegacy extends TestOmSnapshotFileSystem { - TestOmSnapshotFileSystemLegacy() { - super(BUCKET_NAME_LEGACY); + TestOmSnapshotFileSystemLegacy() throws Exception { + super(BUCKET_NAME_LEGACY, false); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemLegacyWithLinkedBuckets.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemLegacyWithLinkedBuckets.java new file mode 100644 index 00000000000..61f92cc7c0b --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemLegacyWithLinkedBuckets.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot; + +import org.junit.jupiter.api.Timeout; + +/** + * OmSnapshot file system tests for Legacy. 
+ */ +@Timeout(120) +public class TestOmSnapshotFileSystemLegacyWithLinkedBuckets extends TestOmSnapshotFileSystem { + TestOmSnapshotFileSystemLegacyWithLinkedBuckets() throws Exception { + super(BUCKET_NAME_LEGACY, true); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java index 06fbebb2efa..c303b24ad24 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java @@ -31,6 +31,6 @@ @Timeout(300) class TestOmSnapshotFsoWithNativeLib extends TestOmSnapshot { TestOmSnapshotFsoWithNativeLib() throws Exception { - super(FILE_SYSTEM_OPTIMIZED, false, false, false); + super(FILE_SYSTEM_OPTIMIZED, false, false, false, false); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLibWithLinkedBuckets.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLibWithLinkedBuckets.java new file mode 100644 index 00000000000..c499a705649 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLibWithLinkedBuckets.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot; + +import org.apache.ozone.test.tag.Native; +import org.junit.jupiter.api.Timeout; + +import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; +import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED; + +/** + * Test OmSnapshot for FSO bucket type when native lib is enabled. 
+ */ +@Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME) +@Timeout(300) +class TestOmSnapshotFsoWithNativeLibWithLinkedBuckets extends TestOmSnapshot { + TestOmSnapshotFsoWithNativeLibWithLinkedBuckets() throws Exception { + super(FILE_SYSTEM_OPTIMIZED, false, false, false, true); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithoutNativeLib.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithoutNativeLib.java index c1782b73d19..26262916cb8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithoutNativeLib.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithoutNativeLib.java @@ -29,6 +29,6 @@ public class TestOmSnapshotFsoWithoutNativeLib extends TestOmSnapshot { public TestOmSnapshotFsoWithoutNativeLib() throws Exception { - super(FILE_SYSTEM_OPTIMIZED, false, false, true); + super(FILE_SYSTEM_OPTIMIZED, false, false, true, false); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithoutNativeLibWithLinkedBuckets.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithoutNativeLibWithLinkedBuckets.java new file mode 100644 index 00000000000..4387f77b3fc --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithoutNativeLibWithLinkedBuckets.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot; + +import org.junit.jupiter.api.Timeout; + +import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED; + +/** + * Test OmSnapshot for FSO bucket type when native lib is disabled. 
+ */ +@Timeout(300) +public class TestOmSnapshotFsoWithoutNativeLibWithLinkedBuckets extends TestOmSnapshot { + + public TestOmSnapshotFsoWithoutNativeLibWithLinkedBuckets() throws Exception { + super(FILE_SYSTEM_OPTIMIZED, false, false, true, true); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotObjectStore.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotObjectStore.java index 13c8cb5fca3..bad51103a55 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotObjectStore.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotObjectStore.java @@ -29,6 +29,6 @@ public class TestOmSnapshotObjectStore extends TestOmSnapshot { public TestOmSnapshotObjectStore() throws Exception { - super(OBJECT_STORE, false, false, false); + super(OBJECT_STORE, false, false, false, true); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotObjectStoreWithLinkedBuckets.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotObjectStoreWithLinkedBuckets.java new file mode 100644 index 00000000000..64765e71718 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotObjectStoreWithLinkedBuckets.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot; + +import org.junit.jupiter.api.Timeout; + +import static org.apache.hadoop.ozone.om.helpers.BucketLayout.OBJECT_STORE; + +/** + * Test OmSnapshot for Object Store bucket type. 
+ */ +@Timeout(300) +public class TestOmSnapshotObjectStoreWithLinkedBuckets extends TestOmSnapshot { + + public TestOmSnapshotObjectStoreWithLinkedBuckets() throws Exception { + super(OBJECT_STORE, false, false, false, true); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLegacy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotWithBucketLinkingLegacy.java similarity index 84% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLegacy.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotWithBucketLinkingLegacy.java index bf4a2fee0de..f1ced6c4a80 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLegacy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotWithBucketLinkingLegacy.java @@ -26,9 +26,9 @@ * Test OmSnapshot for Legacy bucket type. */ @Timeout(300) -public class TestOmSnapshotLegacy extends TestOmSnapshot { +public class TestOmSnapshotWithBucketLinkingLegacy extends TestOmSnapshot { - public TestOmSnapshotLegacy() throws Exception { - super(LEGACY, false, false, false); + public TestOmSnapshotWithBucketLinkingLegacy() throws Exception { + super(LEGACY, false, false, false, true); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotWithoutBucketLinkingLegacy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotWithoutBucketLinkingLegacy.java new file mode 100644 index 00000000000..95549471e61 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotWithoutBucketLinkingLegacy.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot; + +import org.junit.jupiter.api.Timeout; + +import static org.apache.hadoop.ozone.om.helpers.BucketLayout.LEGACY; + +/** + * Test OmSnapshot for Legacy bucket type. 
+ */ +@Timeout(300) +public class TestOmSnapshotWithoutBucketLinkingLegacy extends TestOmSnapshot { + + public TestOmSnapshotWithoutBucketLinkingLegacy() throws Exception { + super(LEGACY, false, false, false, false); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java index cc5bca4d310..cb2d5cba3e2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java @@ -51,6 +51,7 @@ import org.apache.ozone.rocksdiff.CompactionNode; import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.LambdaTestUtils; +import org.apache.ozone.test.tag.Flaky; import org.apache.ratis.server.protocol.TermIndex; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -377,6 +378,7 @@ private OzoneManager getLeaderOM() { @Test @DisplayName("testCompactionLogBackgroundService") + @Flaky("HDDS-11672") public void testCompactionLogBackgroundService() throws IOException, InterruptedException, TimeoutException { OzoneManager leaderOM = getLeaderOM(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java index 254de072e05..c3a58a1a211 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java @@ -480,22 +480,22 @@ public void testSnapshotWithFSO() throws Exception { private DirectoryDeletingService getMockedDirectoryDeletingService(AtomicBoolean dirDeletionWaitStarted, AtomicBoolean dirDeletionStarted) - throws InterruptedException, TimeoutException { + throws InterruptedException, TimeoutException, IOException { OzoneManager ozoneManager = Mockito.spy(om); om.getKeyManager().getDirDeletingService().shutdown(); GenericTestUtils.waitFor(() -> om.getKeyManager().getDirDeletingService().getThreadCount() == 0, 1000, 100000); DirectoryDeletingService directoryDeletingService = Mockito.spy(new DirectoryDeletingService(10000, - TimeUnit.MILLISECONDS, 100000, ozoneManager, cluster.getConf())); + TimeUnit.MILLISECONDS, 100000, ozoneManager, cluster.getConf(), 1)); directoryDeletingService.shutdown(); GenericTestUtils.waitFor(() -> directoryDeletingService.getThreadCount() == 0, 1000, 100000); - when(ozoneManager.getMetadataManager()).thenAnswer(i -> { + doAnswer(i -> { // Wait for SDS to reach DDS wait block before processing any deleted directories. 
GenericTestUtils.waitFor(dirDeletionWaitStarted::get, 1000, 100000); dirDeletionStarted.set(true); return i.callRealMethod(); - }); + }).when(directoryDeletingService).getPendingDeletedDirInfo(); return directoryDeletingService; } @@ -601,6 +601,7 @@ private SnapshotDeletingService getMockedSnapshotDeletingService(KeyDeletingServ @Test @Order(4) + @Flaky("HDDS-11847") public void testParallelExcecutionOfKeyDeletionAndSnapshotDeletion() throws Exception { AtomicBoolean keyDeletionWaitStarted = new AtomicBoolean(false); AtomicBoolean dirDeletionWaitStarted = new AtomicBoolean(false); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/parser/TestOzoneHARatisLogParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/parser/TestOzoneHARatisLogParser.java index dff4cd046c9..3172838ab50 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/parser/TestOzoneHARatisLogParser.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/parser/TestOzoneHARatisLogParser.java @@ -29,8 +29,8 @@ import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.segmentparser.OMRatisLogParser; -import org.apache.hadoop.ozone.segmentparser.SCMRatisLogParser; +import org.apache.hadoop.ozone.debug.segmentparser.OMRatisLogParser; +import org.apache.hadoop.ozone.debug.segmentparser.SCMRatisLogParser; import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterEach; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java index 66be107ebf6..275993d1362 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java @@ -30,6 +30,7 @@ import org.apache.hadoop.ozone.recon.scm.ReconNodeManager; import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; import org.apache.ozone.test.GenericTestUtils; +import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -126,6 +127,7 @@ public static void testSnapshot(MiniOzoneCluster cluster) throws Exception { } @Test + @Flaky("HDDS-11645") public void testExplicitRemovalOfNode() throws Exception { ReconNodeManager nodeManager = (ReconNodeManager) ozoneCluster.getReconServer() .getReconStorageContainerManager().getScmNodeManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java new file mode 100644 index 00000000000..6c40e69432f --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java @@ -0,0 +1,541 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.repair.om; + +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.client.BucketArgs; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientFactory; +import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.om.OMStorage; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.repair.OzoneRepair; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Order; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestMethodOrder; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import picocli.CommandLine; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.PrintStream; +import java.nio.charset.StandardCharsets; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * FSORepairTool test cases. 
+ */ +@TestMethodOrder(MethodOrderer.OrderAnnotation.class) +public class TestFSORepairTool { + public static final Logger LOG = LoggerFactory.getLogger(TestFSORepairTool.class); + private static final ByteArrayOutputStream OUT = new ByteArrayOutputStream(); + private static final ByteArrayOutputStream ERR = new ByteArrayOutputStream(); + private static final PrintStream OLD_OUT = System.out; + private static final PrintStream OLD_ERR = System.err; + private static final String DEFAULT_ENCODING = UTF_8.name(); + private static MiniOzoneCluster cluster; + private static FileSystem fs; + private static OzoneClient client; + private static CommandLine cmd; + private static String dbPath; + private static FSORepairTool.Report vol1Report; + private static FSORepairTool.Report vol2Report; + private static FSORepairTool.Report fullReport; + private static FSORepairTool.Report emptyReport; + + @BeforeAll + public static void setup() throws Exception { + OzoneConfiguration conf = new OzoneConfiguration(); + cluster = MiniOzoneCluster.newBuilder(conf).build(); + cluster.waitForClusterToBeReady(); + + // Init ofs. + final String rootPath = String.format("%s://%s/", OZONE_OFS_URI_SCHEME, conf.get(OZONE_OM_ADDRESS_KEY)); + conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + fs = FileSystem.get(conf); + + cmd = new OzoneRepair().getCmd(); + dbPath = new File(OMStorage.getOmDbDir(conf) + "/" + OM_DB_NAME).getPath(); + + // Build multiple connected and disconnected trees + FSORepairTool.Report report1 = buildConnectedTree("vol1", "bucket1", 10); + FSORepairTool.Report report2 = buildDisconnectedTree("vol2", "bucket1", 10); + FSORepairTool.Report report3 = buildConnectedTree("vol2", "bucket2", 10); + FSORepairTool.Report report4 = buildEmptyTree(); + + vol1Report = new FSORepairTool.Report(report1); + vol2Report = new FSORepairTool.Report(report2, report3); + fullReport = new FSORepairTool.Report(report1, report2, report3, report4); + emptyReport = new FSORepairTool.Report(report4); + + client = OzoneClientFactory.getRpcClient(conf); + ObjectStore store = client.getObjectStore(); + + // Create legacy and OBS buckets. + store.getVolume("vol1").createBucket("obs-bucket", + BucketArgs.newBuilder().setBucketLayout(BucketLayout.OBJECT_STORE) + .build()); + store.getVolume("vol1").createBucket("legacy-bucket", + BucketArgs.newBuilder().setBucketLayout(BucketLayout.LEGACY) + .build()); + + // Put a key in the legacy and OBS buckets. 
+ OzoneOutputStream obsStream = store.getVolume("vol1") + .getBucket("obs-bucket") + .createKey("prefix/test-key", 3); + obsStream.write(new byte[]{1, 1, 1}); + obsStream.close(); + + OzoneOutputStream legacyStream = store.getVolume("vol1") + .getBucket("legacy-bucket") + .createKey("prefix/test-key", 3); + legacyStream.write(new byte[]{1, 1, 1}); + legacyStream.close(); + + // Stop the OM before running the tool + cluster.getOzoneManager().stop(); + } + + @BeforeEach + public void init() throws Exception { + System.setOut(new PrintStream(OUT, false, DEFAULT_ENCODING)); + System.setErr(new PrintStream(ERR, false, DEFAULT_ENCODING)); + } + + @AfterEach + public void clean() throws Exception { + // reset stream after each unit test + OUT.reset(); + ERR.reset(); + + // restore system streams + System.setOut(OLD_OUT); + System.setErr(OLD_ERR); + } + + @AfterAll + public static void reset() throws IOException { + if (cluster != null) { + cluster.shutdown(); + } + if (client != null) { + client.close(); + } + IOUtils.closeQuietly(fs); + } + + /** + * Test to check a connected tree with one bucket. + * The output remains the same in debug and repair mode as the tree is connected. + * @throws Exception + */ + @Order(1) + @Test + public void testConnectedTreeOneBucket() throws Exception { + String expectedOutput = serializeReport(vol1Report); + + // Test the connected tree in debug mode. + String[] args = new String[] {"om", "fso-tree", "--db", dbPath, "-v", "/vol1", "-b", "bucket1"}; + int exitCode = cmd.execute(args); + assertEquals(0, exitCode); + + String cliOutput = OUT.toString(DEFAULT_ENCODING); + String reportOutput = extractRelevantSection(cliOutput); + Assertions.assertEquals(expectedOutput, reportOutput); + + OUT.reset(); + ERR.reset(); + + // Running again in repair mode should give same results since the tree is connected. + String[] args1 = new String[] {"om", "fso-tree", "--db", dbPath, "--repair", "-v", "/vol1", "-b", "bucket1"}; + int exitCode1 = cmd.execute(args1); + assertEquals(0, exitCode1); + + String cliOutput1 = OUT.toString(DEFAULT_ENCODING); + String reportOutput1 = extractRelevantSection(cliOutput1); + Assertions.assertEquals(expectedOutput, reportOutput1); + } + + /** + * Test to verify the file size of the tree. + * @throws Exception + */ + @Order(2) + @Test + public void testReportedDataSize() throws Exception { + String expectedOutput = serializeReport(vol2Report); + + String[] args = new String[] {"om", "fso-tree", "--db", dbPath, "-v", "/vol2"}; + int exitCode = cmd.execute(args); + assertEquals(0, exitCode); + + String cliOutput = OUT.toString(DEFAULT_ENCODING); + String reportOutput = extractRelevantSection(cliOutput); + + Assertions.assertEquals(expectedOutput, reportOutput); + } + + /** + * Test to verify how the tool processes the volume and bucket filters. + * - Volume filter only. + * - Both volume and bucket filters. + * - Non-existent bucket. + * - Non-existent volume. + * - Using a bucket filter without specifying a volume. 
+ */ + @Order(3) + @Test + public void testVolumeAndBucketFilter() throws Exception { + // When volume filter is passed + String[] args1 = new String[]{"om", "fso-tree", "--db", dbPath, "--volume", "/vol1"}; + int exitCode1 = cmd.execute(args1); + assertEquals(0, exitCode1); + + String cliOutput1 = OUT.toString(DEFAULT_ENCODING); + String reportOutput1 = extractRelevantSection(cliOutput1); + String expectedOutput1 = serializeReport(vol1Report); + Assertions.assertEquals(expectedOutput1, reportOutput1); + + OUT.reset(); + ERR.reset(); + + // When both volume and bucket filters are passed + String[] args2 = new String[]{"om", "fso-tree", "--db", dbPath, "--volume", "/vol1", + "--bucket", "bucket1"}; + int exitCode2 = cmd.execute(args2); + assertEquals(0, exitCode2); + + String cliOutput2 = OUT.toString(DEFAULT_ENCODING); + String reportOutput2 = extractRelevantSection(cliOutput2); + String expectedOutput2 = serializeReport(vol1Report); + Assertions.assertEquals(expectedOutput2, reportOutput2); + + OUT.reset(); + ERR.reset(); + + // When a non-existent bucket filter is passed + String[] args3 = new String[]{"om", "fso-tree", "--db", dbPath, "--volume", "/vol1", + "--bucket", "bucket3"}; + int exitCode3 = cmd.execute(args3); + assertEquals(0, exitCode3); + String cliOutput3 = OUT.toString(DEFAULT_ENCODING); + Assertions.assertTrue(cliOutput3.contains("Bucket 'bucket3' does not exist in volume '/vol1'.")); + + OUT.reset(); + ERR.reset(); + + // When a non-existent volume filter is passed + String[] args4 = new String[]{"om", "fso-tree", "--db", dbPath, "--volume", "/vol5"}; + int exitCode4 = cmd.execute(args4); + assertEquals(0, exitCode4); + String cliOutput4 = OUT.toString(DEFAULT_ENCODING); + Assertions.assertTrue(cliOutput4.contains("Volume '/vol5' does not exist.")); + + OUT.reset(); + ERR.reset(); + + // When bucket filter is passed without the volume filter. + String[] args5 = new String[]{"om", "fso-tree", "--db", dbPath, "--bucket", "bucket1"}; + int exitCode5 = cmd.execute(args5); + assertEquals(0, exitCode5); + String cliOutput5 = OUT.toString(DEFAULT_ENCODING); + Assertions.assertTrue(cliOutput5.contains("--bucket flag cannot be used without specifying --volume.")); + } + + /** + * Test to verify that non-fso buckets, such as legacy and obs, are skipped during the process. + * @throws Exception + */ + @Order(4) + @Test + public void testNonFSOBucketsSkipped() throws Exception { + String[] args = new String[] {"om", "fso-tree", "--db", dbPath}; + int exitCode = cmd.execute(args); + assertEquals(0, exitCode); + + String cliOutput = OUT.toString(DEFAULT_ENCODING); + Assertions.assertTrue(cliOutput.contains("Skipping non-FSO bucket /vol1/obs-bucket")); + Assertions.assertTrue(cliOutput.contains("Skipping non-FSO bucket /vol1/legacy-bucket")); + } + + /** + * If no file is present inside a vol/bucket, the report statistics should be zero. + * @throws Exception + */ + @Order(5) + @Test + public void testEmptyFileTrees() throws Exception { + String expectedOutput = serializeReport(emptyReport); + + // Run on an empty volume and bucket + String[] args = new String[] {"om", "fso-tree", "--db", dbPath, "-v", "/vol-empty", "-b", "bucket-empty"}; + int exitCode = cmd.execute(args); + assertEquals(0, exitCode); + + String cliOutput = OUT.toString(DEFAULT_ENCODING); + String reportOutput = extractRelevantSection(cliOutput); + Assertions.assertEquals(expectedOutput, reportOutput); + } + + /** + * Test in repair mode. 
This test ensures that: + * - The initial repair correctly resolves unreferenced objects. + * - Subsequent repair runs do not find any unreferenced objects to process. + * @throws Exception + */ + @Order(6) + @Test + public void testMultipleBucketsAndVolumes() throws Exception { + String expectedOutput = serializeReport(fullReport); + + String[] args = new String[] {"om", "fso-tree", "--db", dbPath, "--repair"}; + int exitCode = cmd.execute(args); + assertEquals(0, exitCode); + + String cliOutput = OUT.toString(DEFAULT_ENCODING); + String reportOutput = extractRelevantSection(cliOutput); + Assertions.assertEquals(expectedOutput, reportOutput); + Assertions.assertTrue(cliOutput.contains("Unreferenced:\n\tDirectories: 1\n\tFiles: 3\n\tBytes: 30")); + + String[] args1 = new String[] {"om", "fso-tree", "--db", dbPath, "--repair"}; + int exitCode1 = cmd.execute(args1); + assertEquals(0, exitCode1); + String cliOutput1 = OUT.toString(DEFAULT_ENCODING); + Assertions.assertTrue(cliOutput1.contains("Unreferenced:\n\tDirectories: 0\n\tFiles: 0\n\tBytes: 0")); + } + + /** + * Validate cluster state after OM restart by checking the tables. + * @throws Exception + */ + @Order(7) + @Test + public void validateClusterAfterRestart() throws Exception { + cluster.getOzoneManager().restart(); + + // 4 volumes (/s3v, /vol1, /vol2, /vol-empty) + assertEquals(4, countTableEntries(cluster.getOzoneManager().getMetadataManager().getVolumeTable())); + // 6 buckets (vol1/bucket1, vol2/bucket1, vol2/bucket2, vol-empty/bucket-empty, vol/legacy-bucket, vol1/obs-bucket) + assertEquals(6, countTableEntries(cluster.getOzoneManager().getMetadataManager().getBucketTable())); + // 1 directory is unreferenced and moved to the deletedDirTable during repair mode. + assertEquals(1, countTableEntries(cluster.getOzoneManager().getMetadataManager().getDeletedDirTable())); + // 3 files are unreferenced and moved to the deletedTable during repair mode. + assertEquals(3, countTableEntries(cluster.getOzoneManager().getMetadataManager().getDeletedTable())); + } + + private int countTableEntries(Table table) throws Exception { + int count = 0; + try (TableIterator> iterator = table.iterator()) { + while (iterator.hasNext()) { + iterator.next(); + count++; + } + } + return count; + } + + private String extractRelevantSection(String cliOutput) { + int startIndex = cliOutput.indexOf("Reachable:"); + if (startIndex == -1) { + throw new AssertionError("Output does not contain 'Reachable' section."); + } + return cliOutput.substring(startIndex).trim(); + } + + private String serializeReport(FSORepairTool.Report report) { + return String.format( + "Reachable:%n\tDirectories: %d%n\tFiles: %d%n\tBytes: %d%n" + + "Unreachable:%n\tDirectories: %d%n\tFiles: %d%n\tBytes: %d%n" + + "Unreferenced:%n\tDirectories: %d%n\tFiles: %d%n\tBytes: %d", + report.getReachable().getDirs(), + report.getReachable().getFiles(), + report.getReachable().getBytes(), + report.getUnreachable().getDirs(), + report.getUnreachable().getFiles(), + report.getUnreachable().getBytes(), + report.getUnreferenced().getDirs(), + report.getUnreferenced().getFiles(), + report.getUnreferenced().getBytes() + ); + } + + /** + * Creates a tree with 3 reachable directories and 4 reachable files. 
+ */ + private static FSORepairTool.Report buildConnectedTree(String volume, String bucket, int fileSize) throws Exception { + Path bucketPath = new Path("/" + volume + "/" + bucket); + Path dir1 = new Path(bucketPath, "dir1"); + Path file1 = new Path(dir1, "file1"); + Path file2 = new Path(dir1, "file2"); + + Path dir2 = new Path(bucketPath, "dir1/dir2"); + Path file3 = new Path(dir2, "file3"); + + Path dir3 = new Path(bucketPath, "dir3"); + Path file4 = new Path(bucketPath, "file4"); + + fs.mkdirs(dir1); + fs.mkdirs(dir2); + fs.mkdirs(dir3); + + // Content to put in every file. + String data = new String(new char[fileSize]); + + FSDataOutputStream stream = fs.create(file1); + stream.write(data.getBytes(StandardCharsets.UTF_8)); + stream.close(); + stream = fs.create(file2); + stream.write(data.getBytes(StandardCharsets.UTF_8)); + stream.close(); + stream = fs.create(file3); + stream.write(data.getBytes(StandardCharsets.UTF_8)); + stream.close(); + stream = fs.create(file4); + stream.write(data.getBytes(StandardCharsets.UTF_8)); + stream.close(); + + assertConnectedTreeReadable(volume, bucket); + + FSORepairTool.ReportStatistics reachableCount = + new FSORepairTool.ReportStatistics(3, 4, fileSize * 4L); + return new FSORepairTool.Report.Builder() + .setReachable(reachableCount) + .build(); + } + + private static FSORepairTool.Report buildEmptyTree() throws IOException { + fs.mkdirs(new Path("/vol-empty/bucket-empty")); + FSORepairTool.ReportStatistics reachableCount = + new FSORepairTool.ReportStatistics(0, 0, 0); + FSORepairTool.ReportStatistics unreachableCount = + new FSORepairTool.ReportStatistics(0, 0, 0); + FSORepairTool.ReportStatistics unreferencedCount = + new FSORepairTool.ReportStatistics(0, 0, 0); + return new FSORepairTool.Report.Builder() + .setReachable(reachableCount) + .setUnreachable(unreachableCount) + .setUnreferenced(unreferencedCount) + .build(); + } + + private static void assertConnectedTreeReadable(String volume, String bucket) throws IOException { + Path bucketPath = new Path("/" + volume + "/" + bucket); + Path dir1 = new Path(bucketPath, "dir1"); + Path file1 = new Path(dir1, "file1"); + Path file2 = new Path(dir1, "file2"); + + Path dir2 = new Path(bucketPath, "dir1/dir2"); + Path file3 = new Path(dir2, "file3"); + + Path dir3 = new Path(bucketPath, "dir3"); + Path file4 = new Path(bucketPath, "file4"); + + Assertions.assertTrue(fs.exists(dir1)); + Assertions.assertTrue(fs.exists(dir2)); + Assertions.assertTrue(fs.exists(dir3)); + Assertions.assertTrue(fs.exists(file1)); + Assertions.assertTrue(fs.exists(file2)); + Assertions.assertTrue(fs.exists(file3)); + Assertions.assertTrue(fs.exists(file4)); + } + + /** + * Creates a tree with 1 reachable directory, 1 reachable file, 1 + * unreachable directory, and 3 unreachable files. + */ + private static FSORepairTool.Report buildDisconnectedTree(String volume, String bucket, int fileSize) + throws Exception { + buildConnectedTree(volume, bucket, fileSize); + + // Manually remove dir1. This should disconnect 3 of the files and 1 of + // the directories. + disconnectDirectory("dir1"); + + assertDisconnectedTreePartiallyReadable(volume, bucket); + + // dir1 does not count towards the unreachable directories the tool + // will see. It was deleted completely so the tool will never see it. 
+ FSORepairTool.ReportStatistics reachableCount = + new FSORepairTool.ReportStatistics(1, 1, fileSize); + FSORepairTool.ReportStatistics unreferencedCount = + new FSORepairTool.ReportStatistics(1, 3, fileSize * 3L); + return new FSORepairTool.Report.Builder() + .setReachable(reachableCount) + .setUnreferenced(unreferencedCount) + .build(); + } + + private static void disconnectDirectory(String dirName) throws Exception { + Table dirTable = cluster.getOzoneManager().getMetadataManager().getDirectoryTable(); + try (TableIterator> iterator = dirTable.iterator()) { + while (iterator.hasNext()) { + Table.KeyValue entry = iterator.next(); + String key = entry.getKey(); + if (key.contains(dirName)) { + dirTable.delete(key); + break; + } + } + } + } + + private static void assertDisconnectedTreePartiallyReadable(String volume, String bucket) throws Exception { + Path bucketPath = new Path("/" + volume + "/" + bucket); + Path dir1 = new Path(bucketPath, "dir1"); + Path file1 = new Path(dir1, "file1"); + Path file2 = new Path(dir1, "file2"); + + Path dir2 = new Path(bucketPath, "dir1/dir2"); + Path file3 = new Path(dir2, "file3"); + + Path dir3 = new Path(bucketPath, "dir3"); + Path file4 = new Path(bucketPath, "file4"); + + Assertions.assertFalse(fs.exists(dir1)); + Assertions.assertFalse(fs.exists(dir2)); + Assertions.assertTrue(fs.exists(dir3)); + Assertions.assertFalse(fs.exists(file1)); + Assertions.assertFalse(fs.exists(file2)); + Assertions.assertFalse(fs.exists(file3)); + Assertions.assertTrue(fs.exists(file4)); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java new file mode 100644 index 00000000000..ab56af670b3 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java @@ -0,0 +1,902 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.s3.awssdk.v1; + +import com.amazonaws.AmazonServiceException; +import com.amazonaws.AmazonServiceException.ErrorType; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.model.AbortMultipartUploadRequest; +import com.amazonaws.services.s3.model.AccessControlList; +import com.amazonaws.services.s3.model.Bucket; +import com.amazonaws.services.s3.model.CanonicalGrantee; +import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; +import com.amazonaws.services.s3.model.CompleteMultipartUploadResult; +import com.amazonaws.services.s3.model.CreateBucketRequest; +import com.amazonaws.services.s3.model.Grantee; +import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; +import com.amazonaws.services.s3.model.InitiateMultipartUploadResult; +import com.amazonaws.services.s3.model.ListMultipartUploadsRequest; +import com.amazonaws.services.s3.model.ListObjectsRequest; +import com.amazonaws.services.s3.model.ListObjectsV2Request; +import com.amazonaws.services.s3.model.ListObjectsV2Result; +import com.amazonaws.services.s3.model.ListPartsRequest; +import com.amazonaws.services.s3.model.MultipartUpload; +import com.amazonaws.services.s3.model.MultipartUploadListing; +import com.amazonaws.services.s3.model.ObjectListing; +import com.amazonaws.services.s3.model.ObjectMetadata; +import com.amazonaws.services.s3.model.ObjectTagging; +import com.amazonaws.services.s3.model.Owner; +import com.amazonaws.services.s3.model.PartETag; +import com.amazonaws.services.s3.model.PartListing; +import com.amazonaws.services.s3.model.PartSummary; +import com.amazonaws.services.s3.model.Permission; +import com.amazonaws.services.s3.model.PutObjectRequest; +import com.amazonaws.services.s3.model.PutObjectResult; +import com.amazonaws.services.s3.model.S3Object; +import com.amazonaws.services.s3.model.S3ObjectInputStream; +import com.amazonaws.services.s3.model.S3ObjectSummary; +import com.amazonaws.services.s3.model.SetObjectAclRequest; +import com.amazonaws.services.s3.model.Tag; +import com.amazonaws.services.s3.model.UploadPartRequest; +import com.amazonaws.services.s3.model.UploadPartResult; +import com.amazonaws.services.s3.transfer.TransferManager; +import com.amazonaws.services.s3.transfer.TransferManagerBuilder; +import com.amazonaws.services.s3.transfer.Upload; +import com.amazonaws.services.s3.transfer.model.UploadResult; +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationFactor; +import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientFactory; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.utils.InputSubstream; +import org.apache.ozone.test.OzoneTestBase; +import org.junit.jupiter.api.MethodOrderer; 
+import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestMethodOrder; +import org.junit.jupiter.api.io.TempDir; + +import javax.xml.bind.DatatypeConverter; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.RandomAccessFile; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.MessageDigest; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Random; +import java.util.stream.Collectors; + +import static org.apache.hadoop.ozone.OzoneConsts.MB; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * This is an abstract class to test the AWS Java S3 SDK operations. + * This class should be extended for OM standalone and OM HA (Ratis) cluster setup. + * + * The test scenarios are adapted from + * - https://github.com/awsdocs/aws-doc-sdk-examples/tree/main/java/example_code/s3/ + * - https://github.com/ceph/s3-tests + * + * TODO: Currently we are using AWS SDK V1, need to also add tests for AWS SDK V2. + */ +@TestMethodOrder(MethodOrderer.MethodName.class) +public abstract class AbstractS3SDKV1Tests extends OzoneTestBase { + + /** + * There are still some unsupported S3 operations. 
+ * Current unsupported S3 operations (non-exhaustive): + * - Cross Region Replication (CrossRegionReplication.java) + * - Versioning-enabled buckets + * - DeleteObjectVersionEnabledBucket.java + * - DeleteMultipleObjectsVersionEnabledBucket.java + * - ListKeysVersioningEnabledBucket.java + * - Website configurations + * - WebsiteConfiguration.java + * - SetWebsiteConfiguration.java + * - GetWebsiteConfiguration.java + * - DeleteWebsiteConfiguration.java + * - S3 Event Notifications + * - EnableNotificationOnABucket.java + * - Object tags + * - GetObjectTags.java + * - GetObjectTags2.java + * - Bucket policy + * - SetBucketPolicy.java + * - GetBucketPolicy.java + * - DeleteBucketPolicy.java + * - Bucket lifecycle configuration + * - LifecycleConfiguration.java + * - Canned Bucket ACL + * - CreateBucketWithACL.java + * - Object ACL + * - SetAcl.java + * - ModifyACLExistingObject.java + * - GetAcl.java + * - S3 Encryption + * - S3Encrypt.java + * - S3EncryptV2.java + * - Client-side encryption + * - S3ClientSideEncryptionAsymmetricMasterKey.java + * - S3ClientSideEncryptionSymMasterKey.java + * - Server-side encryption + * - SpecifyServerSideEncryption.java + * - ServerSideEncryptionCopyObjectUsingHLWithSSEC.java + * - ServerSideEncryptionUsingClientSideEncryptionKey.java + * - Dual stack endpoints + * - DualStackEndpoints.java + * - Transfer acceleration + * - TransferAcceleration.java + * - Temp credentials + * - MakingRequestsWithFederatedTempCredentials.java + * - MakingRequestsWithIAMTempCredentials.java + * - Object archival + * - RestoreArchivedObject + * - KMS key + * - UploadObjectKMSKey.java + */ + + private static MiniOzoneCluster cluster = null; + private static AmazonS3 s3Client = null; + + /** + * Create a MiniOzoneCluster with S3G enabled for testing. + * @param conf Configurations to start the cluster + * @throws Exception exception thrown when waiting for the cluster to be ready. + */ + static void startCluster(OzoneConfiguration conf) throws Exception { + cluster = MiniOzoneCluster.newBuilder(conf) + .includeS3G(true) + .setNumDatanodes(5) + .build(); + cluster.waitForClusterToBeReady(); + s3Client = cluster.newS3Client(); + } + + /** + * Shutdown the MiniOzoneCluster.
+ */ + static void shutdownCluster() throws IOException { + if (cluster != null) { + cluster.shutdown(); + } + } + + public static void setCluster(MiniOzoneCluster cluster) { + AbstractS3SDKV1Tests.cluster = cluster; + } + + public static MiniOzoneCluster getCluster() { + return AbstractS3SDKV1Tests.cluster; + } + + @Test + public void testCreateBucket() { + final String bucketName = getBucketName(); + + Bucket b = s3Client.createBucket(bucketName); + + assertEquals(bucketName, b.getName()); + assertTrue(s3Client.doesBucketExist(bucketName)); + assertTrue(s3Client.doesBucketExistV2(bucketName)); + assertTrue(isBucketEmpty(b)); + } + + @Test + public void testBucketACLOperations() { + // TODO HDDS-11738: Uncomment assertions when bucket S3 ACL logic has been fixed + final String bucketName = getBucketName(); + + AccessControlList aclList = new AccessControlList(); + Owner owner = new Owner("owner", "owner"); + aclList.withOwner(owner); + Grantee grantee = new CanonicalGrantee("testGrantee"); + aclList.grantPermission(grantee, Permission.Read); + + + CreateBucketRequest createBucketRequest = new CreateBucketRequest(bucketName) + .withAccessControlList(aclList); + + s3Client.createBucket(createBucketRequest); + + //assertEquals(aclList, s3Client.getBucketAcl(bucketName)); + + aclList.grantPermission(grantee, Permission.Write); + s3Client.setBucketAcl(bucketName, aclList); + + //assertEquals(aclList, s3Client.getBucketAcl(bucketName)); + } + + @Test + public void testListBuckets() { + List bucketNames = new ArrayList<>(); + for (int i = 0; i <= 5; i++) { + String bucketName = getBucketName(String.valueOf(i)); + s3Client.createBucket(bucketName); + bucketNames.add(bucketName); + } + + List bucketList = s3Client.listBuckets(); + List listBucketNames = bucketList.stream() + .map(Bucket::getName).collect(Collectors.toList()); + + assertThat(listBucketNames).containsAll(bucketNames); + } + + @Test + public void testDeleteBucket() { + final String bucketName = getBucketName(); + + s3Client.createBucket(bucketName); + + s3Client.deleteBucket(bucketName); + + assertFalse(s3Client.doesBucketExist(bucketName)); + assertFalse(s3Client.doesBucketExistV2(bucketName)); + } + + @Test + public void testDeleteBucketNotExist() { + final String bucketName = getBucketName(); + + AmazonServiceException ase = assertThrows(AmazonServiceException.class, + () -> s3Client.deleteBucket(bucketName)); + + assertEquals(ErrorType.Client, ase.getErrorType()); + assertEquals(404, ase.getStatusCode()); + assertEquals("NoSuchBucket", ase.getErrorCode()); + } + + @Test + public void testDeleteBucketNonEmptyWithKeys() { + final String bucketName = getBucketName(); + s3Client.createBucket(bucketName); + + // Upload some objects to the bucket + for (int i = 1; i <= 10; i++) { + s3Client.putObject(bucketName, "key-" + i, RandomStringUtils.randomAlphanumeric(1024)); + } + + // Bucket deletion should fail if there are still keys in the bucket + AmazonServiceException ase = assertThrows(AmazonServiceException.class, + () -> s3Client.deleteBucket(bucketName) + ); + assertEquals(ErrorType.Client, ase.getErrorType()); + assertEquals(409, ase.getStatusCode()); + assertEquals("BucketNotEmpty", ase.getErrorCode()); + + // Delete all the keys + ObjectListing objectListing = s3Client.listObjects(bucketName); + while (true) { + for (S3ObjectSummary summary : objectListing.getObjectSummaries()) { + s3Client.deleteObject(bucketName, summary.getKey()); + } + + // more object_listing to retrieve? 
+ if (objectListing.isTruncated()) { + objectListing = s3Client.listNextBatchOfObjects(objectListing); + } else { + break; + } + } + } + + @Test + public void testDeleteBucketNonEmptyWithIncompleteMultipartUpload(@TempDir Path tempDir) throws Exception { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + s3Client.createBucket(bucketName); + + File multipartUploadFile = Files.createFile(tempDir.resolve("multipartupload.txt")).toFile(); + + createFile(multipartUploadFile, (int) (5 * MB)); + + // Create an incomplete multipart upload by initiating multipart upload, + // uploading some parts, but not actually completing it. + String uploadId = initiateMultipartUpload(bucketName, keyName, null, null, null); + + uploadParts(bucketName, keyName, uploadId, multipartUploadFile, 1 * MB); + + // Bucket deletion should fail if there are still keys in the bucket + AmazonServiceException ase = assertThrows(AmazonServiceException.class, + () -> s3Client.deleteBucket(bucketName) + ); + assertEquals(ErrorType.Client, ase.getErrorType()); + assertEquals(409, ase.getStatusCode()); + assertEquals("BucketNotEmpty", ase.getErrorCode()); + + // After the multipart upload is aborted, the bucket deletion should succeed + abortMultipartUpload(bucketName, keyName, uploadId); + + s3Client.deleteBucket(bucketName); + + assertFalse(s3Client.doesBucketExistV2(bucketName)); + } + + @Test + public void testPutObject() { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + final String content = "bar"; + s3Client.createBucket(bucketName); + + InputStream is = new ByteArrayInputStream(content.getBytes(StandardCharsets.UTF_8)); + + PutObjectResult putObjectResult = s3Client.putObject(bucketName, keyName, is, new ObjectMetadata()); + assertEquals("37b51d194a7513e45b56f6524f2d51f2", putObjectResult.getETag()); + } + + @Test + public void testPutObjectEmpty() { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + final String content = ""; + s3Client.createBucket(bucketName); + + InputStream is = new ByteArrayInputStream(content.getBytes(StandardCharsets.UTF_8)); + + PutObjectResult putObjectResult = s3Client.putObject(bucketName, keyName, is, new ObjectMetadata()); + assertEquals("d41d8cd98f00b204e9800998ecf8427e", putObjectResult.getETag()); + } + + @Test + public void testPutObjectACL() throws Exception { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + final String content = "bar"; + final byte[] contentBytes = content.getBytes(StandardCharsets.UTF_8); + s3Client.createBucket(bucketName); + + InputStream is = new ByteArrayInputStream(content.getBytes(StandardCharsets.UTF_8)); + + PutObjectResult putObjectResult = s3Client.putObject(bucketName, keyName, is, new ObjectMetadata()); + String originalObjectETag = putObjectResult.getETag(); + assertTrue(s3Client.doesObjectExist(bucketName, keyName)); + + AccessControlList aclList = new AccessControlList(); + Owner owner = new Owner("owner", "owner"); + aclList.withOwner(owner); + Grantee grantee = new CanonicalGrantee("testGrantee"); + aclList.grantPermission(grantee, Permission.Read); + + SetObjectAclRequest setObjectAclRequest = new SetObjectAclRequest(bucketName, keyName, aclList); + + AmazonServiceException ase = assertThrows(AmazonServiceException.class, + () -> s3Client.setObjectAcl(setObjectAclRequest)); + assertEquals("NotImplemented", ase.getErrorCode()); + assertEquals(501, ase.getStatusCode()); + assertEquals(ErrorType.Service, 
ase.getErrorType()); + + // Ensure that the object content remains unchanged + ObjectMetadata updatedObjectMetadata = s3Client.getObjectMetadata(bucketName, keyName); + assertEquals(originalObjectETag, updatedObjectMetadata.getETag()); + S3Object updatedObject = s3Client.getObject(bucketName, keyName); + + try (S3ObjectInputStream s3is = updatedObject.getObjectContent(); + ByteArrayOutputStream bos = new ByteArrayOutputStream(contentBytes.length)) { + byte[] readBuf = new byte[1024]; + int readLen = 0; + while ((readLen = s3is.read(readBuf)) > 0) { + bos.write(readBuf, 0, readLen); + } + assertEquals(content, bos.toString("UTF-8")); + } + } + + @Test + public void testGetObject() throws Exception { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + final String content = "bar"; + final byte[] contentBytes = content.getBytes(StandardCharsets.UTF_8); + s3Client.createBucket(bucketName); + + InputStream is = new ByteArrayInputStream(contentBytes); + ObjectMetadata objectMetadata = new ObjectMetadata(); + Map userMetadata = new HashMap<>(); + userMetadata.put("key1", "value1"); + userMetadata.put("key2", "value2"); + objectMetadata.setUserMetadata(userMetadata); + + List tags = Arrays.asList(new Tag("tag1", "value1"), new Tag("tag2", "value2")); + ObjectTagging objectTagging = new ObjectTagging(tags); + + + PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, keyName, is, objectMetadata) + .withTagging(objectTagging); + + s3Client.putObject(putObjectRequest); + + S3Object s3Object = s3Client.getObject(bucketName, keyName); + assertEquals(tags.size(), s3Object.getTaggingCount()); + + try (S3ObjectInputStream s3is = s3Object.getObjectContent(); + ByteArrayOutputStream bos = new ByteArrayOutputStream(contentBytes.length)) { + byte[] readBuf = new byte[1024]; + int readLen = 0; + while ((readLen = s3is.read(readBuf)) > 0) { + bos.write(readBuf, 0, readLen); + } + assertEquals(content, bos.toString("UTF-8")); + } + } + + @Test + public void testGetObjectWithoutETag() throws Exception { + // Object uploaded using other protocols (e.g. ofs / ozone cli) will not + // have ETag. Ensure that ETag will not do ETag validation on GetObject if there + // is no ETag present. 
+ final String bucketName = getBucketName(); + final String keyName = getKeyName(); + + s3Client.createBucket(bucketName); + + String value = "sample value"; + byte[] valueBytes = value.getBytes(StandardCharsets.UTF_8); + + OzoneConfiguration conf = cluster.getConf(); + try (OzoneClient ozoneClient = OzoneClientFactory.getRpcClient(conf)) { + ObjectStore store = ozoneClient.getObjectStore(); + + OzoneVolume volume = store.getS3Volume(); + OzoneBucket bucket = volume.getBucket(bucketName); + + try (OzoneOutputStream out = bucket.createKey(keyName, + valueBytes.length, + ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.ONE), + Collections.emptyMap())) { + out.write(valueBytes); + } + } + + S3Object s3Object = s3Client.getObject(bucketName, keyName); + assertNull(s3Object.getObjectMetadata().getETag()); + + try (S3ObjectInputStream s3is = s3Object.getObjectContent(); + ByteArrayOutputStream bos = new ByteArrayOutputStream(valueBytes.length)) { + byte[] readBuf = new byte[1024]; + int readLen = 0; + while ((readLen = s3is.read(readBuf)) > 0) { + bos.write(readBuf, 0, readLen); + } + assertEquals(value, bos.toString("UTF-8")); + } + } + + @Test + public void testListObjectsMany() { + final String bucketName = getBucketName(); + s3Client.createBucket(bucketName); + final List keyNames = Arrays.asList( + getKeyName("1"), + getKeyName("2"), + getKeyName("3") + ); + + for (String keyName: keyNames) { + s3Client.putObject(bucketName, keyName, RandomStringUtils.randomAlphanumeric(5)); + } + + ListObjectsRequest listObjectsRequest = new ListObjectsRequest() + .withBucketName(bucketName) + .withMaxKeys(2); + ObjectListing listObjectsResponse = s3Client.listObjects(listObjectsRequest); + assertThat(listObjectsResponse.getObjectSummaries()).hasSize(2); + assertEquals(bucketName, listObjectsResponse.getBucketName()); + assertEquals(listObjectsResponse.getObjectSummaries().stream() + .map(S3ObjectSummary::getKey).collect(Collectors.toList()), + keyNames.subList(0, 2)); + assertTrue(listObjectsResponse.isTruncated()); + + + listObjectsRequest = new ListObjectsRequest() + .withBucketName(bucketName) + .withMaxKeys(2) + .withMarker(listObjectsResponse.getNextMarker()); + listObjectsResponse = s3Client.listObjects(listObjectsRequest); + assertThat(listObjectsResponse.getObjectSummaries()).hasSize(1); + assertEquals(bucketName, listObjectsResponse.getBucketName()); + assertEquals(listObjectsResponse.getObjectSummaries().stream() + .map(S3ObjectSummary::getKey).collect(Collectors.toList()), + keyNames.subList(2, keyNames.size())); + assertFalse(listObjectsResponse.isTruncated()); + } + + @Test + public void testListObjectsManyV2() { + final String bucketName = getBucketName(); + s3Client.createBucket(bucketName); + final List keyNames = Arrays.asList( + getKeyName("1"), + getKeyName("2"), + getKeyName("3") + ); + + for (String keyName: keyNames) { + s3Client.putObject(bucketName, keyName, RandomStringUtils.randomAlphanumeric(5)); + } + + ListObjectsV2Request listObjectsRequest = new ListObjectsV2Request() + .withBucketName(bucketName) + .withMaxKeys(2); + ListObjectsV2Result listObjectsResponse = s3Client.listObjectsV2(listObjectsRequest); + assertThat(listObjectsResponse.getObjectSummaries()).hasSize(2); + assertEquals(bucketName, listObjectsResponse.getBucketName()); + assertEquals(listObjectsResponse.getObjectSummaries().stream() + .map(S3ObjectSummary::getKey).collect(Collectors.toList()), + keyNames.subList(0, 2)); + assertTrue(listObjectsResponse.isTruncated()); + + + 
listObjectsRequest = new ListObjectsV2Request() + .withBucketName(bucketName) + .withMaxKeys(2) + .withContinuationToken(listObjectsResponse.getNextContinuationToken()); + listObjectsResponse = s3Client.listObjectsV2(listObjectsRequest); + assertThat(listObjectsResponse.getObjectSummaries()).hasSize(1); + assertEquals(bucketName, listObjectsResponse.getBucketName()); + assertEquals(listObjectsResponse.getObjectSummaries().stream() + .map(S3ObjectSummary::getKey).collect(Collectors.toList()), + keyNames.subList(2, keyNames.size())); + assertFalse(listObjectsResponse.isTruncated()); + } + + @Test + public void testListObjectsBucketNotExist() { + final String bucketName = getBucketName(); + ListObjectsRequest listObjectsRequest = new ListObjectsRequest() + .withBucketName(bucketName); + AmazonServiceException ase = assertThrows(AmazonServiceException.class, + () -> s3Client.listObjects(listObjectsRequest)); + assertEquals(ErrorType.Client, ase.getErrorType()); + assertEquals(404, ase.getStatusCode()); + assertEquals("NoSuchBucket", ase.getErrorCode()); + } + + @Test + public void testListObjectsV2BucketNotExist() { + final String bucketName = getBucketName(); + ListObjectsV2Request listObjectsRequest = new ListObjectsV2Request() + .withBucketName(bucketName); + AmazonServiceException ase = assertThrows(AmazonServiceException.class, + () -> s3Client.listObjectsV2(listObjectsRequest)); + assertEquals(ErrorType.Client, ase.getErrorType()); + assertEquals(404, ase.getStatusCode()); + assertEquals("NoSuchBucket", ase.getErrorCode()); + } + + @Test + public void testHighLevelMultipartUpload(@TempDir Path tempDir) throws Exception { + TransferManager tm = TransferManagerBuilder.standard() + .withS3Client(s3Client) + .build(); + + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + + s3Client.createBucket(bucketName); + + // The minimum file size for TransferManager to initiate a multipart upload is 16MB, so create a file + // larger than the threshold. + // See TransferManagerConfiguration#getMultipartUploadThreshold + int fileSize = (int) (20 * MB); + File multipartUploadFile = Files.createFile(tempDir.resolve("multipartupload.txt")).toFile(); + + createFile(multipartUploadFile, fileSize); + + // TransferManager processes all transfers asynchronously, + // so this call returns immediately.
+ Upload upload = tm.upload(bucketName, keyName, multipartUploadFile); + + upload.waitForCompletion(); + UploadResult uploadResult = upload.waitForUploadResult(); + assertEquals(bucketName, uploadResult.getBucketName()); + assertEquals(keyName, uploadResult.getKey()); + } + + @Test + public void testLowLevelMultipartUpload(@TempDir Path tempDir) throws Exception { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + final Map userMetadata = new HashMap<>(); + userMetadata.put("key1", "value1"); + userMetadata.put("key2", "value2"); + + List tags = Arrays.asList(new Tag("tag1", "value1"), new Tag("tag2", "value2")); + + s3Client.createBucket(bucketName); + + File multipartUploadFile = Files.createFile(tempDir.resolve("multipartupload.txt")).toFile(); + + createFile(multipartUploadFile, (int) (25 * MB)); + + multipartUpload(bucketName, keyName, multipartUploadFile, 5 * MB, null, userMetadata, tags); + + S3Object s3Object = s3Client.getObject(bucketName, keyName); + assertEquals(keyName, s3Object.getKey()); + assertEquals(bucketName, s3Object.getBucketName()); + assertEquals(tags.size(), s3Object.getTaggingCount()); + + ObjectMetadata objectMetadata = s3Client.getObjectMetadata(bucketName, keyName); + assertEquals(userMetadata, objectMetadata.getUserMetadata()); + } + + @Test + public void testListMultipartUploads() { + final String bucketName = getBucketName(); + final String multipartKey1 = getKeyName("multipart1"); + final String multipartKey2 = getKeyName("multipart2"); + + s3Client.createBucket(bucketName); + + List uploadIds = new ArrayList<>(); + + String uploadId1 = initiateMultipartUpload(bucketName, multipartKey1, null, null, null); + uploadIds.add(uploadId1); + String uploadId2 = initiateMultipartUpload(bucketName, multipartKey1, null, null, null); + uploadIds.add(uploadId2); + // TODO: Currently, Ozone sorts based on uploadId instead of MPU init time within the same key. 
+ // Remove this sorting step once HDDS-11532 has been implemented + Collections.sort(uploadIds); + String uploadId3 = initiateMultipartUpload(bucketName, multipartKey2, null, null, null); + uploadIds.add(uploadId3); + + // TODO: Add test for max uploads threshold and marker once HDDS-11530 has been implemented + ListMultipartUploadsRequest listMultipartUploadsRequest = new ListMultipartUploadsRequest(bucketName); + + MultipartUploadListing result = s3Client.listMultipartUploads(listMultipartUploadsRequest); + + List listUploadIds = result.getMultipartUploads().stream() + .map(MultipartUpload::getUploadId) + .collect(Collectors.toList()); + + assertEquals(uploadIds, listUploadIds); + } + + @Test + public void testListParts(@TempDir Path tempDir) throws Exception { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + final long fileSize = 5 * MB; + final long partSize = 1 * MB; + final int maxParts = 2; + + s3Client.createBucket(bucketName); + + String uploadId = initiateMultipartUpload(bucketName, keyName, null, null, null); + + File multipartUploadFile = Files.createFile(tempDir.resolve("multipartupload.txt")).toFile(); + + createFile(multipartUploadFile, (int) fileSize); + + List partETags = uploadParts(bucketName, keyName, uploadId, multipartUploadFile, partSize); + + List listPartETags = new ArrayList<>(); + int partNumberMarker = 0; + int expectedNumOfParts = 5; + PartListing listPartsResult; + do { + ListPartsRequest listPartsRequest = new ListPartsRequest(bucketName, keyName, uploadId) + .withMaxParts(maxParts) + .withPartNumberMarker(partNumberMarker); + listPartsResult = s3Client.listParts(listPartsRequest); + if (expectedNumOfParts > maxParts) { + assertTrue(listPartsResult.isTruncated()); + partNumberMarker = listPartsResult.getNextPartNumberMarker(); + expectedNumOfParts -= maxParts; + } else { + assertFalse(listPartsResult.isTruncated()); + } + for (PartSummary partSummary : listPartsResult.getParts()) { + listPartETags.add(new PartETag(partSummary.getPartNumber(), partSummary.getETag())); + } + } while (listPartsResult.isTruncated()); + + assertEquals(partETags.size(), listPartETags.size()); + for (int i = 0; i < partETags.size(); i++) { + assertEquals(partETags.get(i).getPartNumber(), listPartETags.get(i).getPartNumber()); + assertEquals(partETags.get(i).getETag(), listPartETags.get(i).getETag()); + } + } + + @Test + public void testListPartsNotFound() { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + + s3Client.createBucket(bucketName); + + ListPartsRequest listPartsRequest = + new ListPartsRequest(bucketName, keyName, "nonexist"); + + AmazonServiceException ase = assertThrows(AmazonServiceException.class, + () -> s3Client.listParts(listPartsRequest)); + + assertEquals(ErrorType.Client, ase.getErrorType()); + assertEquals(404, ase.getStatusCode()); + assertEquals("NoSuchUpload", ase.getErrorCode()); + } + + private boolean isBucketEmpty(Bucket bucket) { + ObjectListing objectListing = s3Client.listObjects(bucket.getName()); + return objectListing.getObjectSummaries().isEmpty(); + } + + private String getBucketName() { + return getBucketName(null); + } + + private String getBucketName(String suffix) { + return (getTestName() + "bucket" + suffix).toLowerCase(Locale.ROOT); + } + + private String getKeyName() { + return getKeyName(null); + } + + private String getKeyName(String suffix) { + return (getTestName() + "key" + suffix).toLowerCase(Locale.ROOT); + } + + private String multipartUpload(String 
bucketName, String key, File file, long partSize, String contentType, + Map userMetadata, List tags) throws Exception { + String uploadId = initiateMultipartUpload(bucketName, key, contentType, userMetadata, tags); + + List partETags = uploadParts(bucketName, key, uploadId, file, partSize); + + completeMultipartUpload(bucketName, key, uploadId, partETags); + + return uploadId; + } + + private String initiateMultipartUpload(String bucketName, String key, String contentType, + Map metadata, List tags) { + InitiateMultipartUploadRequest initRequest; + if (metadata == null || metadata.isEmpty()) { + initRequest = new InitiateMultipartUploadRequest(bucketName, key); + } else { + ObjectMetadata objectMetadata = new ObjectMetadata(); + objectMetadata.setUserMetadata(metadata); + if (contentType != null) { + objectMetadata.setContentType(contentType); + } + + initRequest = new InitiateMultipartUploadRequest(bucketName, key, objectMetadata) + .withTagging(new ObjectTagging(tags)); + } + + InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest); + assertEquals(bucketName, initResponse.getBucketName()); + assertEquals(key, initResponse.getKey()); + // TODO: Once bucket lifecycle configuration is supported, should check for "abortDate" and "abortRuleId" + + return initResponse.getUploadId(); + } + + // TODO: Also support async upload parts (similar to v2 asyncClient) + private List uploadParts(String bucketName, String key, String uploadId, File file, long partSize) + throws Exception { + // Create a list of ETag objects. You retrieve ETags for each object part + // uploaded, + // then, after each individual part has been uploaded, pass the list of ETags to + // the request to complete the upload. + List partETags = new ArrayList<>(); + + // Upload the file parts. + long filePosition = 0; + long fileLength = file.length(); + try (FileInputStream fileInputStream = new FileInputStream(file)) { + for (int i = 1; filePosition < fileLength; i++) { + // Because the last part could be less than 5 MB, adjust the part size as + // needed. + partSize = Math.min(partSize, (fileLength - filePosition)); + + // Create the request to upload a part. + UploadPartRequest uploadRequest = new UploadPartRequest() + .withBucketName(bucketName) + .withKey(key) + .withUploadId(uploadId) + .withPartNumber(i) + .withFileOffset(filePosition) + .withFile(file) + .withPartSize(partSize); + + // Upload the part and add the response's ETag to our list. 
+ UploadPartResult uploadResult = s3Client.uploadPart(uploadRequest); + PartETag partETag = uploadResult.getPartETag(); + assertEquals(i, partETag.getPartNumber()); + assertEquals(DatatypeConverter.printHexBinary( + calculateDigest(fileInputStream, 0, (int) partSize)).toLowerCase(), partETag.getETag()); + partETags.add(partETag); + + filePosition += partSize; + } + } + + return partETags; + } + + private void completeMultipartUpload(String bucketName, String key, String uploadId, List partETags) { + CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucketName, key, + uploadId, partETags); + CompleteMultipartUploadResult compResponse = s3Client.completeMultipartUpload(compRequest); + assertEquals(bucketName, compResponse.getBucketName()); + assertEquals(key, compResponse.getKey()); + } + + private void abortMultipartUpload(String bucketName, String key, String uploadId) { + AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucketName, key, uploadId); + s3Client.abortMultipartUpload(abortRequest); + } + + private static byte[] calculateDigest(InputStream inputStream, int skip, int length) throws Exception { + int numRead; + byte[] buffer = new byte[1024]; + + MessageDigest complete = MessageDigest.getInstance("MD5"); + if (skip > -1 && length > -1) { + inputStream = new InputSubstream(inputStream, skip, length); + } + + do { + numRead = inputStream.read(buffer); + if (numRead > 0) { + complete.update(buffer, 0, numRead); + } + } while (numRead != -1); + + return complete.digest(); + } + + private static void createFile(File newFile, int size) throws IOException { + // write random data so that filesystems with compression enabled (e.g. ZFS) + // can't compress the file + Random random = new Random(); + byte[] data = new byte[size]; + random.nextBytes(data); + + RandomAccessFile file = new RandomAccessFile(newFile, "rws"); + + file.write(data); + + file.getFD().sync(); + file.close(); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1.java new file mode 100644 index 00000000000..5e9b3633be0 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.s3.awssdk.v1; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Timeout; + +import java.io.IOException; + +/** + * Tests the AWS S3 SDK basic operations with OM Ratis disabled. + */ +@Timeout(300) +public class TestS3SDKV1 extends AbstractS3SDKV1Tests { + + @BeforeAll + public static void init() throws Exception { + OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, false); + conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1); + startCluster(conf); + } + + @AfterAll + public static void shutdown() throws IOException { + shutdownCluster(); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1WithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1WithRatis.java new file mode 100644 index 00000000000..cb614453f69 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1WithRatis.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.s3.awssdk.v1; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; + +import java.io.IOException; + +/** + * Tests the AWS S3 SDK basic operations with OM Ratis enabled. + */ +public class TestS3SDKV1WithRatis extends AbstractS3SDKV1Tests { + + @BeforeAll + public static void init() throws Exception { + OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, + false); + conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); + conf.setBoolean(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, + true); + startCluster(conf); + } + + @AfterAll + public static void shutdown() throws IOException { + shutdownCluster(); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1WithRatisStreaming.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1WithRatisStreaming.java new file mode 100644 index 00000000000..571d4c64908 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1WithRatisStreaming.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.s3.awssdk.v1; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Timeout; + +import java.io.IOException; + +/** + * Tests the AWS S3 SDK basic operations with OM Ratis enabled and Streaming Write Pipeline. + */ +@Timeout(300) +public class TestS3SDKV1WithRatisStreaming extends AbstractS3SDKV1Tests { + + @BeforeAll + public static void init() throws Exception { + OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, + false); + conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); + conf.setBoolean(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, + true); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); + conf.setBoolean(OzoneConfigKeys.OZONE_FS_DATASTREAM_ENABLED, true); + // Ensure that all writes use datastream + conf.set(OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD, "0MB"); + startCluster(conf); + } + + @AfterAll + public static void shutdown() throws IOException { + shutdownCluster(); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java index 6f6c5439d8c..730a2479a51 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java @@ -259,5 +259,27 @@ public void testDeletedBlocksTxnSubcommand() throws Exception { currentValidTxnNum = deletedBlockLog.getNumOfValidTransactions(); LOG.info("Valid num of txns: {}", currentValidTxnNum); assertEquals(30, currentValidTxnNum); + + // Fail the first 20 txns by incrementing their + // retry count past the threshold, which sets the count to -1 + for (int i = 0; i < maxRetry + 1; i++) { + deletedBlockLog.incrementCount(txIds); + } + flush(); + + GetFailedDeletedBlocksTxnSubcommand getFailedBlockCommand = + new GetFailedDeletedBlocksTxnSubcommand(); + outContent.reset(); + cmd = new CommandLine(getFailedBlockCommand); + // set start transaction to 15 + cmd.parseArgs("-c", "5", "-s", "15"); + getFailedBlockCommand.execute(scmClient); + matchCount = 0; + p = Pattern.compile("\"txID\" : \\d+", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + while (m.find()) { + matchCount += 1; + } + assertEquals(5, matchCount); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestNSSummaryAdmin.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestNSSummaryAdmin.java index fdc3ec00087..65cfb780fbf 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestNSSummaryAdmin.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestNSSummaryAdmin.java @@ -58,7 +58,8 @@ public class TestNSSummaryAdmin extends StandardOutputTestBase { @BeforeAll public static void init() throws Exception { - conf = new OzoneConfiguration(); + ozoneAdmin = new OzoneAdmin(); + conf = ozoneAdmin.getOzoneConf(); OMRequestTestUtils.configureFSOptimizedPaths(conf, true); conf.set(OZONE_RECON_ADDRESS_KEY, "localhost:9888"); cluster = MiniOzoneCluster.newBuilder(conf) @@ -67,9 +68,6 @@ public static void init() throws Exception { client = cluster.newClient(); store = client.getObjectStore(); - // Client uses server conf for this test - ozoneAdmin = new OzoneAdmin(conf); - volumeName = UUID.randomUUID().toString(); bucketOBS = UUID.randomUUID().toString(); bucketFSO = UUID.randomUUID().toString(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java index 9b1747b4c27..d8315cb427d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java @@ -36,9 +36,8 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.OzoneSnapshot; -import org.apache.hadoop.ozone.debug.DBScanner; import org.apache.hadoop.ozone.debug.OzoneDebug; -import org.apache.hadoop.ozone.debug.RDBParser; +import org.apache.hadoop.ozone.debug.ldb.RDBParser; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMStorage; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -84,6 +83,7 @@ public class TestOzoneDebugShell { private static MiniOzoneCluster cluster = null; private static OzoneClient client; + private static OzoneDebug ozoneDebugShell; private static OzoneConfiguration conf = null; @@ -101,7 +101,8 @@ protected static void startCluster() throws Exception { @BeforeAll public static void init() throws Exception { - conf = new OzoneConfiguration(); + ozoneDebugShell = new OzoneDebug(); + conf = ozoneDebugShell.getOzoneConf(); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); @@ -149,7 +150,6 @@ public void testLdbCliForOzoneSnapshot() throws Exception { StringWriter stdout = new StringWriter(); PrintWriter pstdout = new PrintWriter(stdout); CommandLine cmd = new CommandLine(new RDBParser()) - .addSubcommand(new DBScanner()) .setOut(pstdout); final String volumeName = UUID.randomUUID().toString(); final String bucketName = UUID.randomUUID().toString(); @@ -208,7 +208,6 @@ private int runChunkInfoCommand(String volumeName, String bucketName, getSetConfStringFromConf(OMConfigKeys.OZONE_OM_ADDRESS_KEY), "chunkinfo", bucketPath + Path.SEPARATOR + keyName }; - OzoneDebug ozoneDebugShell = new OzoneDebug(conf); int exitCode = ozoneDebugShell.execute(args); return exitCode; } @@ -220,7 +219,6 @@ private int runChunkInfoAndVerifyPaths(String volumeName, String bucketName, String[] args = new String[] { getSetConfStringFromConf(OMConfigKeys.OZONE_OM_ADDRESS_KEY), "chunkinfo", bucketPath + Path.SEPARATOR + keyName }; - OzoneDebug ozoneDebugShell = new OzoneDebug(conf); int exitCode = 1; try (GenericTestUtils.SystemOutCapturer capture = new GenericTestUtils 
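The TestNSSummaryAdmin and TestOzoneDebugShell hunks above share one pattern: the CLI object is constructed first and the MiniOzoneCluster is started from the configuration the CLI already holds, so the shell and the cluster resolve the same addresses. A minimal sketch of the pattern using OzoneDebug, with the test-specific tuning elided:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.debug.OzoneDebug;

public final class SharedCliConfSketch {
  private SharedCliConfSketch() { }

  static MiniOzoneCluster startClusterSharingCliConf() throws Exception {
    OzoneDebug shell = new OzoneDebug();              // CLI built first, without an explicit conf
    OzoneConfiguration conf = shell.getOzoneConf();   // the cluster is seeded from the CLI's own conf
    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf).build();
    cluster.waitForClusterToBeReady();
    // shell.execute(args) now uses the same configuration as the running cluster.
    return cluster;
  }
}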
.SystemOutCapturer()) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java index 9216c909ee4..e770a36c737 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java @@ -19,15 +19,10 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.debug.DBScanner; -import org.apache.hadoop.ozone.debug.RDBParser; +import org.apache.hadoop.ozone.debug.ldb.RDBParser; import org.apache.hadoop.ozone.om.OMStorage; import org.apache.hadoop.ozone.repair.OzoneRepair; -import org.apache.hadoop.ozone.repair.RDBRepair; -import org.apache.hadoop.ozone.repair.TransactionInfoRepair; -import org.apache.hadoop.ozone.repair.quota.QuotaRepair; -import org.apache.hadoop.ozone.repair.quota.QuotaStatus; -import org.apache.hadoop.ozone.repair.quota.QuotaTrigger; +import org.apache.hadoop.ozone.repair.ldb.RDBRepair; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; @@ -89,7 +84,7 @@ public void reset() { @Test public void testUpdateTransactionInfoTable() throws Exception { - CommandLine cmd = new CommandLine(new RDBRepair()).addSubcommand(new TransactionInfoRepair()); + CommandLine cmd = new CommandLine(new RDBRepair()); String dbPath = new File(OMStorage.getOmDbDir(conf) + "/" + OM_DB_NAME).getPath(); cluster.getOzoneManager().stop(); @@ -120,7 +115,7 @@ public void testUpdateTransactionInfoTable() throws Exception { } private String scanTransactionInfoTable(String dbPath) throws Exception { - CommandLine cmdDBScanner = new CommandLine(new RDBParser()).addSubcommand(new DBScanner()); + CommandLine cmdDBScanner = new CommandLine(new RDBParser()); String[] argsDBScanner = new String[] {"--db=" + dbPath, "scan", "--column_family", "transactionInfoTable"}; cmdDBScanner.execute(argsDBScanner); @@ -138,12 +133,11 @@ private String[] parseScanOutput(String output) throws IOException { @Test public void testQuotaRepair() throws Exception { - CommandLine cmd = new CommandLine(new OzoneRepair()).addSubcommand(new CommandLine(new QuotaRepair()) - .addSubcommand(new QuotaStatus()).addSubcommand(new QuotaTrigger())); + CommandLine cmd = new OzoneRepair().getCmd(); String[] args = new String[] {"quota", "status", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY)}; int exitCode = cmd.execute(args); - assertEquals(0, exitCode); + assertEquals(0, exitCode, err::toString); args = new String[] {"quota", "start", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY)}; exitCode = cmd.execute(args); assertEquals(0, exitCode); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index 4c5325edab1..4dc06d8eeb9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -23,7 +23,6 @@ import java.io.IOException; import java.io.PrintStream; import java.io.UnsupportedEncodingException; -import java.net.URI; import java.util.Map; import java.util.Arrays; import 
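TestOzoneRepairShell above now builds CommandLine from the root command alone instead of calling addSubcommand, which works when subcommands are registered declaratively on the command classes. A generic picocli sketch of that registration style; the command names here are illustrative only, not the actual Ozone commands:

import picocli.CommandLine;
import picocli.CommandLine.Command;

// Illustrative only: "scan" stands in for a subcommand such as the ldb scanner.
@Command(name = "parent", subcommands = {ParentCommandSketch.ScanSketch.class})
public class ParentCommandSketch implements Runnable {
  @Override
  public void run() { }

  @Command(name = "scan")
  static class ScanSketch implements Runnable {
    @Override
    public void run() {
      System.out.println("scan invoked");
    }
  }

  public static void main(String[] args) {
    // No addSubcommand(...) needed; picocli discovers ScanSketch from the annotation.
    new CommandLine(new ParentCommandSketch()).execute("scan");
  }
}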
java.util.HashSet; @@ -35,12 +34,12 @@ import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.kms.KMSClientProvider; import org.apache.hadoop.crypto.key.kms.server.MiniKMS; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.TrashPolicy; import org.apache.hadoop.hdds.cli.GenericCli; @@ -112,6 +111,7 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestMethodOrder; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; import org.slf4j.Logger; @@ -136,12 +136,13 @@ public class TestOzoneShellHA { LoggerFactory.getLogger(TestOzoneShellHA.class); private static final String DEFAULT_ENCODING = UTF_8.name(); - - private static File baseDir; + @TempDir + private static java.nio.file.Path path; + @TempDir + private static File kmsDir; private static File testFile; private static String testFilePathString; private static MiniOzoneHAClusterImpl cluster = null; - private static File testDir; private static MiniKMS miniKMS; private static OzoneClient client; private OzoneShell ozoneShell = null; @@ -155,6 +156,8 @@ public class TestOzoneShellHA { private static String omServiceId; private static int numOfOMs; + private static OzoneConfiguration ozoneConfiguration; + /** * Create a MiniOzoneCluster for testing with using distributed Ozone * handler type. @@ -171,20 +174,12 @@ public static void init() throws Exception { } protected static void startKMS() throws Exception { - testDir = GenericTestUtils.getTestDir( - TestOzoneShellHA.class.getSimpleName()); - File kmsDir = new File(testDir, UUID.randomUUID().toString()); - assertTrue(kmsDir.mkdirs()); MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder(); miniKMS = miniKMSBuilder.setKmsConfDir(kmsDir).build(); miniKMS.start(); } protected static void startCluster(OzoneConfiguration conf) throws Exception { - String path = GenericTestUtils.getTempPath( - TestOzoneShellHA.class.getSimpleName()); - baseDir = new File(path); - baseDir.mkdirs(); testFilePathString = path + OZONE_URI_DELIMITER + "testFile"; testFile = new File(testFilePathString); @@ -199,6 +194,8 @@ protected static void startCluster(OzoneConfiguration conf) throws Exception { getKeyProviderURI(miniKMS)); conf.setInt(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, 10); conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); + conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT, 1); + ozoneConfiguration = conf; MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); builder.setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) @@ -221,14 +218,6 @@ public static void shutdown() { if (miniKMS != null) { miniKMS.stop(); } - - if (baseDir != null) { - FileUtil.fullyDelete(baseDir, true); - } - - if (testDir != null) { - FileUtil.fullyDelete(testDir, true); - } } @BeforeEach @@ -958,6 +947,33 @@ private String getStdOut() throws UnsupportedEncodingException { return res; } + @Test + public void testOzoneAdminCmdListAllContainer() + throws UnsupportedEncodingException { + String[] args = new String[] {"container", "create", "--scm", + "localhost:" + 
cluster.getStorageContainerManager().getClientRpcPort()}; + for (int i = 0; i < 2; i++) { + execute(ozoneAdminShell, args); + } + + String[] args1 = new String[] {"container", "list", "-c", "10", "--scm", + "localhost:" + cluster.getStorageContainerManager().getClientRpcPort()}; + execute(ozoneAdminShell, args1); + //results will be capped at the maximum allowed count + assertEquals(1, getNumOfContainers()); + + String[] args2 = new String[] {"container", "list", "-a", "--scm", + "localhost:" + cluster.getStorageContainerManager().getClientRpcPort()}; + execute(ozoneAdminShell, args2); + //Lists all containers + assertNotEquals(1, getNumOfContainers()); + } + + private int getNumOfContainers() + throws UnsupportedEncodingException { + return out.toString(DEFAULT_ENCODING).split("\"containerID\" :").length - 1; + } + /** * Helper function to retrieve Ozone client configuration for trash testing. * @param hostPrefix Scheme + Authority. e.g. ofs://om-service-test1 @@ -1149,8 +1165,6 @@ public void testListBucket() throws Exception { getClientConfForOFS(hostPrefix, cluster.getConf()); int pageSize = 20; clientConf.setInt(OZONE_FS_LISTING_PAGE_SIZE, pageSize); - URI uri = FileSystem.getDefaultUri(clientConf); - clientConf.setBoolean(String.format("fs.%s.impl.disable.cache", uri.getScheme()), true); OzoneFsShell shell = new OzoneFsShell(clientConf); String volName = "testlistbucket"; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java index 5d647507141..09770b097f8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java @@ -21,7 +21,6 @@ import com.google.common.base.Strings; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.io.retry.RetryInvocationHandler; @@ -44,6 +43,7 @@ import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; @@ -54,6 +54,7 @@ import java.io.IOException; import java.io.PrintStream; import java.io.UnsupportedEncodingException; +import java.nio.file.Path; import java.security.PrivilegedExceptionAction; import java.util.Arrays; import java.util.List; @@ -91,7 +92,8 @@ public class TestOzoneTenantShell { * Set the timeout for every test. 
*/ - private static File baseDir; + @TempDir + private static Path path; private static File testFile; private static final File AUDIT_LOG_FILE = new File("audit.log"); @@ -137,11 +139,6 @@ public static void init() throws Exception { conf.setBoolean(OZONE_OM_TENANT_DEV_SKIP_RANGER, true); } - String path = GenericTestUtils.getTempPath( - TestOzoneTenantShell.class.getSimpleName()); - baseDir = new File(path); - baseDir.mkdirs(); - testFile = new File(path + OzoneConsts.OZONE_URI_DELIMITER + "testFile"); testFile.getParentFile().mkdirs(); testFile.createNewFile(); @@ -169,10 +166,6 @@ public static void shutdown() { cluster.shutdown(); } - if (baseDir != null) { - FileUtil.fullyDelete(baseDir, true); - } - if (AUDIT_LOG_FILE.exists()) { AUDIT_LOG_FILE.delete(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java index 7c7f2b77ec5..cc508782a3d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java @@ -67,7 +67,8 @@ public class TestReconfigShell { */ @BeforeAll public static void setup() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); + ozoneAdmin = new OzoneAdmin(); + OzoneConfiguration conf = ozoneAdmin.getOzoneConf(); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); String omServiceId = UUID.randomUUID().toString(); cluster = MiniOzoneCluster.newHABuilder(conf) @@ -77,7 +78,6 @@ public static void setup() throws Exception { .setNumDatanodes(DATANODE_COUNT) .build(); cluster.waitForClusterToBeReady(); - ozoneAdmin = new OzoneAdmin(cluster.getConf()); ozoneManager = cluster.getOzoneManager(); storageContainerManager = cluster.getStorageContainerManager(); datanodeServices = cluster.getHddsDatanodes(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java index d3d7c7766e7..cde7583956c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java @@ -26,9 +26,10 @@ import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.ratis.protocol.RaftPeer; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import java.io.IOException; import java.util.ArrayList; @@ -43,6 +44,7 @@ /** * Test transferLeadership with SCM HA setup. 
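TestOzoneShellHA and TestOzoneTenantShell above replace hand-managed temp directories (GenericTestUtils paths cleaned up with FileUtil.fullyDelete) with JUnit 5 @TempDir fields, which the framework creates before the tests and deletes afterwards. A small self-contained sketch of the idiom:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

import static org.junit.jupiter.api.Assertions.assertTrue;

class TempDirSketchTest {
  @TempDir
  static Path baseDir;   // shared by all tests in the class, deleted by JUnit afterwards

  @Test
  void writesIntoManagedDirectory() throws IOException {
    Path testFile = baseDir.resolve("testFile");
    Files.write(testFile, "hello".getBytes(StandardCharsets.UTF_8));
    assertTrue(Files.exists(testFile));
  }
}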
*/ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) public class TestTransferLeadershipShell { private MiniOzoneHAClusterImpl cluster = null; private OzoneConfiguration conf; @@ -50,6 +52,7 @@ public class TestTransferLeadershipShell { private String scmServiceId; private int numOfOMs = 3; private int numOfSCMs = 3; + private OzoneAdmin ozoneAdmin; private static final long SNAPSHOT_THRESHOLD = 5; @@ -58,9 +61,10 @@ public class TestTransferLeadershipShell { * * @throws IOException Exception */ - @BeforeEach + @BeforeAll public void init() throws Exception { - conf = new OzoneConfiguration(); + ozoneAdmin = new OzoneAdmin(); + conf = ozoneAdmin.getOzoneConf(); omServiceId = "om-service-test1"; scmServiceId = "scm-service-test1"; conf.setLong(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD, @@ -78,7 +82,7 @@ public void init() throws Exception { /** * Shutdown MiniDFSCluster. */ - @AfterEach + @AfterAll public void shutdown() { if (cluster != null) { cluster.shutdown(); @@ -93,7 +97,6 @@ public void testOmTransfer() throws Exception { omList.remove(oldLeader); OzoneManager newLeader = omList.get(0); cluster.waitForClusterToBeReady(); - OzoneAdmin ozoneAdmin = new OzoneAdmin(conf); String[] args1 = {"om", "transfer", "-n", newLeader.getOMNodeId()}; ozoneAdmin.execute(args1); Thread.sleep(3000); @@ -117,7 +120,6 @@ public void testScmTransfer() throws Exception { scmList.remove(oldLeader); StorageContainerManager newLeader = scmList.get(0); - OzoneAdmin ozoneAdmin = new OzoneAdmin(conf); String[] args1 = {"scm", "transfer", "-n", newLeader.getScmId()}; ozoneAdmin.execute(args1); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java index 71f1b682d0f..861127916c2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java @@ -29,9 +29,11 @@ import static org.assertj.core.api.Assertions.assertThat; import java.io.IOException; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; @@ -52,9 +54,10 @@ import org.apache.hadoop.tools.DistCpOptions; import org.apache.hadoop.tools.SimpleCopyListing; import org.apache.hadoop.tools.mapred.CopyMapper; -import org.apache.hadoop.tools.util.DistCpTestUtils; +import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.util.functional.RemoteIterators; +import org.assertj.core.api.Assertions; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -534,8 +537,7 @@ public void testLargeFilesFromRemote() throws Exception { public void testSetJobId() throws Exception { describe("check jobId is set in the conf"); remoteFS.create(new Path(remoteDir, "file1")).close(); - DistCpTestUtils - .assertRunDistCp(DistCpConstants.SUCCESS, remoteDir.toString(), + assertRunDistCp(DistCpConstants.SUCCESS, remoteDir.toString(), localDir.toString(), getDefaultCLIOptionsOrNull(), conf); assertThat(conf.get(CONF_LABEL_DISTCP_JOB_ID)) .withFailMessage("DistCp job id isn't set") @@ -719,7 +721,7 @@ public void testDistCpWithIterator() throws Exception { 
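TestTransferLeadershipShell above moves its cluster setup from @BeforeEach/@AfterEach to @BeforeAll/@AfterAll on instance methods; that is only legal with the per-class test lifecycle added via @TestInstance, which lets one cluster instance serve every test in the class. A minimal sketch of the lifecycle:

import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;

@TestInstance(TestInstance.Lifecycle.PER_CLASS)
class PerClassLifecycleSketchTest {
  private String sharedResource;          // stands in for the MiniOzoneHAClusterImpl field

  @BeforeAll
  void init() {                           // non-static lifecycle methods are legal under PER_CLASS
    sharedResource = "cluster";
  }

  @Test
  void usesSharedResource() {
    System.out.println(sharedResource);
  }

  @AfterAll
  void shutdown() {                       // runs once, after all tests in this class
    sharedResource = null;
  }
}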
GenericTestUtils.LogCapturer.captureLogs(SimpleCopyListing.LOG); String options = "-useiterator -update -delete" + getDefaultCLIOptions(); - DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(), + assertRunDistCp(DistCpConstants.SUCCESS, source.toString(), dest.toString(), options, conf); // Check the target listing was also done using iterator. @@ -864,7 +866,7 @@ public void testDistCpWithFile() throws Exception { verifyPathExists(remoteFS, "", source); verifyPathExists(localFS, "", localDir); - DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(), + assertRunDistCp(DistCpConstants.SUCCESS, source.toString(), dest.toString(), getDefaultCLIOptionsOrNull(), conf); assertThat(RemoteIterators.toList(localFS.listFiles(dest, true))) @@ -889,7 +891,7 @@ public void testDistCpWithUpdateExistFile() throws Exception { verifyPathExists(remoteFS, "", source); verifyPathExists(localFS, "", dest); - DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(), + assertRunDistCp(DistCpConstants.SUCCESS, source.toString(), dest.toString(), "-delete -update" + getDefaultCLIOptions(), conf); assertThat(RemoteIterators.toList(localFS.listFiles(dest, true))) @@ -1015,4 +1017,37 @@ private void verifySkipAndCopyCounter(Job job, .withFailMessage("Mismatch in SKIP counter value") .isEqualTo(skipExpectedValue); } + + /** + * Runs distcp from src to dst, preserving XAttrs. Asserts the + * expected exit code. + * + * @param exitCode expected exit code + * @param src distcp src path + * @param dst distcp destination + * @param options distcp command line options + * @param conf Configuration to use + * @throws Exception if there is any error + */ + public static void assertRunDistCp(int exitCode, String src, String dst, + String options, Configuration conf) + throws Exception { + assertRunDistCp(exitCode, src, dst, + options == null ? new String[0] : options.trim().split(" "), conf); + } + + private static void assertRunDistCp(int exitCode, String src, String dst, + String[] options, Configuration conf) + throws Exception { + DistCp distCp = new DistCp(conf, null); + String[] optsArr = new String[options.length + 2]; + System.arraycopy(options, 0, optsArr, 0, options.length); + optsArr[optsArr.length - 2] = src; + optsArr[optsArr.length - 1] = dst; + + Assertions.assertThat(ToolRunner.run(conf, distCp, optsArr)) + .describedAs("Exit code of distcp %s", + Arrays.stream(optsArr).collect(Collectors.joining(" "))) + .isEqualTo(exitCode); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/utils/InputSubstream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/utils/InputSubstream.java new file mode 100644 index 00000000000..4908ecabf2e --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/utils/InputSubstream.java @@ -0,0 +1,115 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.utils; + +import com.google.common.base.Preconditions; + +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; + +/** + * A filter input stream implementation that exposes a range of the underlying input stream. + */ +public class InputSubstream extends FilterInputStream { + private static final int MAX_SKIPS = 100; + private long currentPosition; + private final long requestedSkipOffset; + private final long requestedLength; + private long markedPosition = 0; + + public InputSubstream(InputStream in, long skip, long length) { + super(in); + Preconditions.checkNotNull(in); + this.currentPosition = 0; + this.requestedSkipOffset = skip; + this.requestedLength = length; + } + + @Override + public int read() throws IOException { + byte[] b = new byte[1]; + int bytesRead = read(b, 0, 1); + + if (bytesRead == -1) { + return bytesRead; + } + return b[0]; + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + int count = 0; + while (currentPosition < requestedSkipOffset) { + long skippedBytes = super.skip(requestedSkipOffset - currentPosition); + if (skippedBytes == 0) { + count++; + if (count > MAX_SKIPS) { + throw new IOException( + "Unable to position the currentPosition from " + + currentPosition + " to " + + requestedSkipOffset); + } + } + currentPosition += skippedBytes; + } + + long bytesRemaining = + (requestedLength + requestedSkipOffset) - currentPosition; + if (bytesRemaining <= 0) { + return -1; + } + + len = (int) Math.min(len, bytesRemaining); + int bytesRead = super.read(b, off, len); + currentPosition += bytesRead; + + return bytesRead; + } + + @Override + public synchronized void mark(int readlimit) { + markedPosition = currentPosition; + super.mark(readlimit); + } + + @Override + public synchronized void reset() throws IOException { + currentPosition = markedPosition; + super.reset(); + } + + @Override + public void close() throws IOException { + // No-op operation since we don't want to close the underlying stream + // when the susbtream has been read + } + + @Override + public int available() throws IOException { + long bytesRemaining; + if (currentPosition < requestedSkipOffset) { + bytesRemaining = requestedLength; + } else { + bytesRemaining = + (requestedLength + requestedSkipOffset) - currentPosition; + } + + return (int) Math.min(bytesRemaining, super.available()); + } +} diff --git a/hadoop-ozone/integration-test/src/test/resources/log4j.properties b/hadoop-ozone/integration-test/src/test/resources/log4j.properties index 564b729d5fc..c732a15c48a 100644 --- a/hadoop-ozone/integration-test/src/test/resources/log4j.properties +++ b/hadoop-ozone/integration-test/src/test/resources/log4j.properties @@ -21,3 +21,4 @@ log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR log4j.logger.org.apache.hadoop.hdds.utils.db.managed=TRACE log4j.logger.org.apache.hadoop.hdds.utils.db.CodecBuffer=DEBUG +log4j.logger.org.apache.hadoop.ozone.client.OzoneClientFactory=DEBUG diff --git a/hadoop-ozone/interface-client/pom.xml b/hadoop-ozone/interface-client/pom.xml index 
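A short usage sketch for the new InputSubstream helper above: it exposes only the byte range [skip, skip + length) of the wrapped stream, and its close() intentionally leaves the underlying stream to the caller.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.utils.InputSubstream;

public final class InputSubstreamSketch {
  private InputSubstreamSketch() { }

  public static void main(String[] args) throws IOException {
    byte[] data = new byte[256];
    for (int i = 0; i < data.length; i++) {
      data[i] = (byte) i;
    }

    // Expose bytes 100..149 of the underlying stream.
    try (InputStream source = new ByteArrayInputStream(data)) {
      InputSubstream sub = new InputSubstream(source, 100, 50);
      int first = sub.read();            // 100
      int count = 1;
      while (sub.read() != -1) {         // ends after 50 bytes, not at the end of source
        count++;
      }
      System.out.println(first + " " + count);   // prints "100 50"
    }
  }
}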
18d9584fbc8..2e68deeeb3b 100644 --- a/hadoop-ozone/interface-client/pom.xml +++ b/hadoop-ozone/interface-client/pom.xml @@ -20,16 +20,17 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-interface-client - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Client interface Apache Ozone Client Interface jar true + true @@ -48,7 +49,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop.thirdparty - hadoop-shaded-protobuf_3_7 + hadoop-shaded-protobuf_3_25 @@ -187,13 +188,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/dev-support/findbugsExcludeFile.xml - - diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index f71dc44fec5..92c2b6b4cc5 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -153,6 +153,9 @@ enum Type { GetQuotaRepairStatus = 135; StartQuotaRepair = 136; SnapshotMoveTableKeys = 137; + PutObjectTagging = 140; + GetObjectTagging = 141; + DeleteObjectTagging = 142; } enum SafeMode { @@ -292,9 +295,14 @@ message OMRequest { optional RenameSnapshotRequest RenameSnapshotRequest = 129; optional ListOpenFilesRequest ListOpenFilesRequest = 130; optional QuotaRepairRequest QuotaRepairRequest = 131; + optional GetQuotaRepairStatusRequest GetQuotaRepairStatusRequest = 133; optional StartQuotaRepairRequest StartQuotaRepairRequest = 134; optional SnapshotMoveTableKeysRequest SnapshotMoveTableKeysRequest = 135; + + optional GetObjectTaggingRequest getObjectTaggingRequest = 140; + optional PutObjectTaggingRequest putObjectTaggingRequest = 141; + optional DeleteObjectTaggingRequest deleteObjectTaggingRequest = 142; } message OMResponse { @@ -424,6 +432,10 @@ message OMResponse { optional QuotaRepairResponse QuotaRepairResponse = 134; optional GetQuotaRepairStatusResponse GetQuotaRepairStatusResponse = 136; optional StartQuotaRepairResponse StartQuotaRepairResponse = 137; + + optional GetObjectTaggingResponse getObjectTaggingResponse = 140; + optional PutObjectTaggingResponse putObjectTaggingResponse = 141; + optional DeleteObjectTaggingResponse deleteObjectTaggingResponse = 142; } enum Status { @@ -1457,7 +1469,8 @@ message OMTokenProto { optional string accessKeyId = 12; optional string signature = 13; optional string strToSign = 14; - optional string omServiceId = 15; + optional string omServiceId = 15 [deprecated = true]; + optional string secretKeyId = 16; } message SecretKeyProto { @@ -2259,6 +2272,28 @@ message OMLockDetailsProto { optional uint64 writeLockNanos = 4; } +message PutObjectTaggingRequest { + required KeyArgs keyArgs = 1; +} + +message PutObjectTaggingResponse { +} + +message GetObjectTaggingRequest { + required KeyArgs keyArgs = 1; +} + +message GetObjectTaggingResponse { + repeated hadoop.hdds.KeyValue tags = 1; +} + +message DeleteObjectTaggingRequest { + required KeyArgs keyArgs = 1; +} + +message DeleteObjectTaggingResponse { +} + /** The OM service that takes care of Ozone namespace. 
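The OmClientProtocol.proto hunk above adds the GetObjectTagging, PutObjectTagging and DeleteObjectTagging messages. A hedged sketch of populating the new request through the generated protobuf builders; the KeyArgs field names (volumeName, bucketName, keyName) come from the wider OM protocol and are assumed here rather than shown in this diff:

import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetObjectTaggingRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;

public final class GetObjectTaggingRequestSketch {
  private GetObjectTaggingRequestSketch() { }

  static GetObjectTaggingRequest buildRequest() {
    // KeyArgs field names are assumptions based on the rest of the OM protocol.
    KeyArgs keyArgs = KeyArgs.newBuilder()
        .setVolumeName("vol1")
        .setBucketName("bucket1")
        .setKeyName("key1")
        .build();
    return GetObjectTaggingRequest.newBuilder()
        .setKeyArgs(keyArgs)
        .build();
  }
}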
*/ diff --git a/hadoop-ozone/interface-client/src/main/resources/proto.lock b/hadoop-ozone/interface-client/src/main/resources/proto.lock index b8f5c395bae..0b28e0953c4 100644 --- a/hadoop-ozone/interface-client/src/main/resources/proto.lock +++ b/hadoop-ozone/interface-client/src/main/resources/proto.lock @@ -3270,6 +3270,12 @@ "name": "exclusiveReplicatedSize", "type": "uint64", "optional": true + }, + { + "id": 19, + "name": "deepCleanedDeletedDir", + "type": "bool", + "optional": true } ] }, @@ -4137,6 +4143,12 @@ "name": "ecReplicationConfig", "type": "hadoop.hdds.ECReplicationConfig", "optional": true + }, + { + "id": 8, + "name": "eTag", + "type": "string", + "optional": true } ] }, @@ -4687,6 +4699,29 @@ } ] }, + { + "name": "DeleteKeyError", + "fields": [ + { + "id": 1, + "name": "key", + "type": "string", + "optional": true + }, + { + "id": 2, + "name": "errorCode", + "type": "string", + "optional": true + }, + { + "id": 3, + "name": "errorMsg", + "type": "string", + "optional": true + } + ] + }, { "name": "DeleteKeysResponse", "fields": [ @@ -4701,6 +4736,12 @@ "name": "status", "type": "bool", "optional": true + }, + { + "id": 3, + "name": "errors", + "type": "DeleteKeyError", + "is_repeated": true } ] }, @@ -5636,6 +5677,12 @@ "name": "partName", "type": "string", "optional": true + }, + { + "id": 2, + "name": "eTag", + "type": "string", + "optional": true } ] }, @@ -5699,6 +5746,12 @@ "name": "partName", "type": "string", "required": true + }, + { + "id": 3, + "name": "eTag", + "type": "string", + "optional": true } ] }, @@ -5965,6 +6018,12 @@ "name": "size", "type": "uint64", "required": true + }, + { + "id": 5, + "name": "eTag", + "type": "string", + "optional": true } ] }, @@ -6671,7 +6730,13 @@ "id": 2, "name": "updatedSnapshotDBKey", "type": "string", - "is_repeated": true + "is_repeated": true, + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] } ] }, @@ -6682,6 +6747,36 @@ "id": 1, "name": "snapshotProperty", "type": "SnapshotProperty", + "optional": true, + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "id": 2, + "name": "snapshotKey", + "type": "string", + "optional": true + }, + { + "id": 3, + "name": "snapshotSize", + "type": "SnapshotSize", + "optional": true + }, + { + "id": 4, + "name": "deepCleanedDeletedDir", + "type": "bool", + "optional": true + }, + { + "id": 5, + "name": "deepCleanedDeletedKey", + "type": "bool", "optional": true } ] @@ -6693,18 +6788,53 @@ "id": 1, "name": "snapshotKey", "type": "string", - "optional": true + "optional": true, + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] }, { "id": 2, "name": "exclusiveSize", "type": "uint64", - "optional": true + "optional": true, + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] }, { "id": 3, "name": "exclusiveReplicatedSize", "type": "uint64", + "optional": true, + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + } + ] + }, + { + "name": "SnapshotSize", + "fields": [ + { + "id": 1, + "name": "exclusiveSize", + "type": "uint64", + "optional": true + }, + { + "id": 2, + "name": "exclusiveReplicatedSize", + "type": "uint64", "optional": true } ] diff --git a/hadoop-ozone/interface-storage/pom.xml b/hadoop-ozone/interface-storage/pom.xml index ab1cc275ac1..cd2e1e34783 100644 --- a/hadoop-ozone/interface-storage/pom.xml +++ b/hadoop-ozone/interface-storage/pom.xml @@ -20,10 +20,10 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-interface-storage - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT 
Apache Ozone Storage Interface Apache Ozone Storage Interface jar diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java index ba54a44ac79..84203b1f65a 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java @@ -43,10 +43,15 @@ private TokenIdentifierCodec() { } @Override - public byte[] toPersistedFormat(OzoneTokenIdentifier object) { + public Class getTypeClass() { + return OzoneTokenIdentifier.class; + } + + @Override + public byte[] toPersistedFormat(OzoneTokenIdentifier object) throws IOException { Preconditions .checkNotNull(object, "Null object can't be converted to byte array."); - return object.toUniqueSerializedKey(); + return object.toProtoBuf().toByteArray(); } @Override @@ -55,11 +60,11 @@ public OzoneTokenIdentifier fromPersistedFormat(byte[] rawData) Preconditions.checkNotNull(rawData, "Null byte array can't converted to real object."); try { - OzoneTokenIdentifier object = OzoneTokenIdentifier.newInstance(); - return object.fromUniqueSerializedKey(rawData); + return OzoneTokenIdentifier.readProtoBuf(rawData); } catch (IOException ex) { try { - return OzoneTokenIdentifier.readProtoBuf(rawData); + OzoneTokenIdentifier object = OzoneTokenIdentifier.newInstance(); + return object.fromUniqueSerializedKey(rawData); } catch (InvalidProtocolBufferException e) { throw new IllegalArgumentException( "Can't encode the the raw data from the byte array", e); diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java index 30fe6d69b76..a2fdfb99c54 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java @@ -42,7 +42,8 @@ public final class OmPrefixInfo extends WithObjectID implements CopyObject CODEC = new DelegatedCodec<>( Proto2Codec.get(PersistedPrefixInfo.getDefaultInstance()), OmPrefixInfo::getFromProtobuf, - OmPrefixInfo::getProtobuf); + OmPrefixInfo::getProtobuf, + OmPrefixInfo.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml index ae427727def..8e78814eb6b 100644 --- a/hadoop-ozone/ozone-manager/pom.xml +++ b/hadoop-ozone/ozone-manager/pom.xml @@ -20,15 +20,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-manager - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Manager Server Apache Ozone Manager Server jar + false @@ -474,12 +475,13 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - org.codehaus.mojo + dev.aspectj aspectj-maven-plugin ${aspectj-plugin.version} 1.8 1.8 + ${project.build.directory}/aspectj-build diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java index 081477adf4d..5fd9fd6d595 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java @@ -109,7 +109,11 @@ public enum OMAction implements AuditAction { UPGRADE_CANCEL, UPGRADE_FINALIZE, - LIST_OPEN_FILES; + LIST_OPEN_FILES, + + GET_OBJECT_TAGGING, + PUT_OBJECT_TAGGING, + DELETE_OBJECT_TAGGING; @Override public String getAction() { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java index a0f3053d731..9f6d8b81c10 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java @@ -39,6 +39,7 @@ import java.time.Duration; import java.util.ArrayList; import java.util.List; +import java.util.Map; /** * Handles key level commands. @@ -177,6 +178,17 @@ ExpiredOpenKeys getExpiredOpenKeys(Duration expireThreshold, int count, List getExpiredMultipartUploads( Duration expireThreshold, int maxParts) throws IOException; + /** + * Look up an existing key from the OM table and retrieve the tags from + * the key info. + * + * @param args the args of the key provided by client. + * @param bucket the resolved parent bucket of the key. + * @return Map of the tag set associated with the key. + * @throws IOException + */ + Map getObjectTagging(OmKeyArgs args, ResolvedBucket bucket) throws IOException; + /** * Returns the metadataManager. * @return OMMetadataManager. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 9bdbc70fb99..ccda21efc93 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -41,6 +41,11 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; +import jakarta.annotation.Nonnull; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; @@ -57,6 +62,7 @@ import org.apache.hadoop.hdds.scm.net.NodeImpl; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; +import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; import org.apache.hadoop.hdds.utils.BackgroundService; import org.apache.hadoop.hdds.utils.db.StringCodec; import org.apache.hadoop.hdds.utils.db.Table; @@ -71,6 +77,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.ListKeysResult; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; @@ -86,7 +93,6 @@ import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.ozone.om.helpers.BucketLayout; import 
org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; @@ -99,18 +105,14 @@ import org.apache.hadoop.ozone.om.service.SnapshotDirectoryCleaningService; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.RequestContext; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Time; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; -import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static java.lang.String.format; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH; @@ -151,6 +153,8 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_TIMEOUT_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_THREAD_NUMBER_DIR_DELETION; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_THREAD_NUMBER_DIR_DELETION_DEFAULT; import static org.apache.hadoop.ozone.om.OzoneManagerUtils.getBucketLayout; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; @@ -159,15 +163,11 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.SCM_GET_PIPELINE_EXCEPTION; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; -import static org.apache.hadoop.ozone.util.MetricUtil.captureLatencyNs; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY; +import static org.apache.hadoop.ozone.util.MetricUtil.captureLatencyNs; import static org.apache.hadoop.util.Time.monotonicNow; -import jakarta.annotation.Nonnull; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * Implementation of keyManager. 
*/ @@ -259,8 +259,16 @@ public void start(OzoneConfiguration configuration) { OZONE_BLOCK_DELETING_SERVICE_TIMEOUT, OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); - dirDeletingService = new DirectoryDeletingService(dirDeleteInterval, - TimeUnit.MILLISECONDS, serviceTimeout, ozoneManager, configuration); + int dirDeletingServiceCorePoolSize = + configuration.getInt(OZONE_THREAD_NUMBER_DIR_DELETION, + OZONE_THREAD_NUMBER_DIR_DELETION_DEFAULT); + if (dirDeletingServiceCorePoolSize <= 0) { + dirDeletingServiceCorePoolSize = 1; + } + dirDeletingService = + new DirectoryDeletingService(dirDeleteInterval, TimeUnit.MILLISECONDS, + serviceTimeout, ozoneManager, configuration, + dirDeletingServiceCorePoolSize); dirDeletingService.start(); } @@ -737,6 +745,16 @@ public List getExpiredMultipartUploads( maxParts); } + @Override + public Map getObjectTagging(OmKeyArgs args, ResolvedBucket bucket) throws IOException { + Preconditions.checkNotNull(args); + + OmKeyInfo value = captureLatencyNs(metrics.getLookupReadKeyInfoLatencyNs(), + () -> readKeyInfo(args, bucket.bucketLayout())); + + return value.getTags(); + } + @Override public OMMetadataManager getMetadataManager() { return metadataManager; @@ -1721,7 +1739,7 @@ public List listStatus(OmKeyArgs args, boolean recursive, cacheKeyMap.clear(); List keyInfoList = new ArrayList<>(fileStatusList.size()); - fileStatusList.stream().map(s -> s.getKeyInfo()).forEach(keyInfoList::add); + fileStatusList.stream().map(OzoneFileStatus::getKeyInfo).forEach(keyInfoList::add); if (args.getLatestVersionLocation()) { slimLocationVersion(keyInfoList.toArray(new OmKeyInfo[0])); } @@ -2044,7 +2062,7 @@ public List getPendingDeletionSubDirs(long volumeId, long bucketId, parentInfo.getObjectID(), ""); long countEntries = 0; - Table dirTable = metadataManager.getDirectoryTable(); + Table dirTable = metadataManager.getDirectoryTable(); try (TableIterator> iterator = dirTable.iterator()) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java index 54e81f8825d..1ba4f3d1d13 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java @@ -36,4 +36,10 @@ public interface OMMXBean extends ServiceRuntimeInfo { String getRocksDbDirectory(); + /** + * Gets the OM hostname. + * + * @return the OM hostname for the datanode. 
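The new KeyManager#getObjectTagging above resolves the key and returns its tag map. A hedged sketch of an in-OM call site, following the resolveBucketLink and bucket.update pattern that OmMetadataReader uses later in this patch; the OmKeyArgs.Builder setter names are assumed from the existing helper class rather than shown here:

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.ozone.om.KeyManager;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.ResolvedBucket;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;

public final class ObjectTaggingLookupSketch {
  private ObjectTaggingLookupSketch() { }

  static Map<String, String> readTags(OzoneManager ozoneManager, KeyManager keyManager)
      throws IOException {
    OmKeyArgs keyArgs = new OmKeyArgs.Builder()     // setter names assumed
        .setVolumeName("vol1")
        .setBucketName("bucket1")
        .setKeyName("key1")
        .build();
    ResolvedBucket bucket = ozoneManager.resolveBucketLink(keyArgs);    // same call OmMetadataReader makes
    return keyManager.getObjectTagging(bucket.update(keyArgs), bucket); // tags stored on the key info
  }
}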
+ */ + String getHostname(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java index cbe5205c10b..de4241b7ac4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java @@ -103,6 +103,10 @@ public class OMMetrics implements OmMetadataReaderMetrics { private @Metric MutableCounterLong numSetTime; private @Metric MutableCounterLong numGetKeyInfo; + private @Metric MutableCounterLong numGetObjectTagging; + private @Metric MutableCounterLong numPutObjectTagging; + private @Metric MutableCounterLong numDeleteObjectTagging; + // Failure Metrics private @Metric MutableCounterLong numVolumeCreateFails; private @Metric MutableCounterLong numVolumeUpdateFails; @@ -184,6 +188,10 @@ public class OMMetrics implements OmMetadataReaderMetrics { private @Metric MutableCounterLong numListOpenFilesFails; private @Metric MutableCounterLong getNumGetKeyInfoFails; + private @Metric MutableCounterLong numGetObjectTaggingFails; + private @Metric MutableCounterLong numPutObjectTaggingFails; + private @Metric MutableCounterLong numDeleteObjectTaggingFails; + private @Metric MutableCounterLong numRecoverLeaseFails; // Metrics for total amount of data written @@ -913,6 +921,35 @@ public void incNumGetKeyInfoFails() { getNumGetKeyInfoFails.incr(); } + @Override + public void incNumGetObjectTagging() { + numGetObjectTagging.incr(); + numKeyOps.incr(); + } + + @Override + public void incNumGetObjectTaggingFails() { + numGetObjectTaggingFails.incr(); + } + + public void incNumPutObjectTagging() { + numPutObjectTagging.incr(); + numKeyOps.incr(); + } + + public void incNumPutObjectTaggingFails() { + numPutObjectTaggingFails.incr(); + } + + public void incNumDeleteObjectTagging() { + numDeleteObjectTagging.incr(); + numKeyOps.incr(); + } + + public void incNumDeleteObjectTaggingFails() { + numDeleteObjectTaggingFails.incr(); + } + @VisibleForTesting public long getNumVolumeCreates() { return numVolumeCreates.value(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java index a01855d1b63..fc1d9e0e96f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java @@ -144,6 +144,12 @@ public static void unregister() { @Metric(about = "readFromRockDb latency in listKeys") private MutableRate listKeysReadFromRocksDbLatencyNs; + @Metric(about = "resolveBucketLink latency in getObjectTagging") + private MutableRate getObjectTaggingResolveBucketLatencyNs; + + @Metric(about = "ACLs check in getObjectTagging") + private MutableRate getObjectTaggingAclCheckLatencyNs; + public void addLookupLatency(long latencyInNs) { lookupLatencyNs.add(latencyInNs); } @@ -248,7 +254,7 @@ public void setListKeysAveragePagination(long keyCount) { public void setListKeysOpsPerSec(float opsPerSec) { listKeysOpsPerSec.set(opsPerSec); } - + MutableRate getListKeysAclCheckLatencyNs() { return listKeysAclCheckLatencyNs; } @@ -280,4 +286,16 @@ public MutableRate getDeleteKeyResolveBucketAndAclCheckLatencyNs() { public void addListKeysReadFromRocksDbLatencyNs(long latencyInNs) { 
listKeysReadFromRocksDbLatencyNs.add(latencyInNs); } + + public MutableRate getGetObjectTaggingResolveBucketLatencyNs() { + return getObjectTaggingResolveBucketLatencyNs; + } + + public MutableRate getGetObjectTaggingAclCheckLatencyNs() { + return getObjectTaggingAclCheckLatencyNs; + } + + public void addGetObjectTaggingLatencyNs(long latencyInNs) { + getObjectTaggingAclCheckLatencyNs.add(latencyInNs); + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 4873a7db491..6698ece4a8d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -108,6 +108,8 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_FS_SNAPSHOT_MAX_LIMIT_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_MAX_OPEN_FILES; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_MAX_OPEN_FILES_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DB_MAX_OPEN_FILES; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DB_MAX_OPEN_FILES_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_CHECKPOINT_DIR_CREATION_POLL_TIMEOUT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_CHECKPOINT_DIR_CREATION_POLL_TIMEOUT_DEFAULT; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; @@ -401,8 +403,9 @@ private OmMetadataManagerImpl(OzoneConfiguration conf, File dir, String name) throws IOException { lock = new OmReadOnlyLock(); omEpoch = 0; - setStore(loadDB(conf, dir, name, true, - java.util.Optional.of(Boolean.TRUE), Optional.empty())); + int maxOpenFiles = conf.getInt(OZONE_OM_SNAPSHOT_DB_MAX_OPEN_FILES, OZONE_OM_SNAPSHOT_DB_MAX_OPEN_FILES_DEFAULT); + + setStore(loadDB(conf, dir, name, true, Optional.of(Boolean.TRUE), maxOpenFiles, false, false)); initializeOmTables(CacheType.PARTIAL_CACHE, false); perfMetrics = null; } @@ -435,8 +438,7 @@ private OmMetadataManagerImpl(OzoneConfiguration conf, File dir, String name) checkSnapshotDirExist(checkpoint); } setStore(loadDB(conf, metaDir, dbName, false, - java.util.Optional.of(Boolean.TRUE), - Optional.of(maxOpenFiles), false, false)); + java.util.Optional.of(Boolean.TRUE), maxOpenFiles, false, false)); initializeOmTables(CacheType.PARTIAL_CACHE, false); } catch (IOException e) { stop(); @@ -578,7 +580,7 @@ public void start(OzoneConfiguration configuration) throws IOException { int maxOpenFiles = configuration.getInt(OZONE_OM_DB_MAX_OPEN_FILES, OZONE_OM_DB_MAX_OPEN_FILES_DEFAULT); - this.store = loadDB(configuration, metaDir, Optional.of(maxOpenFiles)); + this.store = loadDB(configuration, metaDir, maxOpenFiles); initializeOmTables(CacheType.FULL_CACHE, true); } @@ -586,33 +588,15 @@ public void start(OzoneConfiguration configuration) throws IOException { snapshotChainManager = new SnapshotChainManager(this); } - public static DBStore loadDB(OzoneConfiguration configuration, File metaDir) - throws IOException { - return loadDB(configuration, metaDir, Optional.empty()); - } - - public static DBStore loadDB(OzoneConfiguration configuration, File metaDir, Optional maxOpenFiles) - throws IOException { - return loadDB(configuration, metaDir, OM_DB_NAME, false, - java.util.Optional.empty(), maxOpenFiles, 
true, true); - } - - public static DBStore loadDB(OzoneConfiguration configuration, File metaDir, - String dbName, boolean readOnly, - java.util.Optional - disableAutoCompaction, - java.util.Optional maxOpenFiles) - throws IOException { - return loadDB(configuration, metaDir, dbName, readOnly, - disableAutoCompaction, maxOpenFiles, true, true); + public static DBStore loadDB(OzoneConfiguration configuration, File metaDir, int maxOpenFiles) throws IOException { + return loadDB(configuration, metaDir, OM_DB_NAME, false, java.util.Optional.empty(), maxOpenFiles, true, true); } @SuppressWarnings("checkstyle:parameternumber") public static DBStore loadDB(OzoneConfiguration configuration, File metaDir, String dbName, boolean readOnly, - java.util.Optional - disableAutoCompaction, - java.util.Optional maxOpenFiles, + java.util.Optional disableAutoCompaction, + int maxOpenFiles, boolean enableCompactionDag, boolean createCheckpointDirs) throws IOException { @@ -626,10 +610,10 @@ public static DBStore loadDB(OzoneConfiguration configuration, File metaDir, .setPath(Paths.get(metaDir.getPath())) .setMaxFSSnapshots(maxFSSnapshots) .setEnableCompactionDag(enableCompactionDag) - .setCreateCheckpointDirs(createCheckpointDirs); + .setCreateCheckpointDirs(createCheckpointDirs) + .setMaxNumberOfOpenFiles(maxOpenFiles); disableAutoCompaction.ifPresent( dbStoreBuilder::disableDefaultCFAutoCompaction); - maxOpenFiles.ifPresent(dbStoreBuilder::setMaxNumberOfOpenFiles); return addOMTablesAndCodecs(dbStoreBuilder).build(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java index fdee1b71287..08f2115387e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java @@ -382,6 +382,7 @@ public ListKeysLightResult listKeysLight(String volumeName, * @param obj Ozone object. * @throws IOException if there is error. */ + @Override public List getAcl(OzoneObj obj) throws IOException { String volumeName = obj.getVolumeName(); @@ -428,6 +429,45 @@ public List getAcl(OzoneObj obj) throws IOException { } } + @Override + public Map getObjectTagging(OmKeyArgs args) throws IOException { + long start = Time.monotonicNowNanos(); + + ResolvedBucket bucket = captureLatencyNs( + perfMetrics.getLookupResolveBucketLatencyNs(), + () -> ozoneManager.resolveBucketLink(args)); + + boolean auditSuccess = true; + Map auditMap = bucket.audit(args.toAuditMap()); + + OmKeyArgs resolvedArgs = bucket.update(args); + + try { + if (isAclEnabled) { + captureLatencyNs(perfMetrics.getGetObjectTaggingAclCheckLatencyNs(), + () -> checkAcls(ResourceType.KEY, StoreType.OZONE, + ACLType.READ, bucket, + args.getKeyName()) + ); + } + metrics.incNumGetObjectTagging(); + return keyManager.getObjectTagging(resolvedArgs, bucket); + } catch (Exception ex) { + metrics.incNumGetObjectTaggingFails(); + auditSuccess = false; + audit.logReadFailure(buildAuditMessageForFailure(OMAction.GET_OBJECT_TAGGING, + auditMap, ex)); + throw ex; + } finally { + if (auditSuccess) { + audit.logReadSuccess(buildAuditMessageForSuccess(OMAction.GET_OBJECT_TAGGING, + auditMap)); + } + + perfMetrics.addGetObjectTaggingLatencyNs(Time.monotonicNowNanos() - start); + } + } + /** * Checks if current caller has acl permissions. 
* diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReaderMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReaderMetrics.java index 21b2e8b990a..171242310a0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReaderMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReaderMetrics.java @@ -46,4 +46,8 @@ public interface OmMetadataReaderMetrics { void incNumKeyListFails(); void incNumGetAcl(); + + void incNumGetObjectTagging(); + + void incNumGetObjectTaggingFails(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshot.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshot.java index f863c086028..acb3a41e120 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshot.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshot.java @@ -47,6 +47,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.UUID; import java.util.stream.Collectors; @@ -185,6 +186,11 @@ public List getAcl(OzoneObj obj) throws IOException { return omMetadataReader.getAcl(normalizeOzoneObj(obj)); } + @Override + public Map getObjectTagging(OmKeyArgs args) throws IOException { + return omMetadataReader.getObjectTagging(normalizeOmKeyArgs(args)); + } + private OzoneObj normalizeOzoneObj(OzoneObj o) { if (o == null) { return null; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index dde5b22e793..f817625a979 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -38,6 +38,7 @@ import java.util.UUID; import com.google.common.cache.RemovalListener; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.server.ServerUtils; @@ -98,8 +99,8 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_DB_DIR; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_REPORT_MAX_PAGE_SIZE; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_REPORT_MAX_PAGE_SIZE_DEFAULT; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_SNAPSHOT_ERROR; import static org.apache.hadoop.ozone.om.snapshot.SnapshotDiffManager.getSnapshotRootPath; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.checkSnapshotActive; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.dropColumnFamilyHandle; @@ -351,7 +352,8 @@ public OmSnapshot load(@Nonnull UUID snapshotId) throws IOException { // If it happens, then either snapshot has been purged in between or SnapshotChain is corrupted // and missing some entries which needs investigation. 
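// With the result-code changes below, a purged or otherwise missing snapshot surfaces as an
// OMException carrying FILE_NOT_FOUND rather than a bare IOException, so callers can treat it
// as a client-visible error. Minimal handling sketch (variable names are illustrative):
try (ReferenceCounted<OmSnapshot> rcSnapshot =
         omSnapshotManager.getActiveSnapshot(volumeName, bucketName, snapshotName)) {
  // ... read through rcSnapshot.get() ...
} catch (OMException e) {
  if (e.getResult() == OMException.ResultCodes.FILE_NOT_FOUND) {
    // snapshot was purged or is missing from the snapshot chain
  }
  throw e;
}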
if (snapshotTableKey == null) { - throw new IOException("No snapshot exist with snapshotId: " + snapshotId); + throw new OMException("Snapshot " + snapshotId + + " is not found in the snapshot chain.", FILE_NOT_FOUND); } final SnapshotInfo snapshotInfo = getSnapshotInfo(snapshotTableKey); @@ -624,7 +626,12 @@ public ReferenceCounted getActiveFsMetadataOrSnapshot( String[] keyParts = keyName.split(OM_KEY_PREFIX); if (isSnapshotKey(keyParts)) { String snapshotName = keyParts[1]; - + // Updating the volumeName & bucketName in case the bucket is a linked bucket. We need to do this before a + // permission check, since linked bucket permissions and source bucket permissions could be different. + ResolvedBucket resolvedBucket = ozoneManager.resolveBucketLink(Pair.of(volumeName, + bucketName), false, false); + volumeName = resolvedBucket.realVolume(); + bucketName = resolvedBucket.realBucket(); return (ReferenceCounted) (ReferenceCounted) getActiveSnapshot(volumeName, bucketName, snapshotName); } else { @@ -656,7 +663,6 @@ private ReferenceCounted getSnapshot( // don't allow snapshot indicator without snapshot name throw new OMException(INVALID_KEY_NAME); } - String snapshotTableKey = SnapshotInfo.getTableKey(volumeName, bucketName, snapshotName); @@ -740,7 +746,7 @@ private SnapshotInfo getSnapshotInfo(String snapshotKey) throws IOException { snapshotInfo = ozoneManager.getMetadataManager().getSnapshotInfoTable().getSkipCache(snapshotKey); } if (snapshotInfo == null) { - throw new OMException("Snapshot '" + snapshotKey + "' is not found.", INVALID_SNAPSHOT_ERROR); + throw new OMException("Snapshot '" + snapshotKey + "' is not found.", FILE_NOT_FOUND); } return snapshotInfo; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotMetrics.java index 7560d453eb9..d00b12e94ce 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotMetrics.java @@ -162,5 +162,22 @@ public void incNumGetAcl() { MutableCounterLong numKeyOps; private @Metric MutableCounterLong numFSOps; + + + private @Metric + MutableCounterLong numGetObjectTagging; + private @Metric + MutableCounterLong numGetObjectTaggingFails; + + @Override + public void incNumGetObjectTagging() { + numGetObjectTagging.incr(); + numKeyOps.incr(); + } + + @Override + public void incNumGetObjectTaggingFails() { + numGetObjectTaggingFails.incr(); + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneConfigUtil.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneConfigUtil.java index c09c5b91af5..cad987bb7da 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneConfigUtil.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneConfigUtil.java @@ -19,21 +19,11 @@ import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.util.Collection; - -import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_GROUPS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_S3_ADMINISTRATORS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_S3_ADMINISTRATORS_GROUPS; - /** * Utility class for ozone configurations. */ @@ -43,38 +33,6 @@ public final class OzoneConfigUtil { private OzoneConfigUtil() { } - /** - * Return list of s3 administrators prop from config. - * - * If ozone.s3.administrators value is empty string or unset, - * defaults to ozone.administrators value. - */ - static Collection getS3AdminsFromConfig(OzoneConfiguration conf) - throws IOException { - Collection ozAdmins = - conf.getTrimmedStringCollection(OZONE_S3_ADMINISTRATORS); - if (ozAdmins == null || ozAdmins.isEmpty()) { - ozAdmins = conf.getTrimmedStringCollection(OZONE_ADMINISTRATORS); - } - String omSPN = UserGroupInformation.getCurrentUser().getShortUserName(); - if (!ozAdmins.contains(omSPN)) { - ozAdmins.add(omSPN); - } - return ozAdmins; - } - - static Collection getS3AdminsGroupsFromConfig( - OzoneConfiguration conf) { - Collection s3AdminsGroup = - conf.getTrimmedStringCollection(OZONE_S3_ADMINISTRATORS_GROUPS); - if (s3AdminsGroup.isEmpty() && conf - .getTrimmedStringCollection(OZONE_S3_ADMINISTRATORS).isEmpty()) { - s3AdminsGroup = conf - .getTrimmedStringCollection(OZONE_ADMINISTRATORS_GROUPS); - } - return s3AdminsGroup; - } - public static ReplicationConfig resolveReplicationConfigPreference( HddsProtos.ReplicationType clientType, HddsProtos.ReplicationFactor clientFactor, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index e2acafdd242..2ccc16cc285 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -87,6 +87,8 @@ import org.apache.hadoop.hdds.scm.client.ScmTopologyClient; import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; import org.apache.hadoop.hdds.scm.net.NetworkTopology; +import org.apache.hadoop.hdds.security.symmetric.DefaultSecretKeyClient; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyClient; import org.apache.hadoop.hdds.server.OzoneAdmins; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.Table.KeyValue; @@ -120,8 +122,6 @@ import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.hdds.security.SecurityConfig; -import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient; -import org.apache.hadoop.hdds.security.symmetric.DefaultSecretKeySignerClient; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl; @@ -274,7 +274,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_METRICS_TEMP_FILE; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_DIR; import static org.apache.hadoop.ozone.OzoneConsts.PREPARE_MARKER_KEY; -import static org.apache.hadoop.ozone.OzoneConsts.OM_RATIS_SNAPSHOT_DIR; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RATIS_SNAPSHOT_DIR; import static org.apache.hadoop.ozone.OzoneConsts.RPC_PORT; import 
static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK; @@ -371,7 +371,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl private OzoneDelegationTokenSecretManager delegationTokenMgr; private OzoneBlockTokenSecretManager blockTokenMgr; private CertificateClient certClient; - private SecretKeySignerClient secretKeyClient; + private SecretKeyClient secretKeyClient; private ScmTopologyClient scmTopologyClient; private final Text omRpcAddressTxt; private OzoneConfiguration configuration; @@ -475,6 +475,8 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl private boolean fsSnapshotEnabled; + private String omHostName; + /** * OM Startup mode. */ @@ -671,8 +673,8 @@ private OzoneManager(OzoneConfiguration conf, StartupOption startupOption) SecretKeyProtocol secretKeyProtocol = HddsServerUtil.getSecretKeyClientForOm(conf); - secretKeyClient = new DefaultSecretKeySignerClient(secretKeyProtocol, - omNodeDetails.threadNamePrefix()); + secretKeyClient = DefaultSecretKeyClient.create( + conf, secretKeyProtocol, omNodeDetails.threadNamePrefix()); } serviceInfo = new ServiceInfoProvider(secConfig, this, certClient, testSecureOmFlag); @@ -695,11 +697,7 @@ private OzoneManager(OzoneConfiguration conf, StartupOption startupOption) // Get read only admin list readOnlyAdmins = OzoneAdmins.getReadonlyAdmins(conf); - Collection s3AdminUsernames = - OzoneConfigUtil.getS3AdminsFromConfig(configuration); - Collection s3AdminGroups = - OzoneConfigUtil.getS3AdminsGroupsFromConfig(configuration); - s3OzoneAdmins = new OzoneAdmins(s3AdminUsernames, s3AdminGroups); + s3OzoneAdmins = OzoneAdmins.getS3Admins(conf); instantiateServices(false); // Create special volume s3v which is required for S3G. @@ -737,6 +735,7 @@ private OzoneManager(OzoneConfiguration conf, StartupOption startupOption) } bucketUtilizationMetrics = BucketUtilizationMetrics.create(metadataManager); + omHostName = HddsUtils.getHostName(conf); } public boolean isStopped() { @@ -1089,6 +1088,7 @@ private OzoneDelegationTokenSecretManager createDelegationTokenSecretManager( .setOzoneManager(this) .setS3SecretManager(s3SecretManager) .setCertificateClient(certClient) + .setSecretKeyClient(secretKeyClient) .setOmServiceId(omNodeDetails.getServiceId()) .build(); } @@ -1131,7 +1131,7 @@ public void startSecretManager() { throw new UncheckedIOException(e); } - if (secConfig.isBlockTokenEnabled() && blockTokenMgr != null) { + if (secConfig.isSecurityEnabled()) { LOG.info("Starting secret key client."); try { secretKeyClient.start(configuration); @@ -1184,10 +1184,14 @@ public NetworkTopology getClusterMap() { * without fully setting up a working secure cluster. */ @VisibleForTesting - public void setSecretKeyClient( - SecretKeySignerClient secretKeyClient) { + public void setSecretKeyClient(SecretKeyClient secretKeyClient) { this.secretKeyClient = secretKeyClient; - blockTokenMgr.setSecretKeyClient(secretKeyClient); + if (blockTokenMgr != null) { + blockTokenMgr.setSecretKeyClient(secretKeyClient); + } + if (delegationTokenMgr != null) { + delegationTokenMgr.setSecretKeyClient(secretKeyClient); + } } /** @@ -1494,7 +1498,7 @@ private void initializeRatisDirs(OzoneConfiguration conf) throws IOException { // snapshot directory in Ratis storage directory. if yes, move it to // new snapshot directory. 
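// For reference, the S3-admin helpers removed from OzoneConfigUtil earlier in this patch are
// replaced by the OzoneAdmins utility used in this file. Equivalent-usage sketch, using only
// methods that appear in this patch plus the standard Hadoop UGI lookup:
OzoneAdmins s3Admins = OzoneAdmins.getS3Admins(conf);
UserGroupInformation caller = UserGroupInformation.getCurrentUser();
boolean callerIsS3Admin = OzoneAdmins.isS3Admin(caller, s3Admins);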
- File snapshotDir = new File(omRatisDirectory, OM_RATIS_SNAPSHOT_DIR); + File snapshotDir = new File(omRatisDirectory, OZONE_RATIS_SNAPSHOT_DIR); if (snapshotDir.isDirectory()) { FileUtils.moveDirectory(snapshotDir.toPath(), @@ -2333,6 +2337,10 @@ public boolean stop() { if (bucketUtilizationMetrics != null) { bucketUtilizationMetrics.unRegister(); } + + if (versionManager != null) { + versionManager.close(); + } return true; } catch (Exception e) { LOG.error("OzoneManager stop failed.", e); @@ -2975,12 +2983,13 @@ public SnapshotInfo getSnapshotInfo(String volumeName, String bucketName, Map auditMap = buildAuditMap(volumeName); auditMap.put(OzoneConsts.BUCKET, bucketName); try { - if (isAclEnabled) { - omMetadataReader.checkAcls(ResourceType.BUCKET, StoreType.OZONE, - ACLType.READ, volumeName, bucketName, null); - } + // Updating the volumeName & bucketName in case the bucket is a linked bucket. We need to do this before a + // permission check, since linked bucket permissions and source bucket permissions could be different. + ResolvedBucket resolvedBucket = resolveBucketLink(Pair.of(volumeName, bucketName)); + auditMap = buildAuditMap(resolvedBucket.realVolume()); + auditMap.put(OzoneConsts.BUCKET, resolvedBucket.realBucket()); SnapshotInfo snapshotInfo = - metadataManager.getSnapshotInfo(volumeName, bucketName, snapshotName); + metadataManager.getSnapshotInfo(resolvedBucket.realVolume(), resolvedBucket.realBucket(), snapshotName); AUDIT.logReadSuccess(buildAuditMessageForSuccess( OMAction.SNAPSHOT_INFO, auditMap)); @@ -3001,12 +3010,17 @@ public ListSnapshotResponse listSnapshot( Map auditMap = buildAuditMap(volumeName); auditMap.put(OzoneConsts.BUCKET, bucketName); try { + // Updating the volumeName & bucketName in case the bucket is a linked bucket. We need to do this before a + // permission check, since linked bucket permissions and source bucket permissions could be different. 
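// Concrete example of the contract relied on here (names are illustrative): a link bucket
// "vol-a/buck-link" pointing at "vol-b/buck-src" resolves to its source, and snapshot
// metadata is keyed by the resolved (source) volume and bucket, not by the link.
ResolvedBucket rb = resolveBucketLink(Pair.of("vol-a", "buck-link"));
String resolvedSnapshotTableKey =
    SnapshotInfo.getTableKey(rb.realVolume(), rb.realBucket(), snapshotName);
// rb.realVolume() == "vol-b", rb.realBucket() == "buck-src"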
+ ResolvedBucket resolvedBucket = resolveBucketLink(Pair.of(volumeName, bucketName)); + auditMap = buildAuditMap(resolvedBucket.realVolume()); + auditMap.put(OzoneConsts.BUCKET, resolvedBucket.realBucket()); if (isAclEnabled) { omMetadataReader.checkAcls(ResourceType.BUCKET, StoreType.OZONE, - ACLType.LIST, volumeName, bucketName, null); + ACLType.LIST, resolvedBucket.realVolume(), resolvedBucket.realBucket(), null); } ListSnapshotResponse listSnapshotResponse = - metadataManager.listSnapshot(volumeName, bucketName, + metadataManager.listSnapshot(resolvedBucket.realVolume(), resolvedBucket.realBucket(), snapshotPrefix, prevSnapshot, maxListResult); AUDIT.logReadSuccess(buildAuditMessageForSuccess( @@ -3061,6 +3075,11 @@ private void unregisterMXBean() { } } + @Override + public String getNamespace() { + return omNodeDetails.getServiceId(); + } + @Override public String getRpcPort() { return "" + omRpcAddress.getPort(); @@ -3120,6 +3139,11 @@ public String getRocksDbDirectory() { return String.valueOf(OMStorage.getOmDbDir(configuration)); } + @Override + public String getHostname() { + return omHostName; + } + @VisibleForTesting public OzoneManagerHttpServer getHttpServer() { return httpServer; @@ -4345,7 +4369,7 @@ private void checkAdminUserPrivilege(String operation) throws IOException { } public boolean isS3Admin(UserGroupInformation callerUgi) { - return callerUgi != null && s3OzoneAdmins.isAdmin(callerUgi); + return OzoneAdmins.isS3Admin(callerUgi, s3OzoneAdmins); } @VisibleForTesting @@ -4395,10 +4419,16 @@ public ResolvedBucket resolveBucketLink(Pair requested, } public ResolvedBucket resolveBucketLink(Pair requested, - boolean allowDanglingBuckets) + boolean allowDanglingBuckets) throws IOException { + return resolveBucketLink(requested, allowDanglingBuckets, isAclEnabled); + } + + public ResolvedBucket resolveBucketLink(Pair requested, + boolean allowDanglingBuckets, + boolean aclEnabled) throws IOException { OmBucketInfo resolved; - if (isAclEnabled) { + if (aclEnabled) { UserGroupInformation ugi = getRemoteUser(); if (getS3Auth() != null) { ugi = UserGroupInformation.createRemoteUser( @@ -4409,15 +4439,26 @@ public ResolvedBucket resolveBucketLink(Pair requested, ugi, remoteIp != null ? remoteIp : omRpcAddress.getAddress(), remoteIp != null ? remoteIp.getHostName() : - omRpcAddress.getHostName(), allowDanglingBuckets); + omRpcAddress.getHostName(), allowDanglingBuckets, aclEnabled); } else { resolved = resolveBucketLink(requested, new HashSet<>(), - null, null, null, allowDanglingBuckets); + null, null, null, allowDanglingBuckets, aclEnabled); } return new ResolvedBucket(requested.getLeft(), requested.getRight(), resolved); } + private OmBucketInfo resolveBucketLink( + Pair volumeAndBucket, + Set> visited, + UserGroupInformation userGroupInformation, + InetAddress remoteAddress, + String hostName, + boolean allowDanglingBuckets) throws IOException { + return resolveBucketLink(volumeAndBucket, visited, userGroupInformation, remoteAddress, hostName, + allowDanglingBuckets, isAclEnabled); + } + /** * Resolves bucket symlinks. Read permission is required for following links. 
* @@ -4435,7 +4476,8 @@ private OmBucketInfo resolveBucketLink( UserGroupInformation userGroupInformation, InetAddress remoteAddress, String hostName, - boolean allowDanglingBuckets) throws IOException { + boolean allowDanglingBuckets, + boolean aclEnabled) throws IOException { String volumeName = volumeAndBucket.getLeft(); String bucketName = volumeAndBucket.getRight(); @@ -4458,7 +4500,7 @@ private OmBucketInfo resolveBucketLink( DETECTED_LOOP_IN_BUCKET_LINKS); } - if (isAclEnabled) { + if (aclEnabled) { final ACLType type = ACLType.READ; checkAcls(ResourceType.BUCKET, StoreType.OZONE, type, volumeName, bucketName, null, userGroupInformation, @@ -4469,7 +4511,7 @@ private OmBucketInfo resolveBucketLink( return resolveBucketLink( Pair.of(info.getSourceVolume(), info.getSourceBucket()), visited, userGroupInformation, remoteAddress, hostName, - allowDanglingBuckets); + allowDanglingBuckets, aclEnabled); } @VisibleForTesting @@ -4767,6 +4809,15 @@ public void startQuotaRepair(List buckets) throws IOException { new QuotaRepairTask(this).repair(buckets); } + @Override + public Map getObjectTagging(final OmKeyArgs args) + throws IOException { + try (ReferenceCounted rcReader = getReader(args)) { + return rcReader.get().getObjectTagging(args); + } + } + + /** * Write down Layout version of a finalized feature to DB on finalization. * @param lvm OMLayoutVersionManager @@ -4897,13 +4948,11 @@ public SnapshotDiffResponse snapshotDiff(String volume, boolean forceFullDiff, boolean disableNativeDiff) throws IOException { - - if (isAclEnabled) { - omMetadataReader.checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.READ, volume, bucket, null); - } - - return omSnapshotManager.getSnapshotDiffReport(volume, bucket, fromSnapshot, toSnapshot, - token, pageSize, forceFullDiff, disableNativeDiff); + // Updating the volumeName & bucketName in case the bucket is a linked bucket. We need to do this before a + // permission check, since linked bucket permissions and source bucket permissions could be different. 
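// The resolveBucketLink overloads added above give callers two modes: the snapshot read paths
// in this patch resolve links without enforcing READ on every link in the chain and apply
// permission checks afterwards against the resolved bucket. Sketch of the two call forms:
ResolvedBucket checked = resolveBucketLink(Pair.of(volume, bucket), false);           // honors isAclEnabled
ResolvedBucket unchecked = resolveBucketLink(Pair.of(volume, bucket), false, false);  // skips link-chain ACL checks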
+ ResolvedBucket resolvedBucket = resolveBucketLink(Pair.of(volume, bucket), false); + return omSnapshotManager.getSnapshotDiffReport(resolvedBucket.realVolume(), resolvedBucket.realBucket(), + fromSnapshot, toSnapshot, token, pageSize, forceFullDiff, disableNativeDiff); } public CancelSnapshotDiffResponse cancelSnapshotDiff(String volume, @@ -4911,12 +4960,9 @@ public CancelSnapshotDiffResponse cancelSnapshotDiff(String volume, String fromSnapshot, String toSnapshot) throws IOException { - - if (isAclEnabled) { - omMetadataReader.checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.READ, volume, bucket, null); - } - - return omSnapshotManager.cancelSnapshotDiff(volume, bucket, fromSnapshot, toSnapshot); + ResolvedBucket resolvedBucket = this.resolveBucketLink(Pair.of(volume, bucket), false); + return omSnapshotManager.cancelSnapshotDiff(resolvedBucket.realVolume(), resolvedBucket.realBucket(), + fromSnapshot, toSnapshot); } public List listSnapshotDiffJobs(String volume, @@ -4924,12 +4970,13 @@ public List listSnapshotDiffJobs(String volume, String jobStatus, boolean listAll) throws IOException { - + ResolvedBucket resolvedBucket = this.resolveBucketLink(Pair.of(volume, bucket), false); if (isAclEnabled) { omMetadataReader.checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.LIST, volume, bucket, null); } - return omSnapshotManager.getSnapshotDiffList(volume, bucket, jobStatus, listAll); + return omSnapshotManager.getSnapshotDiffList(resolvedBucket.realVolume(), resolvedBucket.realBucket(), + jobStatus, listAll); } public String printCompactionLogDag(String fileNamePrefix, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashPolicyOzone.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashPolicyOzone.java index 9064d5d454c..2aa8114e278 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashPolicyOzone.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashPolicyOzone.java @@ -33,22 +33,14 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.TrashPolicyDefault; import org.apache.hadoop.fs.FileAlreadyExistsException; -import org.apache.hadoop.fs.permission.FsAction; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.fs.InvalidPathException; +import org.apache.hadoop.fs.ozone.OzoneTrashPolicy; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.conf.OMClientConfig; -import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; -import org.apache.hadoop.ozone.OFSPath; import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_CHECKPOINT_INTERVAL_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT; @@ -57,45 +49,26 @@ * of TrashPolicy ozone-specific trash optimizations are/will be made such as * having a multithreaded TrashEmptier. 
*/ -public class TrashPolicyOzone extends TrashPolicyDefault { +public class TrashPolicyOzone extends OzoneTrashPolicy { private static final Logger LOG = LoggerFactory.getLogger(TrashPolicyOzone.class); - private static final Path CURRENT = new Path("Current"); - - private static final FsPermission PERMISSION = - new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE); - private static final DateFormat CHECKPOINT = new SimpleDateFormat( "yyMMddHHmmss"); /** Format of checkpoint directories used prior to Hadoop 0.23. */ private static final DateFormat OLD_CHECKPOINT = new SimpleDateFormat("yyMMddHHmm"); - private static final int MSECS_PER_MINUTE = 60 * 1000; - private long emptierInterval; - private Configuration configuration; - private OzoneManager om; - private OzoneConfiguration ozoneConfiguration; - public TrashPolicyOzone() { } @Override public void initialize(Configuration conf, FileSystem fs) { - this.fs = fs; - this.configuration = conf; - float hadoopTrashInterval = conf.getFloat( - FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT); - // check whether user has configured ozone specific trash-interval - // if not fall back to hadoop configuration - this.deletionInterval = (long)(conf.getFloat( - OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, hadoopTrashInterval) - * MSECS_PER_MINUTE); + super.initialize(conf, fs); float hadoopCheckpointInterval = conf.getFloat( FS_TRASH_CHECKPOINT_INTERVAL_KEY, FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT); @@ -112,7 +85,6 @@ public void initialize(Configuration conf, FileSystem fs) { + "Changing to default value 0", deletionInterval); this.deletionInterval = 0; } - ozoneConfiguration = OzoneConfiguration.of(this.configuration); } TrashPolicyOzone(FileSystem fs, Configuration conf, OzoneManager om) { @@ -122,142 +94,10 @@ public void initialize(Configuration conf, FileSystem fs) { @Override public Runnable getEmptier() throws IOException { - return new TrashPolicyOzone.Emptier((OzoneConfiguration) configuration, + return new TrashPolicyOzone.Emptier(getOzoneConfiguration(), emptierInterval, om.getThreadNamePrefix()); } - @Override - public boolean moveToTrash(Path path) throws IOException { - if (validatePath(path)) { - if (!isEnabled()) { - return false; - } - - if (!path.isAbsolute()) { // make path absolute - path = new Path(fs.getWorkingDirectory(), path); - } - - // check that path exists - fs.getFileStatus(path); - String qpath = fs.makeQualified(path).toString(); - - Path trashRoot = fs.getTrashRoot(path); - Path trashCurrent = new Path(trashRoot, CURRENT); - if (qpath.startsWith(trashRoot.toString())) { - return false; // already in trash - } - - if (trashRoot.getParent().toString().startsWith(qpath)) { - throw new IOException("Cannot move \"" + path - + "\" to the trash, as it contains the trash"); - } - - Path trashPath; - Path baseTrashPath; - if (fs.getUri().getScheme().equals(OzoneConsts.OZONE_OFS_URI_SCHEME)) { - OFSPath ofsPath = new OFSPath(path, ozoneConfiguration); - // trimming volume and bucket in order to be compatible with o3fs - // Also including volume and bucket name in the path is redundant as - // the key is already in a particular volume and bucket. 
- Path trimmedVolumeAndBucket = - new Path(OzoneConsts.OZONE_URI_DELIMITER - + ofsPath.getKeyName()); - trashPath = makeTrashRelativePath(trashCurrent, trimmedVolumeAndBucket); - baseTrashPath = makeTrashRelativePath(trashCurrent, - trimmedVolumeAndBucket.getParent()); - } else { - trashPath = makeTrashRelativePath(trashCurrent, path); - baseTrashPath = makeTrashRelativePath(trashCurrent, path.getParent()); - } - - IOException cause = null; - - // try twice, in case checkpoint between the mkdirs() & rename() - for (int i = 0; i < 2; i++) { - try { - if (!fs.mkdirs(baseTrashPath, PERMISSION)) { // create current - LOG.warn("Can't create(mkdir) trash directory: " + baseTrashPath); - return false; - } - } catch (FileAlreadyExistsException e) { - // find the path which is not a directory, and modify baseTrashPath - // & trashPath, then mkdirs - Path existsFilePath = baseTrashPath; - while (!fs.exists(existsFilePath)) { - existsFilePath = existsFilePath.getParent(); - } - baseTrashPath = new Path(baseTrashPath.toString() - .replace(existsFilePath.toString(), - existsFilePath.toString() + Time.now())); - trashPath = new Path(baseTrashPath, trashPath.getName()); - // retry, ignore current failure - --i; - continue; - } catch (IOException e) { - LOG.warn("Can't create trash directory: " + baseTrashPath, e); - cause = e; - break; - } - try { - // if the target path in Trash already exists, then append with - // a current time in millisecs. - String orig = trashPath.toString(); - - while (fs.exists(trashPath)) { - trashPath = new Path(orig + Time.now()); - } - - // move to current trash - boolean renamed = fs.rename(path, trashPath); - if (!renamed) { - LOG.error("Failed to move to trash: {}", path); - throw new IOException("Failed to move to trash: " + path); - } - LOG.info("Moved: '" + path + "' to trash at: " + trashPath); - return true; - } catch (IOException e) { - cause = e; - } - } - throw (IOException) new IOException("Failed to move to trash: " + path) - .initCause(cause); - } - return false; - } - - private boolean validatePath(Path path) throws IOException { - String key = path.toUri().getPath(); - // Check to see if bucket is path item to be deleted. 
- // Cannot moveToTrash if bucket is deleted, - // return error for this condition - OFSPath ofsPath = new OFSPath(key.substring(1), ozoneConfiguration); - if (path.isRoot() || ofsPath.isBucket()) { - throw new IOException("Recursive rm of bucket " - + path.toString() + " not permitted"); - } - - Path trashRoot = this.fs.getTrashRoot(path); - - LOG.debug("Key path to moveToTrash: {}", key); - String trashRootKey = trashRoot.toUri().getPath(); - LOG.debug("TrashrootKey for moveToTrash: {}", trashRootKey); - - if (!OzoneFSUtils.isValidName(key)) { - throw new InvalidPathException("Invalid path Name " + key); - } - // first condition tests when length key is <= length trash - // and second when length key > length trash - if ((key.contains(this.fs.TRASH_PREFIX)) && (trashRootKey.startsWith(key)) - || key.startsWith(trashRootKey)) { - return false; - } - return true; - } - - private Path makeTrashRelativePath(Path basePath, Path rmFilePath) { - return Path.mergePaths(basePath, rmFilePath); - } - protected class Emptier implements Runnable { private Configuration conf; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java index de567447ae3..be57a7b7451 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java @@ -50,145 +50,115 @@ /** * Class defines the structure and types of the om.db. */ -public class OMDBDefinition extends DBDefinition.WithMap { +public final class OMDBDefinition extends DBDefinition.WithMap { public static final DBColumnFamilyDefinition DELETED_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.DELETED_TABLE, - String.class, StringCodec.get(), - RepeatedOmKeyInfo.class, RepeatedOmKeyInfo.getCodec(true)); public static final DBColumnFamilyDefinition USER_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.USER_TABLE, - String.class, StringCodec.get(), - PersistedUserVolumeInfo.class, Proto2Codec.get(PersistedUserVolumeInfo.getDefaultInstance())); public static final DBColumnFamilyDefinition VOLUME_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.VOLUME_TABLE, - String.class, StringCodec.get(), - OmVolumeArgs.class, OmVolumeArgs.getCodec()); public static final DBColumnFamilyDefinition OPEN_KEY_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.OPEN_KEY_TABLE, - String.class, StringCodec.get(), - OmKeyInfo.class, OmKeyInfo.getCodec(true)); public static final DBColumnFamilyDefinition KEY_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.KEY_TABLE, - String.class, StringCodec.get(), - OmKeyInfo.class, OmKeyInfo.getCodec(true)); public static final DBColumnFamilyDefinition BUCKET_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.BUCKET_TABLE, - String.class, StringCodec.get(), - OmBucketInfo.class, OmBucketInfo.getCodec()); public static final DBColumnFamilyDefinition MULTIPART_INFO_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.MULTIPARTINFO_TABLE, - String.class, StringCodec.get(), - OmMultipartKeyInfo.class, OmMultipartKeyInfo.getCodec()); public static final DBColumnFamilyDefinition PREFIX_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.PREFIX_TABLE, - String.class, StringCodec.get(), - OmPrefixInfo.class, OmPrefixInfo.getCodec()); public static final DBColumnFamilyDefinition 
DTOKEN_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.DELEGATION_TOKEN_TABLE, - OzoneTokenIdentifier.class, TokenIdentifierCodec.get(), - Long.class, LongCodec.get()); public static final DBColumnFamilyDefinition S3_SECRET_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.S3_SECRET_TABLE, - String.class, StringCodec.get(), - S3SecretValue.class, S3SecretValue.getCodec()); public static final DBColumnFamilyDefinition TRANSACTION_INFO_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.TRANSACTION_INFO_TABLE, - String.class, StringCodec.get(), - TransactionInfo.class, TransactionInfo.getCodec()); public static final DBColumnFamilyDefinition DIRECTORY_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.DIRECTORY_TABLE, - String.class, StringCodec.get(), - OmDirectoryInfo.class, OmDirectoryInfo.getCodec()); public static final DBColumnFamilyDefinition FILE_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.FILE_TABLE, - String.class, StringCodec.get(), - OmKeyInfo.class, OmKeyInfo.getCodec(true)); public static final DBColumnFamilyDefinition OPEN_FILE_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.OPEN_FILE_TABLE, - String.class, StringCodec.get(), - OmKeyInfo.class, OmKeyInfo.getCodec(true)); public static final DBColumnFamilyDefinition DELETED_DIR_TABLE = new DBColumnFamilyDefinition<>(OmMetadataManagerImpl.DELETED_DIR_TABLE, - String.class, StringCodec.get(), OmKeyInfo.class, + StringCodec.get(), OmKeyInfo.getCodec(true)); public static final DBColumnFamilyDefinition META_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.META_TABLE, - String.class, StringCodec.get(), - String.class, StringCodec.get()); // Tables for multi-tenancy @@ -197,27 +167,26 @@ public class OMDBDefinition extends DBDefinition.WithMap { TENANT_ACCESS_ID_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.TENANT_ACCESS_ID_TABLE, - String.class, // accessId + // accessId StringCodec.get(), - OmDBAccessIdInfo.class, // tenantId, secret, principal + // tenantId, secret, principal OmDBAccessIdInfo.getCodec()); public static final DBColumnFamilyDefinition PRINCIPAL_TO_ACCESS_IDS_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.PRINCIPAL_TO_ACCESS_IDS_TABLE, - String.class, // User principal + // User principal StringCodec.get(), - OmDBUserPrincipalInfo.class, // List of accessIds + // List of accessIds OmDBUserPrincipalInfo.getCodec()); public static final DBColumnFamilyDefinition TENANT_STATE_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.TENANT_STATE_TABLE, - String.class, // tenantId (tenant name) + // tenantId (tenant name) StringCodec.get(), - OmDBTenantState.class, OmDBTenantState.getCodec()); // End tables for S3 multi-tenancy @@ -226,18 +195,15 @@ public class OMDBDefinition extends DBDefinition.WithMap { SNAPSHOT_INFO_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE, - String.class, // snapshot path + // snapshot path StringCodec.get(), - SnapshotInfo.class, SnapshotInfo.getCodec()); public static final DBColumnFamilyDefinition COMPACTION_LOG_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.COMPACTION_LOG_TABLE, - String.class, StringCodec.get(), - CompactionLogEntry.class, CompactionLogEntry.getCodec()); /** @@ -254,9 +220,9 @@ public class OMDBDefinition extends DBDefinition.WithMap { SNAPSHOT_RENAMED_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.SNAPSHOT_RENAMED_TABLE, - String.class, // /volumeName/bucketName/objectID + // 
/volumeName/bucketName/objectID StringCodec.get(), - String.class, // path to key in prev snapshot's key(file)/dir Table. + // path to key in prev snapshot's key(file)/dir Table. StringCodec.get()); private static final Map> @@ -284,7 +250,13 @@ public class OMDBDefinition extends DBDefinition.WithMap { USER_TABLE, VOLUME_TABLE); - public OMDBDefinition() { + private static final OMDBDefinition INSTANCE = new OMDBDefinition(); + + public static OMDBDefinition get() { + return INSTANCE; + } + + private OMDBDefinition() { super(COLUMN_FAMILIES); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/helpers/OMAuditLogger.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/helpers/OMAuditLogger.java index 18ee42756ef..491f2dadbf8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/helpers/OMAuditLogger.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/helpers/OMAuditLogger.java @@ -91,6 +91,9 @@ private static void init() { CMD_AUDIT_ACTION_MAP.put(Type.Prepare, OMAction.UPGRADE_PREPARE); CMD_AUDIT_ACTION_MAP.put(Type.CancelPrepare, OMAction.UPGRADE_CANCEL); CMD_AUDIT_ACTION_MAP.put(Type.FinalizeUpgrade, OMAction.UPGRADE_FINALIZE); + CMD_AUDIT_ACTION_MAP.put(Type.GetObjectTagging, OMAction.GET_OBJECT_TAGGING); + CMD_AUDIT_ACTION_MAP.put(Type.PutObjectTagging, OMAction.PUT_OBJECT_TAGGING); + CMD_AUDIT_ACTION_MAP.put(Type.DeleteObjectTagging, OMAction.DELETE_OBJECT_TAGGING); } private static OMAction getAction(OzoneManagerProtocolProtos.OMRequest request) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/RangerClientMultiTenantAccessController.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/RangerClientMultiTenantAccessController.java index 4aae4d9a77e..31892199bf8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/RangerClientMultiTenantAccessController.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/RangerClientMultiTenantAccessController.java @@ -130,9 +130,15 @@ public RangerClientMultiTenantAccessController(OzoneConfiguration conf) LOG.info("authType = {}, login user = {}", authType, usernameOrPrincipal); - client = new RangerClient(rangerHttpsAddress, - authType, usernameOrPrincipal, passwordOrKeytab, - rangerServiceName, OzoneConsts.OZONE); + UserGroupInformation loginUser = UserGroupInformation.getLoginUser(); + try { + client = new RangerClient(rangerHttpsAddress, + authType, usernameOrPrincipal, passwordOrKeytab, + rangerServiceName, OzoneConsts.OZONE); + } finally { + // set back the expected login user + UserGroupInformation.setLoginUser(loginUser); + } // Whether or not the Ranger credentials are valid is unknown right after // RangerClient initialization here. 
Because RangerClient does not perform diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java index 753088183b5..42ae90b9181 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java @@ -40,7 +40,6 @@ import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.S3SecretManager; import org.apache.hadoop.ozone.om.codec.OMDBDefinition; @@ -478,10 +477,7 @@ private void addCleanupEntry(Entry entry, Map> cleanupEpochs) if (cleanupTableInfo != null) { final List cleanupTables; if (cleanupTableInfo.cleanupAll()) { - cleanupTables = new OMDBDefinition().getColumnFamilies() - .stream() - .map(DBColumnFamilyDefinition::getName) - .collect(Collectors.toList()); + cleanupTables = OMDBDefinition.get().getColumnFamilyNames(); } else { cleanupTables = Arrays.asList(cleanupTableInfo.cleanupTables()); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java index af4d42ad68a..9f187dd0219 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java @@ -71,6 +71,7 @@ import org.apache.ratis.grpc.GrpcTlsConfig; import org.apache.ratis.netty.NettyConfigKeys; import org.apache.ratis.protocol.ClientId; +import org.apache.ratis.protocol.ClientInvocationId; import org.apache.ratis.protocol.SetConfigurationRequest; import org.apache.ratis.protocol.exceptions.LeaderNotReadyException; import org.apache.ratis.protocol.exceptions.LeaderSteppingDownException; @@ -87,6 +88,7 @@ import org.apache.ratis.rpc.SupportedRpcType; import org.apache.ratis.server.RaftServer; import org.apache.ratis.server.RaftServerConfigKeys; +import org.apache.ratis.server.RetryCache; import org.apache.ratis.server.protocol.TermIndex; import org.apache.ratis.server.storage.RaftStorage; import org.apache.ratis.util.LifeCycle; @@ -460,16 +462,11 @@ public void removeRaftPeer(OMNodeDetails omNodeDetails) { * ratis server. 
*/ private RaftClientRequest createRaftRequestImpl(OMRequest omRequest) { - if (!ozoneManager.isTestSecureOmFlag()) { - Preconditions.checkArgument(Server.getClientId() != DUMMY_CLIENT_ID); - Preconditions.checkArgument(Server.getCallId() != INVALID_CALL_ID); - } return RaftClientRequest.newBuilder() - .setClientId( - ClientId.valueOf(UUID.nameUUIDFromBytes(Server.getClientId()))) + .setClientId(getClientId()) .setServerId(server.getId()) .setGroupId(raftGroupId) - .setCallId(Server.getCallId()) + .setCallId(getCallId()) .setMessage( Message.valueOf( OMRatisHelper.convertRequestToByteString(omRequest))) @@ -477,6 +474,39 @@ private RaftClientRequest createRaftRequestImpl(OMRequest omRequest) { .build(); } + private ClientId getClientId() { + final byte[] clientIdBytes = Server.getClientId(); + if (!ozoneManager.isTestSecureOmFlag()) { + Preconditions.checkArgument(clientIdBytes != DUMMY_CLIENT_ID); + } + return ClientId.valueOf(UUID.nameUUIDFromBytes(clientIdBytes)); + } + + private long getCallId() { + final long callId = Server.getCallId(); + if (!ozoneManager.isTestSecureOmFlag()) { + Preconditions.checkArgument(callId != INVALID_CALL_ID); + } + return callId; + } + + public OMResponse checkRetryCache() throws ServiceException { + final ClientInvocationId invocationId = ClientInvocationId.valueOf(getClientId(), getCallId()); + final RetryCache.Entry cacheEntry = getServerDivision().getRetryCache().getIfPresent(invocationId); + if (cacheEntry == null) { + return null; //cache miss + } + //cache hit + try { + return getOMResponse(cacheEntry.getReplyFuture().get()); + } catch (ExecutionException ex) { + throw new ServiceException(ex.getMessage(), ex); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + throw new ServiceException(ex.getMessage(), ex); + } + } + /** * Process the raftClientReply and return OMResponse. * @param omRequest @@ -538,6 +568,10 @@ private OMResponse createOmResponseImpl(OMRequest omRequest, } } + return getOMResponse(reply); + } + + private OMResponse getOMResponse(RaftClientReply reply) throws ServiceException { try { return OMRatisHelper.getOMResponseFromRaftClientReply(reply); } catch (IOException ex) { @@ -547,9 +581,6 @@ private OMResponse createOmResponseImpl(OMRequest omRequest, throw new ServiceException(ex); } } - - // TODO: Still need to handle RaftRetry failure exception and - // NotReplicated exception. 
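// Sketch of how the new retry-cache lookup is intended to be used on the request path
// (the actual call site is outside this diff; the submit method name below is an
// assumption for illustration only):
OMResponse cached = omRatisServer.checkRetryCache();
if (cached != null) {
  return cached;  // duplicate retry of an already-applied call: answer from Ratis' retry cache
}
return omRatisServer.submitRequest(omRequest);  // otherwise go through Ratis as before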
} /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java index 463afba9421..6a5274ca01f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java @@ -46,6 +46,7 @@ import org.apache.hadoop.ozone.protocolPB.OzoneManagerRequestHandler; import org.apache.hadoop.ozone.protocolPB.RequestHandler; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Time; import org.apache.hadoop.util.concurrent.HadoopExecutors; import org.apache.ratis.proto.RaftProtos; import org.apache.ratis.proto.RaftProtos.StateMachineLogEntryProto; @@ -88,7 +89,6 @@ public class OzoneManagerStateMachine extends BaseStateMachine { new SimpleStateMachineStorage(); private final OzoneManager ozoneManager; private RequestHandler handler; - private RaftGroupId raftGroupId; private volatile OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer; private final ExecutorService executorService; private final ExecutorService installSnapshotExecutor; @@ -134,8 +134,8 @@ public void initialize(RaftServer server, RaftGroupId id, RaftStorage raftStorage) throws IOException { getLifeCycle().startAndTransition(() -> { super.initialize(server, id, raftStorage); - this.raftGroupId = id; storage.init(raftStorage); + LOG.info("{}: initialize {} with {}", getId(), id, getLastAppliedTermIndex()); }); } @@ -143,8 +143,9 @@ public void initialize(RaftServer server, RaftGroupId id, public synchronized void reinitialize() throws IOException { loadSnapshotInfoFromDB(); if (getLifeCycleState() == LifeCycle.State.PAUSED) { - unpause(getLastAppliedTermIndex().getIndex(), - getLastAppliedTermIndex().getTerm()); + final TermIndex lastApplied = getLastAppliedTermIndex(); + unpause(lastApplied.getIndex(), lastApplied.getTerm()); + LOG.info("{}: reinitialize {} with {}", getId(), getGroupId(), lastApplied); } } @@ -160,6 +161,7 @@ public void notifyLeaderChanged(RaftGroupMemberId groupMemberId, RaftPeerId newLeaderId) { // Initialize OMHAMetrics ozoneManager.omHAMetricsInit(newLeaderId.toString()); + LOG.info("{}: leader changed to {}", groupMemberId, newLeaderId); } /** Notified by Ratis for non-StateMachine term-index update. */ @@ -213,8 +215,15 @@ public void notifyConfigurationChanged(long term, long index, RaftProtos.RaftConfigurationProto newRaftConfiguration) { List newPeers = newRaftConfiguration.getPeersList(); - LOG.info("Received Configuration change notification from Ratis. 
New Peer" + - " list:\n{}", newPeers); + final StringBuilder logBuilder = new StringBuilder(1024) + .append("notifyConfigurationChanged from Ratis: term=").append(term) + .append(", index=").append(index) + .append(", New Peer list: "); + newPeers.forEach(peer -> logBuilder.append(peer.getId().toStringUtf8()) + .append("(") + .append(peer.getAddress()) + .append("), ")); + LOG.info(logBuilder.substring(0, logBuilder.length() - 2)); List newPeerIds = new ArrayList<>(); for (RaftProtos.RaftPeerProto raftPeerProto : newPeers) { @@ -263,7 +272,7 @@ public TransactionContext startTransaction( messageContent); Preconditions.checkArgument(raftClientRequest.getRaftGroupId().equals( - raftGroupId)); + getGroupId())); try { handler.validateRequest(omRequest); } catch (IOException ioe) { @@ -293,6 +302,10 @@ public TransactionContext preAppendTransaction(TransactionContext trx) OzoneManagerPrepareState prepareState = ozoneManager.getPrepareState(); + if (LOG.isDebugEnabled()) { + LOG.debug("{}: preAppendTransaction {}", getId(), TermIndex.valueOf(trx.getLogEntry())); + } + if (cmdType == OzoneManagerProtocolProtos.Type.Prepare) { // Must authenticate prepare requests here, since we must determine // whether or not to apply the prepare gate before proceeding with the @@ -303,8 +316,7 @@ public TransactionContext preAppendTransaction(TransactionContext trx) if (ozoneManager.getAclsEnabled() && !ozoneManager.isAdmin(userGroupInformation)) { String message = "Access denied for user " + userGroupInformation - + ". " - + "Superuser privilege is required to prepare ozone managers."; + + ". Superuser privilege is required to prepare upgrade/downgrade."; OMException cause = new OMException(message, OMException.ResultCodes.ACCESS_DENIED); // Leader should not step down because of this failure. @@ -341,6 +353,7 @@ public CompletableFuture applyTransaction(TransactionContext trx) { : OMRatisHelper.convertByteStringToOMRequest( trx.getStateMachineLogEntry().getLogData()); final TermIndex termIndex = TermIndex.valueOf(trx.getLogEntry()); + LOG.debug("{}: applyTransaction {}", getId(), termIndex); // In the current approach we have one single global thread executor. // with single thread. Right now this is being done for correctness, as // applyTransaction will be run on multiple OM's we want to execute the @@ -427,12 +440,14 @@ public synchronized void pause() { */ public synchronized void unpause(long newLastAppliedSnaphsotIndex, long newLastAppliedSnapShotTermIndex) { - LOG.info("OzoneManagerStateMachine is un-pausing"); if (statePausedCount.decrementAndGet() == 0) { getLifeCycle().startAndTransition(() -> { this.ozoneManagerDoubleBuffer = buildDoubleBufferForRatis(); this.setLastAppliedTermIndex(TermIndex.valueOf( newLastAppliedSnapShotTermIndex, newLastAppliedSnaphsotIndex)); + LOG.info("{}: OzoneManagerStateMachine un-pause completed. " + + "newLastAppliedSnaphsotIndex: {}, newLastAppliedSnapShotTermIndex: {}", + getId(), newLastAppliedSnaphsotIndex, newLastAppliedSnapShotTermIndex); }); } } @@ -482,15 +497,15 @@ private synchronized long takeSnapshotImpl() throws IOException { final TermIndex applied = getLastAppliedTermIndex(); final TermIndex notified = getLastNotifiedTermIndex(); final TermIndex snapshot = applied.compareTo(notified) > 0 ? 
applied : notified; - LOG.info(" applied = {}", applied); - LOG.info(" skipped = {}", lastSkippedIndex); - LOG.info("notified = {}", notified); - LOG.info("snapshot = {}", snapshot); + long startTime = Time.monotonicNow(); final TransactionInfo transactionInfo = TransactionInfo.valueOf(snapshot); ozoneManager.setTransactionInfo(transactionInfo); ozoneManager.getMetadataManager().getTransactionInfoTable().put(TRANSACTION_INFO_KEY, transactionInfo); ozoneManager.getMetadataManager().getStore().flushDB(); + LOG.info("{}: taking snapshot. applied = {}, skipped = {}, " + + "notified = {}, current snapshot index = {}, took {} ms", + getId(), applied, lastSkippedIndex, notified, snapshot, Time.monotonicNow() - startTime); return snapshot.getIndex(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java index 5a1612e021a..dc634248c28 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java @@ -110,7 +110,7 @@ import org.slf4j.LoggerFactory; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import static org.apache.hadoop.ozone.OzoneConsts.OM_RATIS_SNAPSHOT_DIR; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RATIS_SNAPSHOT_DIR; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_DIR; import static org.apache.hadoop.ozone.om.OzoneManagerUtils.getBucketLayout; @@ -340,6 +340,16 @@ public static OMClientRequest createClientRequest(OMRequest omRequest, return new S3ExpiredMultipartUploadsAbortRequest(omRequest); case QuotaRepair: return new OMQuotaRepairRequest(omRequest); + case PutObjectTagging: + keyArgs = omRequest.getPutObjectTaggingRequest().getKeyArgs(); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + break; + case DeleteObjectTagging: + keyArgs = omRequest.getDeleteObjectTaggingRequest().getKeyArgs(); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + break; default: throw new OMException("Unrecognized write command type request " + cmdType, OMException.ResultCodes.INVALID_REQUEST); @@ -439,8 +449,7 @@ public static Status exceptionToResponseStatus(Exception exception) { */ public static TransactionInfo getTrxnInfoFromCheckpoint( OzoneConfiguration conf, Path dbPath) throws Exception { - return HAUtils - .getTrxnInfoFromCheckpoint(conf, dbPath, new OMDBDefinition()); + return HAUtils.getTrxnInfoFromCheckpoint(conf, dbPath, OMDBDefinition.get()); } /** @@ -485,7 +494,7 @@ public static String getOMRatisSnapshotDirectory(ConfigurationSource conf) { OZONE_OM_RATIS_SNAPSHOT_DIR, OZONE_METADATA_DIRS); File metaDirPath = ServerUtils.getOzoneMetaDirPath(conf); snapshotDir = Paths.get(metaDirPath.getPath(), - OM_RATIS_SNAPSHOT_DIR).toString(); + OZONE_RATIS_SNAPSHOT_DIR).toString(); } return snapshotDir; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/BucketLayoutAwareOMKeyRequestFactory.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/BucketLayoutAwareOMKeyRequestFactory.java index 4a5558ed7f1..5d542bfb912 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/BucketLayoutAwareOMKeyRequestFactory.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/BucketLayoutAwareOMKeyRequestFactory.java @@ -49,6 +49,10 @@ import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCommitPartRequestWithFSO; import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadAbortRequest; import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadAbortRequestWithFSO; +import org.apache.hadoop.ozone.om.request.s3.tagging.S3DeleteObjectTaggingRequest; +import org.apache.hadoop.ozone.om.request.s3.tagging.S3DeleteObjectTaggingRequestWithFSO; +import org.apache.hadoop.ozone.om.request.s3.tagging.S3PutObjectTaggingRequest; +import org.apache.hadoop.ozone.om.request.s3.tagging.S3PutObjectTaggingRequestWithFSO; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import jakarta.annotation.Nonnull; @@ -191,6 +195,23 @@ public final class BucketLayoutAwareOMKeyRequestFactory { addRequestClass(Type.SetTimes, OMKeySetTimesRequestWithFSO.class, BucketLayout.FILE_SYSTEM_OPTIMIZED); + + // PutObjectTagging + addRequestClass(Type.PutObjectTagging, + S3PutObjectTaggingRequest.class, + BucketLayout.OBJECT_STORE); + addRequestClass(Type.PutObjectTagging, + S3PutObjectTaggingRequestWithFSO.class, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + + // DeleteObjectTagging + addRequestClass(Type.DeleteObjectTagging, + S3DeleteObjectTaggingRequest.class, + BucketLayout.OBJECT_STORE); + addRequestClass(Type.DeleteObjectTagging, + S3DeleteObjectTaggingRequestWithFSO.class, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + } private BucketLayoutAwareOMKeyRequestFactory() { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java index 17f9663ae1f..c9c664b303f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java @@ -381,7 +381,6 @@ vol, bucket, key, volOwner, bucketOwner, createUGIForApi(), */ @VisibleForTesting public UserGroupInformation createUGI() throws AuthenticationException { - if (userGroupInformation != null) { return userGroupInformation; } @@ -413,6 +412,11 @@ public UserGroupInformation createUGIForApi() throws OMException { return ugi; } + @VisibleForTesting + public void setUGI(UserGroupInformation ugi) { + this.userGroupInformation = ugi; + } + /** * Return InetAddress created from OMRequest userInfo. If userInfo is not * set, returns null. 
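// Usage sketch for the helper introduced in OMClientRequestUtils below: request classes can
// consult it before logging a failure, so that expected client errors such as KEY_NOT_FOUND
// are not logged at error level. The method and variable names below are illustrative.
void logFailureIfNeeded(IOException exception, Logger log) {
  if (OMClientRequestUtils.shouldLogClientRequestFailure(exception)) {
    log.error("Request failed", exception);
  } else {
    log.debug("Request failed with an expected client error", exception);
  }
}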
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequestUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequestUtils.java index 26487935a65..1b318354eeb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequestUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequestUtils.java @@ -99,4 +99,17 @@ private static boolean checkInSnapshotCache( } return false; } + + public static boolean shouldLogClientRequestFailure(IOException exception) { + if (!(exception instanceof OMException)) { + return true; + } + OMException omException = (OMException) exception; + switch (omException.getResult()) { + case KEY_NOT_FOUND: + return false; + default: + return true; + } + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java index 72c5cf57d99..3c21a2a851b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java @@ -66,10 +66,12 @@ import java.nio.file.InvalidPathException; import java.util.ArrayList; import java.util.List; +import java.util.stream.Collectors; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; +import static org.apache.hadoop.ozone.om.helpers.OzoneAclUtil.getDefaultAclList; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; @@ -246,8 +248,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn omBucketInfo.setUpdateID(transactionLogIndex, ozoneManager.isRatisEnabled()); - // Add default acls from volume. - addDefaultAcls(omBucketInfo, omVolumeArgs); + addDefaultAcls(omBucketInfo, omVolumeArgs, ozoneManager); // check namespace quota checkQuotaInNamespace(omVolumeArgs, 1L); @@ -322,16 +323,20 @@ private boolean isECBucket(BucketInfo bucketInfo) { * @param omVolumeArgs */ private void addDefaultAcls(OmBucketInfo omBucketInfo, - OmVolumeArgs omVolumeArgs) { - // Add default acls for bucket creator. + OmVolumeArgs omVolumeArgs, OzoneManager ozoneManager) throws OMException { List acls = new ArrayList<>(); + // Add default acls + acls.addAll(getDefaultAclList(createUGIForApi(), ozoneManager.getConfiguration())); if (omBucketInfo.getAcls() != null) { + // Add acls for bucket creator. acls.addAll(omBucketInfo.getAcls()); } // Add default acls from volume. 
List defaultVolumeAcls = omVolumeArgs.getDefaultAcls(); OzoneAclUtil.inheritDefaultAcls(acls, defaultVolumeAcls, ACCESS); + // Remove the duplicates + acls = acls.stream().distinct().collect(Collectors.toList()); omBucketInfo.setAcls(acls); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java index 802cfa54e60..732886fa0e6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java @@ -22,19 +22,11 @@ import java.nio.file.InvalidPathException; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Map; import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.client.ECReplicationConfig; -import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.ratis.server.protocol.TermIndex; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -74,7 +66,6 @@ import org.apache.hadoop.util.Time; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH; @@ -89,11 +80,6 @@ public class OMDirectoryCreateRequest extends OMKeyRequest { private static final Logger LOG = LoggerFactory.getLogger(OMDirectoryCreateRequest.class); - // The maximum number of directories which can be created through a single - // transaction (recursive directory creations) is 2^8 - 1 as only 8 - // bits are set aside for this in ObjectID. - private static final long MAX_NUM_OF_RECURSIVE_DIRS = 255; - /** * Stores the result of request execution in * OMClientRequest#validateAndUpdateCache. 
@@ -117,8 +103,10 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { super.preExecute(ozoneManager).getCreateDirectoryRequest(); Preconditions.checkNotNull(createDirectoryRequest); - OmUtils.verifyKeyNameWithSnapshotReservedWord( - createDirectoryRequest.getKeyArgs().getKeyName()); + KeyArgs keyArgs = createDirectoryRequest.getKeyArgs(); + ValidateKeyArgs validateArgs = new ValidateKeyArgs.Builder() + .setSnapshotReservedWord(keyArgs.getKeyName()).build(); + validateKey(ozoneManager, validateArgs); KeyArgs.Builder newKeyArgs = createDirectoryRequest.getKeyArgs() .toBuilder().setModificationTime(Time.now()); @@ -202,7 +190,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn dirKeyInfo = createDirectoryKeyInfoWithACL(keyName, keyArgs, baseObjId, omBucketInfo, omPathInfo, trxnLogIndex, - ozoneManager.getDefaultReplicationConfig()); + ozoneManager.getDefaultReplicationConfig(), ozoneManager.getConfiguration()); missingParentInfos = getAllParentInfo(ozoneManager, keyArgs, missingParents, omBucketInfo, omPathInfo, trxnLogIndex); @@ -249,58 +237,6 @@ dirKeyInfo, missingParentInfos, result, getBucketLayout(), return omClientResponse; } - /** - * Construct OmKeyInfo for every parent directory in missing list. - * @param ozoneManager - * @param keyArgs - * @param missingParents list of parent directories to be created - * @param bucketInfo - * @param omPathInfo - * @param trxnLogIndex - * @return {@code List} - * @throws IOException - */ - public static List getAllParentInfo(OzoneManager ozoneManager, - KeyArgs keyArgs, List missingParents, OmBucketInfo bucketInfo, - OMFileRequest.OMPathInfo omPathInfo, long trxnLogIndex) - throws IOException { - List missingParentInfos = new ArrayList<>(); - - // The base id is left shifted by 8 bits for creating space to - // create (2^8 - 1) object ids in every request. - // maxObjId represents the largest object id allocation possible inside - // the transaction. - long baseObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex); - long maxObjId = baseObjId + MAX_NUM_OF_RECURSIVE_DIRS; - long objectCount = 1; // baseObjID is used by the leaf directory - - String volumeName = keyArgs.getVolumeName(); - String bucketName = keyArgs.getBucketName(); - String keyName = keyArgs.getKeyName(); - - for (String missingKey : missingParents) { - long nextObjId = baseObjId + objectCount; - if (nextObjId > maxObjId) { - throw new OMException("Too many directories in path. Exceeds limit of " - + MAX_NUM_OF_RECURSIVE_DIRS + ". Unable to create directory: " - + keyName + " in volume/bucket: " + volumeName + "/" + bucketName, - INVALID_KEY_NAME); - } - - LOG.debug("missing parent {} getting added to KeyTable", missingKey); - - OmKeyInfo parentKeyInfo = - createDirectoryKeyInfoWithACL(missingKey, keyArgs, nextObjId, - bucketInfo, omPathInfo, trxnLogIndex, - ozoneManager.getDefaultReplicationConfig()); - objectCount++; - - missingParentInfos.add(parentKeyInfo); - } - - return missingParentInfos; - } - private void logResult(CreateDirectoryRequest createDirectoryRequest, KeyArgs keyArgs, OMMetrics omMetrics, Result result, Exception exception, int numMissingParents) { @@ -335,69 +271,6 @@ private void logResult(CreateDirectoryRequest createDirectoryRequest, } } - /** - * fill in a KeyInfo for a new directory entry in OM database. - * without initializing ACLs from the KeyArgs - used for intermediate - * directories which get created internally/recursively during file - * and directory create. 
- * @param keyName - * @param keyArgs - * @param objectId - * @param bucketInfo - * @param omPathInfo - * @param transactionIndex - * @param serverDefaultReplConfig - * @return the OmKeyInfo structure - */ - public static OmKeyInfo createDirectoryKeyInfoWithACL(String keyName, - KeyArgs keyArgs, long objectId, OmBucketInfo bucketInfo, - OMFileRequest.OMPathInfo omPathInfo, long transactionIndex, - ReplicationConfig serverDefaultReplConfig) { - return dirKeyInfoBuilderNoACL(keyName, keyArgs, objectId, - serverDefaultReplConfig) - .setAcls(getAclsForDir(keyArgs, bucketInfo, omPathInfo)) - .setUpdateID(transactionIndex).build(); - } - - private static OmKeyInfo.Builder dirKeyInfoBuilderNoACL(String keyName, - KeyArgs keyArgs, long objectId, - ReplicationConfig serverDefaultReplConfig) { - String dirName = OzoneFSUtils.addTrailingSlashIfNeeded(keyName); - - OmKeyInfo.Builder keyInfoBuilder = - new OmKeyInfo.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - .setKeyName(dirName) - .setOwnerName(keyArgs.getOwnerName()) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, new ArrayList<>()))) - .setCreationTime(keyArgs.getModificationTime()) - .setModificationTime(keyArgs.getModificationTime()) - .setDataSize(0); - if (keyArgs.getFactor() != null && keyArgs - .getFactor() != HddsProtos.ReplicationFactor.ZERO && keyArgs - .getType() != HddsProtos.ReplicationType.EC) { - // Factor available and not an EC replication config. - keyInfoBuilder.setReplicationConfig(ReplicationConfig - .fromProtoTypeAndFactor(keyArgs.getType(), keyArgs.getFactor())); - } else if (keyArgs.getType() == HddsProtos.ReplicationType.EC) { - // Found EC type - keyInfoBuilder.setReplicationConfig( - new ECReplicationConfig(keyArgs.getEcReplicationConfig())); - } else { - // default type - keyInfoBuilder.setReplicationConfig(serverDefaultReplConfig); - } - - keyInfoBuilder.setObjectID(objectId); - return keyInfoBuilder; - } - - static long getMaxNumOfRecursiveDirs() { - return MAX_NUM_OF_RECURSIVE_DIRS; - } - @RequestFeatureValidator( conditions = ValidationCondition.CLUSTER_NEEDS_FINALIZATION, processingPhase = RequestProcessingPhase.PRE_PROCESS, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java index 46a2ac5f7cc..8bef8e17928 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java @@ -51,12 +51,10 @@ import java.nio.file.InvalidPathException; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.ArrayList; import java.util.List; import java.util.Map; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS; @@ -145,8 +143,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn 
OmBucketInfo omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); // prepare all missing parents - missingParentInfos = - OMDirectoryCreateRequestWithFSO.getAllMissingParentDirInfo( + missingParentInfos = getAllMissingParentDirInfo( ozoneManager, keyArgs, omBucketInfo, omPathInfo, trxnLogIndex); final long volumeId = omMetadataManager.getVolumeId(volumeName); @@ -163,7 +160,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn omPathInfo.getLeafNodeName(), keyArgs, omPathInfo.getLeafNodeObjectId(), omPathInfo.getLastKnownParentId(), trxnLogIndex, - omBucketInfo, omPathInfo); + omBucketInfo, omPathInfo, ozoneManager.getConfiguration()); OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager, volumeId, bucketId, trxnLogIndex, missingParentInfos, dirInfo); @@ -235,86 +232,4 @@ private void logResult(CreateDirectoryRequest createDirectoryRequest, createDirectoryRequest); } } - - /** - * Construct OmDirectoryInfo for every parent directory in missing list. - * - * @param keyArgs key arguments - * @param pathInfo list of parent directories to be created and its ACLs - * @param trxnLogIndex transaction log index id - * @return list of missing parent directories - * @throws IOException DB failure - */ - public static List getAllMissingParentDirInfo( - OzoneManager ozoneManager, KeyArgs keyArgs, OmBucketInfo bucketInfo, - OMFileRequest.OMPathInfoWithFSO pathInfo, long trxnLogIndex) - throws IOException { - List missingParentInfos = new ArrayList<>(); - - // The base id is left shifted by 8 bits for creating space to - // create (2^8 - 1) object ids in every request. - // maxObjId represents the largest object id allocation possible inside - // the transaction. - long baseObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex); - long maxObjId = baseObjId + getMaxNumOfRecursiveDirs(); - long objectCount = 1; - - String volumeName = keyArgs.getVolumeName(); - String bucketName = keyArgs.getBucketName(); - String keyName = keyArgs.getKeyName(); - - long lastKnownParentId = pathInfo.getLastKnownParentId(); - List missingParents = pathInfo.getMissingParents(); - for (String missingKey : missingParents) { - long nextObjId = baseObjId + objectCount; - if (nextObjId > maxObjId) { - throw new OMException("Too many directories in path. Exceeds limit of " - + getMaxNumOfRecursiveDirs() + ". Unable to create directory: " - + keyName + " in volume/bucket: " + volumeName + "/" + bucketName, - INVALID_KEY_NAME); - } - - LOG.debug("missing parent {} getting added to DirectoryTable", - missingKey); - OmDirectoryInfo dirInfo = createDirectoryInfoWithACL(missingKey, - keyArgs, nextObjId, lastKnownParentId, trxnLogIndex, - bucketInfo, pathInfo); - objectCount++; - - missingParentInfos.add(dirInfo); - - // updating id for the next sub-dir - lastKnownParentId = nextObjId; - } - pathInfo.setLastKnownParentId(lastKnownParentId); - pathInfo.setLeafNodeObjectId(baseObjId + objectCount); - return missingParentInfos; - } - - /** - * Fill in a DirectoryInfo for a new directory entry in OM database. 
- * @param dirName - * @param keyArgs - * @param objectId - * @param parentObjectId - * @param bucketInfo - * @param omPathInfo - * @return the OmDirectoryInfo structure - */ - private static OmDirectoryInfo createDirectoryInfoWithACL( - String dirName, KeyArgs keyArgs, long objectId, - long parentObjectId, long transactionIndex, - OmBucketInfo bucketInfo, OMFileRequest.OMPathInfo omPathInfo) { - - return OmDirectoryInfo.newBuilder() - .setName(dirName) - .setOwner(keyArgs.getOwnerName()) - .setCreationTime(keyArgs.getModificationTime()) - .setModificationTime(keyArgs.getModificationTime()) - .setObjectID(objectId) - .setUpdateID(transactionIndex) - .setParentObjectID(parentObjectId) - .setAcls(getAclsForDir(keyArgs, bucketInfo, omPathInfo)) - .build(); - } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java index c13af319c5c..08b25718288 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java @@ -30,9 +30,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.ratis.server.protocol.TermIndex; -import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OzoneConfigUtil; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; @@ -93,16 +91,11 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { Preconditions.checkNotNull(createFileRequest); KeyArgs keyArgs = createFileRequest.getKeyArgs(); - - // Verify key name - OmUtils.verifyKeyNameWithSnapshotReservedWord(keyArgs.getKeyName()); - final boolean checkKeyNameEnabled = ozoneManager.getConfiguration() - .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY, - OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT); - if (checkKeyNameEnabled) { - OmUtils.validateKeyName(StringUtils.removeEnd(keyArgs.getKeyName(), - OzoneConsts.FS_FILE_COPYING_TEMP_SUFFIX)); - } + ValidateKeyArgs validateArgs = new ValidateKeyArgs.Builder() + .setSnapshotReservedWord(keyArgs.getKeyName()) + .setKeyName(StringUtils.removeEnd(keyArgs.getKeyName(), + OzoneConsts.FS_FILE_COPYING_TEMP_SUFFIX)).build(); + validateKey(ozoneManager, validateArgs); UserInfo userInfo = getUserInfo(); if (keyArgs.getKeyName().length() == 0) { @@ -254,7 +247,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn keyArgs.getDataSize(), locations, getFileEncryptionInfo(keyArgs), ozoneManager.getPrefixManager(), omBucketInfo, pathInfo, trxnLogIndex, ozoneManager.getObjectIdFromTxId(trxnLogIndex), - ozoneManager.isRatisEnabled(), repConfig); + ozoneManager.isRatisEnabled(), repConfig, ozoneManager.getConfiguration()); validateEncryptionKeyInfo(omBucketInfo, keyArgs); long openVersion = omKeyInfo.getLatestVersionLocations().getVersion(); @@ -262,8 +255,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn String dbOpenKeyName = omMetadataManager.getOpenKey(volumeName, bucketName, keyName, clientID); - missingParentInfos = OMDirectoryCreateRequest - .getAllParentInfo(ozoneManager, keyArgs, + 
missingParentInfos = getAllParentInfo(ozoneManager, keyArgs, pathInfo.getMissingParents(), omBucketInfo, pathInfo, trxnLogIndex); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java index 291b0a8d537..c4967d5af1f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java @@ -153,9 +153,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn omMetadataManager.getBucketKey(volumeName, bucketName)); // add all missing parents to dir table - missingParentInfos = - OMDirectoryCreateRequestWithFSO.getAllMissingParentDirInfo( - ozoneManager, keyArgs, bucketInfo, pathInfoFSO, trxnLogIndex); + missingParentInfos = getAllMissingParentDirInfo( + ozoneManager, keyArgs, bucketInfo, pathInfoFSO, trxnLogIndex); // total number of keys created. numKeysCreated = missingParentInfos.size(); @@ -171,7 +170,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn getFileEncryptionInfo(keyArgs), ozoneManager.getPrefixManager(), bucketInfo, pathInfoFSO, trxnLogIndex, pathInfoFSO.getLeafNodeObjectId(), - ozoneManager.isRatisEnabled(), repConfig); + ozoneManager.isRatisEnabled(), repConfig, ozoneManager.getConfiguration()); validateEncryptionKeyInfo(bucketInfo, keyArgs); long openVersion = omFileInfo.getLatestVersionLocations().getVersion(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index 378e0cb12ce..87d126de98a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -30,9 +30,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.ozone.OzoneManagerVersion; import org.apache.ratis.server.protocol.TermIndex; -import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -100,14 +98,11 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { ozoneManager.checkFeatureEnabled(OzoneManagerVersion.ATOMIC_REWRITE_KEY); } - // Verify key name - final boolean checkKeyNameEnabled = ozoneManager.getConfiguration() - .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY, - OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT); - if (checkKeyNameEnabled) { - OmUtils.validateKeyName(StringUtils.removeEnd(keyArgs.getKeyName(), - OzoneConsts.FS_FILE_COPYING_TEMP_SUFFIX)); - } + ValidateKeyArgs validateArgs = new ValidateKeyArgs.Builder() + .setKeyName(StringUtils.removeEnd(keyArgs.getKeyName(), + OzoneConsts.FS_FILE_COPYING_TEMP_SUFFIX)).build(); + validateKey(ozoneManager, validateArgs); + boolean isHsync = commitKeyRequest.hasHsync() && commitKeyRequest.getHsync(); boolean isRecovery = commitKeyRequest.hasRecovery() && commitKeyRequest.getRecovery(); 
boolean enableHsync = OzoneFSUtils.canEnableHsync(ozoneManager.getConfiguration(), false); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java index dee5bb0fe0e..e817901c22e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java @@ -30,13 +30,10 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.ozone.OzoneManagerVersion; import org.apache.ratis.server.protocol.TermIndex; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OzoneConfigUtil; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.lock.OzoneLockStrategy; -import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator; @@ -98,14 +95,10 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { ozoneManager.checkFeatureEnabled(OzoneManagerVersion.ATOMIC_REWRITE_KEY); } - // Verify key name - OmUtils.verifyKeyNameWithSnapshotReservedWord(keyArgs.getKeyName()); - final boolean checkKeyNameEnabled = ozoneManager.getConfiguration() - .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY, - OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT); - if (checkKeyNameEnabled) { - OmUtils.validateKeyName(keyArgs.getKeyName()); - } + ValidateKeyArgs validateArgs = new ValidateKeyArgs.Builder() + .setSnapshotReservedWord(keyArgs.getKeyName()) + .setKeyName(keyArgs.getKeyName()).build(); + validateKey(ozoneManager, validateArgs); String keyPath = keyArgs.getKeyName(); keyPath = validateAndNormalizeKey(ozoneManager.getEnableFileSystemPaths(), @@ -267,9 +260,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn " as there is already file in the given path", NOT_A_FILE); } - missingParentInfos = OMDirectoryCreateRequest - .getAllParentInfo(ozoneManager, keyArgs, - pathInfo.getMissingParents(), bucketInfo, + missingParentInfos = getAllParentInfo(ozoneManager, keyArgs, + pathInfo.getMissingParents(), bucketInfo, pathInfo, trxnLogIndex); numMissingParents = missingParentInfos.size(); @@ -285,7 +277,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn keyArgs.getDataSize(), locations, getFileEncryptionInfo(keyArgs), ozoneManager.getPrefixManager(), bucketInfo, pathInfo, trxnLogIndex, ozoneManager.getObjectIdFromTxId(trxnLogIndex), - ozoneManager.isRatisEnabled(), replicationConfig); + ozoneManager.isRatisEnabled(), replicationConfig, ozoneManager.getConfiguration()); validateEncryptionKeyInfo(bucketInfo, keyArgs); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java index f40adb7495f..87cc151351e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java @@ -31,7 +31,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestWithFSO; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -138,9 +137,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn omMetadataManager.getBucketKey(volumeName, bucketName)); // add all missing parents to dir table - missingParentInfos = - OMDirectoryCreateRequestWithFSO.getAllMissingParentDirInfo( - ozoneManager, keyArgs, bucketInfo, pathInfoFSO, trxnLogIndex); + missingParentInfos = getAllMissingParentDirInfo( + ozoneManager, keyArgs, bucketInfo, pathInfoFSO, trxnLogIndex); // total number of keys created. numKeysCreated = missingParentInfos.size(); @@ -156,7 +154,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn getFileEncryptionInfo(keyArgs), ozoneManager.getPrefixManager(), bucketInfo, pathInfoFSO, trxnLogIndex, pathInfoFSO.getLeafNodeObjectId(), - ozoneManager.isRatisEnabled(), repConfig); + ozoneManager.isRatisEnabled(), repConfig, ozoneManager.getConfiguration()); validateEncryptionKeyInfo(bucketInfo, keyArgs); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java index 804e536d21f..35940f5a770 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java @@ -24,9 +24,7 @@ import com.google.common.base.Preconditions; import org.apache.ratis.server.protocol.TermIndex; -import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator; @@ -83,15 +81,10 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { .getRenameKeyRequest(); Preconditions.checkNotNull(renameKeyRequest); - // Verify key name - final boolean checkKeyNameEnabled = ozoneManager.getConfiguration() - .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY, - OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT); - if (checkKeyNameEnabled) { - OmUtils.validateKeyName(renameKeyRequest.getToKeyName()); - } - KeyArgs renameKeyArgs = renameKeyRequest.getKeyArgs(); + ValidateKeyArgs validateArgs = new ValidateKeyArgs.Builder() + .setKeyName(renameKeyRequest.getToKeyName()).build(); + validateKey(ozoneManager, validateArgs); String srcKey = extractSrcKey(renameKeyArgs); String dstKey = extractDstKey(renameKeyRequest); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index 88c5ad91405..6a467f3acf5 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -39,6 +39,8 @@ import org.apache.hadoop.hdds.client.ContainerBlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OmUtils; @@ -47,16 +49,19 @@ import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.PrefixManager; import org.apache.hadoop.ozone.om.ResolvedBucket; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; +import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.QuotaUtil; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.lock.OzoneLockStrategy; @@ -98,9 +103,11 @@ import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes .BUCKET_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes .VOLUME_NOT_FOUND; +import static org.apache.hadoop.ozone.om.helpers.OzoneAclUtil.getDefaultAclList; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.util.Time.monotonicNow; @@ -109,6 +116,11 @@ */ public abstract class OMKeyRequest extends OMClientRequest { + // The maximum number of directories which can be created through a single + // transaction (recursive directory creations) is 2^8 - 1 as only 8 + // bits are set aside for this in ObjectID. + private static final long MAX_NUM_OF_RECURSIVE_DIRS = 255; + @VisibleForTesting public static final Logger LOG = LoggerFactory.getLogger(OMKeyRequest.class); @@ -176,6 +188,80 @@ protected KeyArgs resolveBucketAndCheckOpenKeyAcls(KeyArgs keyArgs, return resolvedArgs; } + /** + * Define the parameters carried when verifying the Key. 
+ */ + public static class ValidateKeyArgs { + private String snapshotReservedWord; + private String keyName; + private boolean validateSnapshotReserved; + private boolean validateKeyName; + + ValidateKeyArgs(String snapshotReservedWord, String keyName, + boolean validateSnapshotReserved, boolean validateKeyName) { + this.snapshotReservedWord = snapshotReservedWord; + this.keyName = keyName; + this.validateSnapshotReserved = validateSnapshotReserved; + this.validateKeyName = validateKeyName; + } + + public String getSnapshotReservedWord() { + return snapshotReservedWord; + } + + public String getKeyName() { + return keyName; + } + + public boolean isValidateSnapshotReserved() { + return validateSnapshotReserved; + } + + public boolean isValidateKeyName() { + return validateKeyName; + } + + /** + * Tools for building {@link ValidateKeyArgs}. + */ + public static class Builder { + private String snapshotReservedWord; + private String keyName; + private boolean validateSnapshotReserved; + private boolean validateKeyName; + + public Builder setSnapshotReservedWord(String snapshotReservedWord) { + this.snapshotReservedWord = snapshotReservedWord; + this.validateSnapshotReserved = true; + return this; + } + + public Builder setKeyName(String keyName) { + this.keyName = keyName; + this.validateKeyName = true; + return this; + } + + public ValidateKeyArgs build() { + return new ValidateKeyArgs(snapshotReservedWord, keyName, + validateSnapshotReserved, validateKeyName); + } + } + } + + protected void validateKey(OzoneManager ozoneManager, ValidateKeyArgs validateKeyArgs) + throws OMException { + if (validateKeyArgs.isValidateSnapshotReserved()) { + OmUtils.verifyKeyNameWithSnapshotReservedWord(validateKeyArgs.getSnapshotReservedWord()); + } + final boolean checkKeyNameEnabled = ozoneManager.getConfiguration() + .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY, + OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT); + if (validateKeyArgs.isValidateKeyName() && checkKeyNameEnabled) { + OmUtils.validateKeyName(validateKeyArgs.getKeyName()); + } + } + /** * This methods avoids multiple rpc calls to SCM by allocating multiple blocks * in one rpc call. @@ -325,11 +411,12 @@ public EncryptedKeyVersion run() throws IOException { return edek; } - protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, + protected List getAclsForKey(KeyArgs keyArgs, OmBucketInfo bucketInfo, OMFileRequest.OMPathInfo omPathInfo, - PrefixManager prefixManager) { + PrefixManager prefixManager, OzoneConfiguration config) throws OMException { List acls = new ArrayList<>(); + acls.addAll(getDefaultAclList(createUGIForApi(), config)); if (keyArgs.getAclsList() != null) { acls.addAll(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList())); } @@ -347,6 +434,8 @@ protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, OmPrefixInfo prefixInfo = prefixList.get(prefixList.size() - 1); if (prefixInfo != null) { if (OzoneAclUtil.inheritDefaultAcls(acls, prefixInfo.getAcls(), ACCESS)) { + // Remove the duplicates + acls = acls.stream().distinct().collect(Collectors.toList()); return acls; } } @@ -357,6 +446,7 @@ protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, // prefix are not set if (omPathInfo != null) { if (OzoneAclUtil.inheritDefaultAcls(acls, omPathInfo.getAcls(), ACCESS)) { + acls = acls.stream().distinct().collect(Collectors.toList()); return acls; } } @@ -365,10 +455,12 @@ protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, // parent-dir are not set. 
if (bucketInfo != null) { if (OzoneAclUtil.inheritDefaultAcls(acls, bucketInfo.getAcls(), ACCESS)) { + acls = acls.stream().distinct().collect(Collectors.toList()); return acls; } } + acls = acls.stream().distinct().collect(Collectors.toList()); return acls; } @@ -377,12 +469,15 @@ protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, * @param keyArgs * @param bucketInfo * @param omPathInfo + * @param config * @return Acls which inherited parent DEFAULT and keyArgs ACCESS acls. */ - protected static List getAclsForDir(KeyArgs keyArgs, - OmBucketInfo bucketInfo, OMFileRequest.OMPathInfo omPathInfo) { + protected List getAclsForDir(KeyArgs keyArgs, OmBucketInfo bucketInfo, + OMFileRequest.OMPathInfo omPathInfo, OzoneConfiguration config) throws OMException { // Acls inherited from parent or bucket will convert to DEFAULT scope List acls = new ArrayList<>(); + // add default ACLs + acls.addAll(getDefaultAclList(createUGIForApi(), config)); // Inherit DEFAULT acls from parent-dir if (omPathInfo != null) { @@ -395,12 +490,207 @@ protected static List getAclsForDir(KeyArgs keyArgs, OzoneAclUtil.inheritDefaultAcls(acls, bucketInfo.getAcls(), DEFAULT); } - // add itself acls + // add acls from clients acls.addAll(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList())); - + acls = acls.stream().distinct().collect(Collectors.toList()); return acls; } + /** + * Construct OmDirectoryInfo for every parent directory in missing list. + * + * @param keyArgs key arguments + * @param pathInfo list of parent directories to be created and its ACLs + * @param trxnLogIndex transaction log index id + * @return list of missing parent directories + * @throws IOException DB failure + */ + protected List getAllMissingParentDirInfo( + OzoneManager ozoneManager, KeyArgs keyArgs, OmBucketInfo bucketInfo, + OMFileRequest.OMPathInfoWithFSO pathInfo, long trxnLogIndex) + throws IOException { + List missingParentInfos = new ArrayList<>(); + + // The base id is left shifted by 8 bits for creating space to + // create (2^8 - 1) object ids in every request. + // maxObjId represents the largest object id allocation possible inside + // the transaction. + long baseObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex); + long maxObjId = baseObjId + MAX_NUM_OF_RECURSIVE_DIRS; + long objectCount = 1; + + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + long lastKnownParentId = pathInfo.getLastKnownParentId(); + List missingParents = pathInfo.getMissingParents(); + for (String missingKey : missingParents) { + long nextObjId = baseObjId + objectCount; + if (nextObjId > maxObjId) { + throw new OMException("Too many directories in path. Exceeds limit of " + + MAX_NUM_OF_RECURSIVE_DIRS + ". Unable to create directory: " + + keyName + " in volume/bucket: " + volumeName + "/" + bucketName, + INVALID_KEY_NAME); + } + + LOG.debug("missing parent {} getting added to DirectoryTable", + missingKey); + OmDirectoryInfo dirInfo = createDirectoryInfoWithACL(missingKey, + keyArgs, nextObjId, lastKnownParentId, trxnLogIndex, + bucketInfo, pathInfo, ozoneManager.getConfiguration()); + objectCount++; + + missingParentInfos.add(dirInfo); + + // updating id for the next sub-dir + lastKnownParentId = nextObjId; + } + pathInfo.setLastKnownParentId(lastKnownParentId); + pathInfo.setLeafNodeObjectId(baseObjId + objectCount); + return missingParentInfos; + } + + /** + * Construct OmKeyInfo for every parent directory in missing list. 
+ * @param ozoneManager + * @param keyArgs + * @param missingParents list of parent directories to be created + * @param bucketInfo + * @param omPathInfo + * @param trxnLogIndex + * @return {@code List} + * @throws IOException + */ + protected List getAllParentInfo(OzoneManager ozoneManager, + KeyArgs keyArgs, List missingParents, OmBucketInfo bucketInfo, + OMFileRequest.OMPathInfo omPathInfo, long trxnLogIndex) + throws IOException { + List missingParentInfos = new ArrayList<>(); + + // The base id is left shifted by 8 bits for creating space to + // create (2^8 - 1) object ids in every request. + // maxObjId represents the largest object id allocation possible inside + // the transaction. + long baseObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex); + long maxObjId = baseObjId + MAX_NUM_OF_RECURSIVE_DIRS; + long objectCount = 1; // baseObjID is used by the leaf directory + + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + for (String missingKey : missingParents) { + long nextObjId = baseObjId + objectCount; + if (nextObjId > maxObjId) { + throw new OMException("Too many directories in path. Exceeds limit of " + + MAX_NUM_OF_RECURSIVE_DIRS + ". Unable to create directory: " + + keyName + " in volume/bucket: " + volumeName + "/" + bucketName, + INVALID_KEY_NAME); + } + + LOG.debug("missing parent {} getting added to KeyTable", missingKey); + + OmKeyInfo parentKeyInfo = + createDirectoryKeyInfoWithACL(missingKey, keyArgs, nextObjId, + bucketInfo, omPathInfo, trxnLogIndex, + ozoneManager.getDefaultReplicationConfig(), ozoneManager.getConfiguration()); + objectCount++; + + missingParentInfos.add(parentKeyInfo); + } + + return missingParentInfos; + } + + /** + * Fill in a DirectoryInfo for a new directory entry in OM database. + * @param dirName + * @param keyArgs + * @param objectId + * @param parentObjectId + * @param bucketInfo + * @param omPathInfo + * @param config + * @return the OmDirectoryInfo structure + */ + @SuppressWarnings("parameternumber") + protected OmDirectoryInfo createDirectoryInfoWithACL( + String dirName, KeyArgs keyArgs, long objectId, + long parentObjectId, long transactionIndex, + OmBucketInfo bucketInfo, OMFileRequest.OMPathInfo omPathInfo, + OzoneConfiguration config) throws OMException { + return OmDirectoryInfo.newBuilder() + .setName(dirName) + .setOwner(keyArgs.getOwnerName()) + .setCreationTime(keyArgs.getModificationTime()) + .setModificationTime(keyArgs.getModificationTime()) + .setObjectID(objectId) + .setUpdateID(transactionIndex) + .setParentObjectID(parentObjectId).setAcls(getAclsForDir(keyArgs, bucketInfo, omPathInfo, config)) + .build(); + } + + /** + * fill in a KeyInfo for a new directory entry in OM database. + * without initializing ACLs from the KeyArgs - used for intermediate + * directories which get created internally/recursively during file + * and directory create. 
+ * @param keyName + * @param keyArgs + * @param objectId + * @param bucketInfo + * @param omPathInfo + * @param transactionIndex + * @param serverDefaultReplConfig + * @param config + * @return the OmKeyInfo structure + */ + @SuppressWarnings("parameternumber") + protected OmKeyInfo createDirectoryKeyInfoWithACL(String keyName, + KeyArgs keyArgs, long objectId, OmBucketInfo bucketInfo, + OMFileRequest.OMPathInfo omPathInfo, long transactionIndex, + ReplicationConfig serverDefaultReplConfig, OzoneConfiguration config) throws OMException { + return dirKeyInfoBuilderNoACL(keyName, keyArgs, objectId, + serverDefaultReplConfig) + .setAcls(getAclsForDir(keyArgs, bucketInfo, omPathInfo, config)) + .setUpdateID(transactionIndex).build(); + } + + protected OmKeyInfo.Builder dirKeyInfoBuilderNoACL(String keyName, KeyArgs keyArgs, long objectId, + ReplicationConfig serverDefaultReplConfig) { + String dirName = OzoneFSUtils.addTrailingSlashIfNeeded(keyName); + + OmKeyInfo.Builder keyInfoBuilder = + new OmKeyInfo.Builder() + .setVolumeName(keyArgs.getVolumeName()) + .setBucketName(keyArgs.getBucketName()) + .setKeyName(dirName) + .setOwnerName(keyArgs.getOwnerName()) + .setOmKeyLocationInfos(Collections.singletonList( + new OmKeyLocationInfoGroup(0, new ArrayList<>()))) + .setCreationTime(keyArgs.getModificationTime()) + .setModificationTime(keyArgs.getModificationTime()) + .setDataSize(0); + if (keyArgs.getFactor() != null && keyArgs + .getFactor() != HddsProtos.ReplicationFactor.ZERO && keyArgs + .getType() != HddsProtos.ReplicationType.EC) { + // Factor available and not an EC replication config. + keyInfoBuilder.setReplicationConfig(ReplicationConfig + .fromProtoTypeAndFactor(keyArgs.getType(), keyArgs.getFactor())); + } else if (keyArgs.getType() == HddsProtos.ReplicationType.EC) { + // Found EC type + keyInfoBuilder.setReplicationConfig( + new ECReplicationConfig(keyArgs.getEcReplicationConfig())); + } else { + // default type + keyInfoBuilder.setReplicationConfig(serverDefaultReplConfig); + } + + keyInfoBuilder.setObjectID(objectId); + return keyInfoBuilder; + } + /** * Check Acls for the ozone bucket. * @param ozoneManager @@ -726,12 +1016,12 @@ protected OmKeyInfo prepareKeyInfo( @Nullable OmBucketInfo omBucketInfo, OMFileRequest.OMPathInfo omPathInfo, long transactionLogIndex, long objectID, boolean isRatisEnabled, - ReplicationConfig replicationConfig) + ReplicationConfig replicationConfig, OzoneConfiguration config) throws IOException { return prepareFileInfo(omMetadataManager, keyArgs, dbKeyInfo, size, locations, encInfo, prefixManager, omBucketInfo, omPathInfo, - transactionLogIndex, objectID, isRatisEnabled, replicationConfig); + transactionLogIndex, objectID, isRatisEnabled, replicationConfig, config); } /** @@ -749,12 +1039,12 @@ protected OmKeyInfo prepareFileInfo( @Nullable OmBucketInfo omBucketInfo, OMFileRequest.OMPathInfo omPathInfo, long transactionLogIndex, long objectID, - boolean isRatisEnabled, ReplicationConfig replicationConfig) - throws IOException { + boolean isRatisEnabled, ReplicationConfig replicationConfig, + OzoneConfiguration config) throws IOException { if (keyArgs.getIsMultipartKey()) { return prepareMultipartFileInfo(omMetadataManager, keyArgs, size, locations, encInfo, prefixManager, omBucketInfo, - omPathInfo, transactionLogIndex, objectID); + omPathInfo, transactionLogIndex, objectID, config); //TODO args.getMetadata } if (dbKeyInfo != null) { @@ -797,7 +1087,7 @@ protected OmKeyInfo prepareFileInfo( // Blocks will be appended as version 0. 
return createFileInfo(keyArgs, locations, replicationConfig, keyArgs.getDataSize(), encInfo, prefixManager, - omBucketInfo, omPathInfo, transactionLogIndex, objectID); + omBucketInfo, omPathInfo, transactionLogIndex, objectID, config); } /** @@ -814,7 +1104,8 @@ protected OmKeyInfo createFileInfo( @Nonnull PrefixManager prefixManager, @Nullable OmBucketInfo omBucketInfo, OMFileRequest.OMPathInfo omPathInfo, - long transactionLogIndex, long objectID) { + long transactionLogIndex, long objectID, + OzoneConfiguration config) throws OMException { OmKeyInfo.Builder builder = new OmKeyInfo.Builder(); builder.setVolumeName(keyArgs.getVolumeName()) .setBucketName(keyArgs.getBucketName()) @@ -827,7 +1118,7 @@ protected OmKeyInfo createFileInfo( .setReplicationConfig(replicationConfig) .setFileEncryptionInfo(encInfo) .setAcls(getAclsForKey( - keyArgs, omBucketInfo, omPathInfo, prefixManager)) + keyArgs, omBucketInfo, omPathInfo, prefixManager, config)) .addAllMetadata(KeyValueUtil.getFromProtobuf( keyArgs.getMetadataList())) .addAllTags(KeyValueUtil.getFromProtobuf( @@ -861,8 +1152,8 @@ private OmKeyInfo prepareMultipartFileInfo( FileEncryptionInfo encInfo, @Nonnull PrefixManager prefixManager, @Nullable OmBucketInfo omBucketInfo, OMFileRequest.OMPathInfo omPathInfo, - @Nonnull long transactionLogIndex, long objectID) - throws IOException { + @Nonnull long transactionLogIndex, long objectID, + OzoneConfiguration configuration) throws IOException { Preconditions.checkArgument(args.getMultipartNumber() > 0, "PartNumber Should be greater than zero"); @@ -900,7 +1191,7 @@ private OmKeyInfo prepareMultipartFileInfo( // is not an actual key, it is a part of the key. return createFileInfo(args, locations, partKeyInfo.getReplicationConfig(), size, encInfo, prefixManager, omBucketInfo, omPathInfo, - transactionLogIndex, objectID); + transactionLogIndex, objectID, configuration); } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java index 26c559eef6e..0a2703c769e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java @@ -209,7 +209,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn .setOmKeyLocationInfos(Collections.singletonList( new OmKeyLocationInfoGroup(0, new ArrayList<>(), true))) .setAcls(getAclsForKey(keyArgs, bucketInfo, pathInfo, - ozoneManager.getPrefixManager())) + ozoneManager.getPrefixManager(), ozoneManager.getConfiguration())) .setObjectID(objectID) .setUpdateID(transactionLogIndex) .setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo() ? 
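A point worth calling out in the OMKeyRequest changes above: each ValidateKeyArgs.Builder setter also arms the matching validation flag, so every request type opts into exactly the checks it configures and validateKey() skips the rest. A short sketch of the resulting behaviour, written as it would appear inside the preExecute of an OMKeyRequest subclass (variable names are placeholders for what the concrete request already has in scope):

// Rename-style validation: only the destination key name is checked, and only when
// OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY is enabled. The
// snapshot-reserved-word check is skipped because setSnapshotReservedWord() was never called.
ValidateKeyArgs renameArgs = new ValidateKeyArgs.Builder()
    .setKeyName(toKeyName)
    .build();
validateKey(ozoneManager, renameArgs);

// Create-style validation: both the snapshot-reserved-word check and the
// key-name character check are requested.
ValidateKeyArgs createArgs = new ValidateKeyArgs.Builder()
    .setSnapshotReservedWord(keyArgs.getKeyName())
    .setKeyName(keyArgs.getKeyName())
    .build();
validateKey(ozoneManager, createArgs);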
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java index de78c665110..d55a7b41918 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java @@ -33,7 +33,6 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; -import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestWithFSO; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -121,8 +120,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn volumeName, bucketName); // add all missing parents to dir table - missingParentInfos = OMDirectoryCreateRequestWithFSO - .getAllMissingParentDirInfo(ozoneManager, keyArgs, bucketInfo, + missingParentInfos = getAllMissingParentDirInfo(ozoneManager, keyArgs, bucketInfo, pathInfoFSO, transactionLogIndex); // We are adding uploadId to key, because if multiple users try to @@ -185,7 +183,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn .setOmKeyLocationInfos(Collections.singletonList( new OmKeyLocationInfoGroup(0, new ArrayList<>(), true))) .setAcls(getAclsForKey(keyArgs, bucketInfo, pathInfoFSO, - ozoneManager.getPrefixManager())) + ozoneManager.getPrefixManager(), ozoneManager.getConfiguration())) .setObjectID(pathInfoFSO.getLeafNodeObjectId()) .setUpdateID(transactionLogIndex) .setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo() ? 
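The WithFSO multipart-initiate path above now reuses the same getAllMissingParentDirInfo budget as directory and file create: the base object id reserves 8 low bits, so at most 2^8 - 1 = 255 missing parents can be materialized in one transaction before INVALID_KEY_NAME is thrown. A standalone sketch of that arithmetic, assuming (per the comment in OMKeyRequest) that the base id simply leaves the low 8 bits free; the exact encoding inside getObjectIdFromTxId is not reproduced here:

public final class ObjectIdBudgetSketch {
  // Mirrors MAX_NUM_OF_RECURSIVE_DIRS in OMKeyRequest.
  private static final long MAX_NUM_OF_RECURSIVE_DIRS = 255;

  private ObjectIdBudgetSketch() { }

  // The real base id comes from ozoneManager.getObjectIdFromTxId(trxnLogIndex); the shift
  // below only illustrates the "8 bits set aside" comment, not the production encoding.
  static boolean fitsInOneTransaction(long trxnLogIndex, int missingParentCount) {
    long baseObjId = trxnLogIndex << 8;
    long maxObjId = baseObjId + MAX_NUM_OF_RECURSIVE_DIRS;
    // Parents are assigned baseObjId + 1 .. baseObjId + missingParentCount, and the loop
    // throws once a candidate id would exceed maxObjId.
    return baseObjId + missingParentCount <= maxObjId;
  }

  public static void main(String[] args) {
    System.out.println(fitsInOneTransaction(42L, 255)); // true: exactly at the limit
    System.out.println(fitsInOneTransaction(42L, 256)); // false: exceeds the 8-bit budget
  }
}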
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index 597a40006f9..2bb77005c95 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -33,7 +33,6 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.ozone.om.OzoneConfigUtil; -import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestWithFSO; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.protocolPB.OMPBHelper; import org.apache.ratis.server.protocol.TermIndex; @@ -187,8 +186,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn OMFileRequest.OMPathInfoWithFSO pathInfoFSO = OMFileRequest .verifyDirectoryKeysInPath(omMetadataManager, volumeName, bucketName, keyName, Paths.get(keyName)); - missingParentInfos = OMDirectoryCreateRequestWithFSO - .getAllMissingParentDirInfo(ozoneManager, keyArgs, omBucketInfo, + missingParentInfos = getAllMissingParentDirInfo(ozoneManager, keyArgs, omBucketInfo, pathInfoFSO, trxnLogIndex); if (missingParentInfos != null) { @@ -236,7 +234,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn .setOmKeyLocationInfos(Collections.singletonList( new OmKeyLocationInfoGroup(0, new ArrayList<>(), true))) .setAcls(getAclsForKey(keyArgs, omBucketInfo, pathInfoFSO, - ozoneManager.getPrefixManager())) + ozoneManager.getPrefixManager(), ozoneManager.getConfiguration())) .setObjectID(pathInfoFSO.getLeafNodeObjectId()) .setUpdateID(trxnLogIndex) .setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo() ? diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java new file mode 100644 index 00000000000..6146e1ac105 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java @@ -0,0 +1,190 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.request.s3.tagging; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMClientRequestUtils; +import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.s3.tagging.S3DeleteObjectTaggingResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteObjectTaggingRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteObjectTaggingResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; +import org.apache.ratis.server.protocol.TermIndex; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; + +/** + * Handles delete object tagging request. 
+ */ +public class S3DeleteObjectTaggingRequest extends OMKeyRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(S3DeleteObjectTaggingRequest.class); + + public S3DeleteObjectTaggingRequest(OMRequest omRequest, BucketLayout bucketLayout) { + super(omRequest, bucketLayout); + } + + @Override + public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { + DeleteObjectTaggingRequest deleteObjectTaggingRequest = + super.preExecute(ozoneManager).getDeleteObjectTaggingRequest(); + Preconditions.checkNotNull(deleteObjectTaggingRequest); + + KeyArgs keyArgs = deleteObjectTaggingRequest.getKeyArgs(); + + String keyPath = keyArgs.getKeyName(); + keyPath = validateAndNormalizeKey(ozoneManager.getEnableFileSystemPaths(), + keyPath, getBucketLayout()); + + KeyArgs.Builder newKeyArgs = + keyArgs.toBuilder() + .setKeyName(keyPath); + + KeyArgs resolvedArgs = resolveBucketAndCheckKeyAcls(newKeyArgs.build(), + ozoneManager, ACLType.WRITE); + return getOmRequest().toBuilder() + .setUserInfo(getUserInfo()) + .setDeleteObjectTaggingRequest( + deleteObjectTaggingRequest.toBuilder().setKeyArgs(resolvedArgs)) + .build(); + } + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + final long trxnLogIndex = termIndex.getIndex(); + + DeleteObjectTaggingRequest deleteObjectTaggingRequest = getOmRequest().getDeleteObjectTaggingRequest(); + + KeyArgs keyArgs = deleteObjectTaggingRequest.getKeyArgs(); + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumDeleteObjectTagging(); + + Map auditMap = buildKeyArgsAuditMap(keyArgs); + + OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + boolean acquiredLock = false; + OMClientResponse omClientResponse = null; + IOException exception = null; + Result result = null; + try { + mergeOmLockDetails( + omMetadataManager.getLock() + .acquireWriteLock(BUCKET_LOCK, volumeName, bucketName) + ); + acquiredLock = getOmLockDetails().isLockAcquired(); + + validateBucketAndVolume(omMetadataManager, volumeName, bucketName); + + String dbOzoneKey = + omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); + + OmKeyInfo omKeyInfo = + omMetadataManager.getKeyTable(getBucketLayout()).get(dbOzoneKey); + if (omKeyInfo == null) { + throw new OMException("Key not found", KEY_NOT_FOUND); + } + + // Clear / delete the tags + omKeyInfo.getTags().clear(); + // Set the UpdateID to the current transactionLogIndex + omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + + // Note: Key modification time is not changed because S3 last modified + // time only changes when there are changes in the object content + + // Update table cache + omMetadataManager.getKeyTable(getBucketLayout()).addCacheEntry( + new CacheKey<>(dbOzoneKey), + CacheValue.get(trxnLogIndex, omKeyInfo) + ); + + omClientResponse = new S3DeleteObjectTaggingResponse( + omResponse.setDeleteObjectTaggingResponse(DeleteObjectTaggingResponse.newBuilder()).build(), + omKeyInfo + ); + + result = Result.SUCCESS; + } catch (IOException ex) { + result = Result.FAILURE; + exception = ex; + omClientResponse = new S3DeleteObjectTaggingResponse( + createErrorOMResponse(omResponse, exception), + getBucketLayout() + ); + } finally { + if (acquiredLock) { + 
mergeOmLockDetails(omMetadataManager.getLock() + .releaseWriteLock(BUCKET_LOCK, volumeName, bucketName)); + } + if (omClientResponse != null) { + omClientResponse.setOmLockDetails(getOmLockDetails()); + } + } + + markForAudit(ozoneManager.getAuditLogger(), buildAuditMessage( + OMAction.DELETE_OBJECT_TAGGING, auditMap, exception, getOmRequest().getUserInfo() + )); + + switch (result) { + case SUCCESS: + LOG.debug("Delete object tagging success. Volume:{}, Bucket:{}, Key:{}.", volumeName, + bucketName, keyName); + break; + case FAILURE: + omMetrics.incNumDeleteObjectTaggingFails(); + if (OMClientRequestUtils.shouldLogClientRequestFailure(exception)) { + LOG.error("Delete object tagging failed. Volume:{}, Bucket:{}, Key:{}.", volumeName, + bucketName, keyName, exception); + } + break; + default: + LOG.error("Unrecognized Result for S3DeleteObjectTaggingRequest: {}", + deleteObjectTaggingRequest); + } + + return omClientResponse; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java new file mode 100644 index 00000000000..fb0561702a6 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java @@ -0,0 +1,169 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.request.s3.tagging; + +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.apache.hadoop.ozone.om.request.file.OMFileRequest; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.s3.tagging.S3DeleteObjectTaggingResponseWithFSO; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteObjectTaggingRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteObjectTaggingResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.ratis.server.protocol.TermIndex; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; + +/** + * Handles delete object tagging request for FSO bucket. 
+ */ +public class S3DeleteObjectTaggingRequestWithFSO extends S3DeleteObjectTaggingRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(S3DeleteObjectTaggingRequestWithFSO.class); + + public S3DeleteObjectTaggingRequestWithFSO(OMRequest omRequest, + BucketLayout bucketLayout) { + super(omRequest, bucketLayout); + } + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + final long trxnLogIndex = termIndex.getIndex(); + + DeleteObjectTaggingRequest deleteObjectTaggingRequest = getOmRequest().getDeleteObjectTaggingRequest(); + + KeyArgs keyArgs = deleteObjectTaggingRequest.getKeyArgs(); + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumDeleteObjectTagging(); + + Map auditMap = buildKeyArgsAuditMap(keyArgs); + + OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + boolean acquiredLock = false; + OMClientResponse omClientResponse = null; + IOException exception = null; + Result result = null; + try { + mergeOmLockDetails( + omMetadataManager.getLock() + .acquireWriteLock(BUCKET_LOCK, volumeName, bucketName) + ); + acquiredLock = getOmLockDetails().isLockAcquired(); + + validateBucketAndVolume(omMetadataManager, volumeName, bucketName); + + OzoneFileStatus keyStatus = OMFileRequest.getOMKeyInfoIfExists( + omMetadataManager, volumeName, bucketName, keyName, 0, + ozoneManager.getDefaultReplicationConfig()); + + if (keyStatus == null) { + throw new OMException("Key not found. Key: " + keyName, ResultCodes.KEY_NOT_FOUND); + } + + boolean isDirectory = keyStatus.isDirectory(); + + if (isDirectory) { + throw new OMException("DeleteObjectTagging is not currently supported for FSO directory", + ResultCodes.NOT_SUPPORTED_OPERATION); + } + + OmKeyInfo omKeyInfo = keyStatus.getKeyInfo(); + final long volumeId = omMetadataManager.getVolumeId(volumeName); + final long bucketId = omMetadataManager.getBucketId(volumeName, bucketName); + final String dbKey = omMetadataManager.getOzonePathKey(volumeId, bucketId, + omKeyInfo.getParentObjectID(), omKeyInfo.getFileName()); + + // Clear / delete the tags + omKeyInfo.getTags().clear(); + // Set the UpdateId to the current transactionLogIndex + omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + + // Note: Key modification time is not changed because S3 last modified + // time only changes when there are changes in the object content + + // Update table cache for file table. 
No need to check directory table since + // DeleteObjectTagging rejects operations on FSO directory + omMetadataManager.getKeyTable(getBucketLayout()) + .addCacheEntry(new CacheKey<>(dbKey), + CacheValue.get(trxnLogIndex, omKeyInfo)); + + omClientResponse = new S3DeleteObjectTaggingResponseWithFSO( + omResponse.setDeleteObjectTaggingResponse(DeleteObjectTaggingResponse.newBuilder()).build(), + omKeyInfo, volumeId, bucketId + ); + + result = Result.SUCCESS; + } catch (IOException ex) { + result = Result.FAILURE; + exception = ex; + omClientResponse = new S3DeleteObjectTaggingResponseWithFSO( + createErrorOMResponse(omResponse, exception), + getBucketLayout() + ); + } finally { + if (acquiredLock) { + mergeOmLockDetails(omMetadataManager.getLock() + .releaseWriteLock(BUCKET_LOCK, volumeName, bucketName)); + } + if (omClientResponse != null) { + omClientResponse.setOmLockDetails(getOmLockDetails()); + } + } + + switch (result) { + case SUCCESS: + LOG.debug("Delete object tagging success. Volume:{}, Bucket:{}, Key:{}.", volumeName, + bucketName, keyName); + break; + case FAILURE: + omMetrics.incNumDeleteObjectTaggingFails(); + LOG.error("Delete object tagging failed. Volume:{}, Bucket:{}, Key:{}.", volumeName, + bucketName, keyName, exception); + break; + default: + LOG.error("Unrecognized Result for S3DeleteObjectTaggingRequest: {}", + deleteObjectTaggingRequest); + } + + return omClientResponse; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java new file mode 100644 index 00000000000..aab67830383 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java @@ -0,0 +1,192 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.request.s3.tagging; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMClientRequestUtils; +import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.s3.tagging.S3PutObjectTaggingResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingResponse; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; +import org.apache.ratis.server.protocol.TermIndex; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; + +/** + * Handles put object tagging request. 
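As with the delete path, a short sketch of driving this handler through the S3 gateway with the AWS SDK v2; the S3Client is assumed to be built as in the delete-tagging sketch above, and the bucket, key and tag values are placeholders. S3 PutObjectTagging replaces the whole tag set, which matches the clear-then-putAll logic in this request.

    import software.amazon.awssdk.services.s3.S3Client;
    import software.amazon.awssdk.services.s3.model.PutObjectTaggingRequest;
    import software.amazon.awssdk.services.s3.model.Tag;
    import software.amazon.awssdk.services.s3.model.Tagging;

    public final class PutObjectTaggingExample {
      /** Replaces any existing tags on the object with exactly the given pairs. */
      static void replaceTags(S3Client s3, String bucket, String key) {
        s3.putObjectTagging(PutObjectTaggingRequest.builder()
            .bucket(bucket)
            .key(key)
            .tagging(Tagging.builder()
                .tagSet(
                    Tag.builder().key("project").value("ozone").build(),
                    Tag.builder().key("tier").value("hot").build())
                .build())
            .build());
      }
    }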
+ */ +public class S3PutObjectTaggingRequest extends OMKeyRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(S3PutObjectTaggingRequest.class); + + public S3PutObjectTaggingRequest(OMRequest omRequest, BucketLayout bucketLayout) { + super(omRequest, bucketLayout); + } + + @Override + public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { + PutObjectTaggingRequest putObjectTaggingRequest = + super.preExecute(ozoneManager).getPutObjectTaggingRequest(); + Preconditions.checkNotNull(putObjectTaggingRequest); + + KeyArgs keyArgs = putObjectTaggingRequest.getKeyArgs(); + + String keyPath = keyArgs.getKeyName(); + keyPath = validateAndNormalizeKey(ozoneManager.getEnableFileSystemPaths(), + keyPath, getBucketLayout()); + + KeyArgs.Builder newKeyArgs = + keyArgs.toBuilder() + .setKeyName(keyPath); + + KeyArgs resolvedArgs = resolveBucketAndCheckKeyAcls(newKeyArgs.build(), + ozoneManager, ACLType.WRITE); + return getOmRequest().toBuilder() + .setUserInfo(getUserInfo()) + .setPutObjectTaggingRequest( + putObjectTaggingRequest.toBuilder().setKeyArgs(resolvedArgs)) + .build(); + } + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + final long trxnLogIndex = termIndex.getIndex(); + + PutObjectTaggingRequest putObjectTaggingRequest = getOmRequest().getPutObjectTaggingRequest(); + + KeyArgs keyArgs = putObjectTaggingRequest.getKeyArgs(); + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumPutObjectTagging(); + + Map auditMap = buildKeyArgsAuditMap(keyArgs); + + OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + boolean acquiredLock = false; + OMClientResponse omClientResponse = null; + IOException exception = null; + Result result = null; + try { + mergeOmLockDetails( + omMetadataManager.getLock() + .acquireWriteLock(BUCKET_LOCK, volumeName, bucketName) + ); + acquiredLock = getOmLockDetails().isLockAcquired(); + + validateBucketAndVolume(omMetadataManager, volumeName, bucketName); + + String dbOzoneKey = + omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); + + OmKeyInfo omKeyInfo = + omMetadataManager.getKeyTable(getBucketLayout()).get(dbOzoneKey); + if (omKeyInfo == null) { + throw new OMException("Key not found", KEY_NOT_FOUND); + } + + // Set the tags + omKeyInfo.getTags().clear(); + omKeyInfo.getTags().putAll(KeyValueUtil.getFromProtobuf(keyArgs.getTagsList())); + // Set the UpdateID to the current transactionLogIndex + omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + + // Note: Key modification time is not changed because S3 last modified + // time only changes when there are changes in the object content + + // Update table cache + omMetadataManager.getKeyTable(getBucketLayout()).addCacheEntry( + new CacheKey<>(dbOzoneKey), + CacheValue.get(trxnLogIndex, omKeyInfo) + ); + + omClientResponse = new S3PutObjectTaggingResponse( + omResponse.setPutObjectTaggingResponse(PutObjectTaggingResponse.newBuilder()).build(), + omKeyInfo + ); + + result = Result.SUCCESS; + } catch (IOException ex) { + result = Result.FAILURE; + exception = ex; + omClientResponse = new S3PutObjectTaggingResponse( + createErrorOMResponse(omResponse, exception), + getBucketLayout() + ); + } finally { + if (acquiredLock) { + 
mergeOmLockDetails(omMetadataManager.getLock() + .releaseWriteLock(BUCKET_LOCK, volumeName, bucketName)); + } + if (omClientResponse != null) { + omClientResponse.setOmLockDetails(getOmLockDetails()); + } + } + + markForAudit(ozoneManager.getAuditLogger(), buildAuditMessage( + OMAction.PUT_OBJECT_TAGGING, auditMap, exception, getOmRequest().getUserInfo() + )); + + switch (result) { + case SUCCESS: + LOG.debug("Put object tagging success. Volume:{}, Bucket:{}, Key:{}.", volumeName, + bucketName, keyName); + break; + case FAILURE: + omMetrics.incNumPutObjectTaggingFails(); + if (OMClientRequestUtils.shouldLogClientRequestFailure(exception)) { + LOG.error("Put object tagging failed. Volume:{}, Bucket:{}, Key:{}.", volumeName, + bucketName, keyName, exception); + } + break; + default: + LOG.error("Unrecognized Result for S3PutObjectTaggingRequest: {}", + putObjectTaggingRequest); + } + + return omClientResponse; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java new file mode 100644 index 00000000000..2b6ca8601cb --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java @@ -0,0 +1,171 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.request.s3.tagging; + +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.apache.hadoop.ozone.om.request.file.OMFileRequest; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.s3.tagging.S3PutObjectTaggingResponseWithFSO; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingResponse; +import org.apache.ratis.server.protocol.TermIndex; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; + +/** + * Handles put object tagging request for FSO bucket. 
+ */ +public class S3PutObjectTaggingRequestWithFSO extends S3PutObjectTaggingRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(S3PutObjectTaggingRequestWithFSO.class); + + public S3PutObjectTaggingRequestWithFSO(OMRequest omRequest, + BucketLayout bucketLayout) { + super(omRequest, bucketLayout); + } + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + final long trxnLogIndex = termIndex.getIndex(); + + PutObjectTaggingRequest putObjectTaggingRequest = getOmRequest().getPutObjectTaggingRequest(); + + KeyArgs keyArgs = putObjectTaggingRequest.getKeyArgs(); + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumPutObjectTagging(); + + Map auditMap = buildKeyArgsAuditMap(keyArgs); + + OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + boolean acquiredLock = false; + OMClientResponse omClientResponse = null; + IOException exception = null; + Result result = null; + try { + mergeOmLockDetails( + omMetadataManager.getLock() + .acquireWriteLock(BUCKET_LOCK, volumeName, bucketName) + ); + acquiredLock = getOmLockDetails().isLockAcquired(); + + validateBucketAndVolume(omMetadataManager, volumeName, bucketName); + + OzoneFileStatus keyStatus = OMFileRequest.getOMKeyInfoIfExists( + omMetadataManager, volumeName, bucketName, keyName, 0, + ozoneManager.getDefaultReplicationConfig()); + + if (keyStatus == null) { + throw new OMException("Key not found. Key: " + keyName, ResultCodes.KEY_NOT_FOUND); + } + + boolean isDirectory = keyStatus.isDirectory(); + + if (isDirectory) { + throw new OMException("PutObjectTagging is not currently supported for FSO directory", + ResultCodes.NOT_SUPPORTED_OPERATION); + } + + OmKeyInfo omKeyInfo = keyStatus.getKeyInfo(); + final long volumeId = omMetadataManager.getVolumeId(volumeName); + final long bucketId = omMetadataManager.getBucketId(volumeName, bucketName); + final String dbKey = omMetadataManager.getOzonePathKey(volumeId, bucketId, + omKeyInfo.getParentObjectID(), omKeyInfo.getFileName()); + + // Set the tags + omKeyInfo.getTags().clear(); + omKeyInfo.getTags().putAll(KeyValueUtil.getFromProtobuf(keyArgs.getTagsList())); + // Set the UpdateId to the current transactionLogIndex + omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + + // Note: Key modification time is not changed because S3 last modified + // time only changes when there are changes in the object content + + // Update table cache for file table. 
No need to check directory table since + // PutObjectTagging rejects operations on FSO directory + omMetadataManager.getKeyTable(getBucketLayout()) + .addCacheEntry(new CacheKey<>(dbKey), + CacheValue.get(trxnLogIndex, omKeyInfo)); + + omClientResponse = new S3PutObjectTaggingResponseWithFSO( + omResponse.setPutObjectTaggingResponse(PutObjectTaggingResponse.newBuilder()).build(), + omKeyInfo, volumeId, bucketId + ); + + result = Result.SUCCESS; + } catch (IOException ex) { + result = Result.FAILURE; + exception = ex; + omClientResponse = new S3PutObjectTaggingResponseWithFSO( + createErrorOMResponse(omResponse, exception), + getBucketLayout() + ); + } finally { + if (acquiredLock) { + mergeOmLockDetails(omMetadataManager.getLock() + .releaseWriteLock(BUCKET_LOCK, volumeName, bucketName)); + } + if (omClientResponse != null) { + omClientResponse.setOmLockDetails(getOmLockDetails()); + } + } + + switch (result) { + case SUCCESS: + LOG.debug("Put object tagging success. Volume:{}, Bucket:{}, Key:{}.", volumeName, + bucketName, keyName); + break; + case FAILURE: + omMetrics.incNumPutObjectTaggingFails(); + LOG.error("Put object tagging failed. Volume:{}, Bucket:{}, Key:{}.", volumeName, + bucketName, keyName, exception); + break; + default: + LOG.error("Unrecognized Result for S3PutObjectTaggingRequest: {}", + putObjectTaggingRequest); + } + + return omClientResponse; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/package-info.java new file mode 100644 index 00000000000..d3f26d195ad --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/package-info.java @@ -0,0 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +/** + * Package contains classes related to S3 tagging requests. + */ +package org.apache.hadoop.ozone.om.request.s3.tagging; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java index 2ded4f6a83e..59cc02b6fdb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java @@ -18,10 +18,12 @@ package org.apache.hadoop.ozone.om.request.snapshot; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.TransactionInfo; +import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; @@ -75,8 +77,8 @@ public class OMSnapshotCreateRequest extends OMClientRequest { LoggerFactory.getLogger(OMSnapshotCreateRequest.class); private final String snapshotPath; - private final String volumeName; - private final String bucketName; + private String volumeName; + private String bucketName; private final String snapshotName; private final SnapshotInfo snapshotInfo; @@ -106,7 +108,11 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { final OMRequest omRequest = super.preExecute(ozoneManager); // Verify name OmUtils.validateSnapshotName(snapshotName); - + // Updating the volumeName & bucketName in case the bucket is a linked bucket. We need to do this before a + // permission check, since linked bucket permissions and source bucket permissions could be different. 
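A toy, self-contained illustration (plain Java, not Ozone code) of why the snapshot requests above resolve bucket links before the permission check: a link and its source bucket can have different owners and ACLs, and snapshots always belong to the source bucket. The link, owners and user names are invented for the example.

    import java.util.Map;

    public final class LinkResolutionSketch {
      // Assumed sample data: one bucket link and the owners of each bucket entry.
      private static final Map<String, String> LINKS =
          Map.of("vol-link/buck-link", "vol-src/buck-src");
      private static final Map<String, String> BUCKET_OWNERS =
          Map.of("vol-src/buck-src", "alice", "vol-link/buck-link", "bob");

      static boolean mayCreateSnapshot(String volume, String bucket, String user) {
        // Resolve the link first, then authorize against the resolved (source) bucket.
        String resolved = LINKS.getOrDefault(volume + "/" + bucket, volume + "/" + bucket);
        return user.equals(BUCKET_OWNERS.get(resolved));
      }

      public static void main(String[] args) {
        // "alice" owns the source bucket, so she may snapshot it via the link;
        // "bob" only owns the link entry and is rejected.
        System.out.println(mayCreateSnapshot("vol-link", "buck-link", "alice")); // true
        System.out.println(mayCreateSnapshot("vol-link", "buck-link", "bob"));   // false
      }
    }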
+ ResolvedBucket bucket = ozoneManager.resolveBucketLink(Pair.of(volumeName, bucketName), this); + this.volumeName = bucket.realVolume(); + this.bucketName = bucket.realBucket(); UserGroupInformation ugi = createUGIForApi(); String bucketOwner = ozoneManager.getBucketOwner(volumeName, bucketName, IAccessAuthorizer.ACLType.READ, OzoneObj.ResourceType.BUCKET); @@ -116,12 +122,12 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { "Only bucket owners and Ozone admins can create snapshots", OMException.ResultCodes.PERMISSION_DENIED); } - - return omRequest.toBuilder().setCreateSnapshotRequest( - omRequest.getCreateSnapshotRequest().toBuilder() - .setSnapshotId(toProtobuf(UUID.randomUUID())) - .setCreationTime(Time.now()) - .build()).build(); + CreateSnapshotRequest.Builder createSnapshotRequest = omRequest.getCreateSnapshotRequest().toBuilder() + .setSnapshotId(toProtobuf(UUID.randomUUID())) + .setVolumeName(volumeName) + .setBucketName(this.bucketName) + .setCreationTime(Time.now()); + return omRequest.toBuilder().setCreateSnapshotRequest(createSnapshotRequest.build()).build(); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java index a2b00138cf3..95f99c627c4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java @@ -18,6 +18,8 @@ package org.apache.hadoop.ozone.om.request.snapshot; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; @@ -82,6 +84,11 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { String volumeName = deleteSnapshotRequest.getVolumeName(); String bucketName = deleteSnapshotRequest.getBucketName(); + // Updating the volumeName & bucketName in case the bucket is a linked bucket. We need to do this before a + // permission check, since linked bucket permissions and source bucket permissions could be different. 
+ ResolvedBucket resolvedBucket = ozoneManager.resolveBucketLink(Pair.of(volumeName, bucketName), this); + volumeName = resolvedBucket.realVolume(); + bucketName = resolvedBucket.realBucket(); // Permission check UserGroupInformation ugi = createUGIForApi(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java index 8341f875504..8cf0579647c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java @@ -25,6 +25,8 @@ import java.io.IOException; import java.nio.file.InvalidPathException; + +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OmUtils; @@ -32,6 +34,7 @@ import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMClientRequest; @@ -75,6 +78,11 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { String volumeName = renameSnapshotRequest.getVolumeName(); String bucketName = renameSnapshotRequest.getBucketName(); + // Updating the volumeName & bucketName in case the bucket is a linked bucket. We need to do this before a + // permission check, since linked bucket permissions and source bucket permissions could be different. 
+ ResolvedBucket resolvedBucket = ozoneManager.resolveBucketLink(Pair.of(volumeName, bucketName), this); + volumeName = resolvedBucket.realVolume(); + bucketName = resolvedBucket.realBucket(); // Permission check UserGroupInformation ugi = createUGIForApi(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java index 01dbb5ba1e0..a22775107b9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java @@ -20,11 +20,15 @@ import java.io.IOException; import java.nio.file.InvalidPathException; +import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; @@ -55,6 +59,7 @@ .VolumeInfo; import org.apache.hadoop.util.Time; +import static org.apache.hadoop.ozone.om.helpers.OzoneAclUtil.getDefaultAclList; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.USER_LOCK; @@ -160,6 +165,18 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn volumeList = omMetadataManager.getUserTable().get(dbUserKey); volumeList = addVolumeToOwnerList(volumeList, volume, owner, ozoneManager.getMaxUserVolumeCount(), transactionLogIndex); + + // Add default ACL for volume + List listOfAcls = getDefaultAclList(UserGroupInformation.createRemoteUser(owner), + ozoneManager.getConfiguration()); + // ACLs from VolumeArgs + if (omVolumeArgs.getAcls() != null) { + listOfAcls.addAll(omVolumeArgs.getAcls()); + } + // Remove the duplicates + listOfAcls = listOfAcls.stream().distinct().collect(Collectors.toList()); + omVolumeArgs.setAcls(listOfAcls); + createVolume(omMetadataManager, omVolumeArgs, volumeList, dbVolumeKey, dbUserKey, transactionLogIndex); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3DeleteObjectTaggingResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3DeleteObjectTaggingResponse.java new file mode 100644 index 00000000000..10181c9468f --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3DeleteObjectTaggingResponse.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.response.s3.tagging; + +import jakarta.annotation.Nonnull; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.om.response.key.OmKeyResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; + +import java.io.IOException; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; + +/** + * Response for delete object tagging request. + */ +@CleanupTableInfo(cleanupTables = {KEY_TABLE}) +public class S3DeleteObjectTaggingResponse extends OmKeyResponse { + + private OmKeyInfo omKeyInfo; + + public S3DeleteObjectTaggingResponse(@Nonnull OMResponse omResponse, + @Nonnull OmKeyInfo omKeyInfo) { + super(omResponse); + this.omKeyInfo = omKeyInfo; + } + + /** + * For when the request is not successful. + * For a successful request, the other constructor should be used. + */ + public S3DeleteObjectTaggingResponse(@Nonnull OMResponse omResponse, + @Nonnull BucketLayout bucketLayout) { + super(omResponse, bucketLayout); + checkStatusNotOK(); + } + + @Override + protected void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + omMetadataManager.getKeyTable(getBucketLayout()).putWithBatch(batchOperation, + omMetadataManager.getOzoneKey( + omKeyInfo.getVolumeName(), + omKeyInfo.getBucketName(), + omKeyInfo.getKeyName()), + omKeyInfo + ); + } + + protected OmKeyInfo getOmKeyInfo() { + return omKeyInfo; + } + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3DeleteObjectTaggingResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3DeleteObjectTaggingResponseWithFSO.java new file mode 100644 index 00000000000..bb42668ad05 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3DeleteObjectTaggingResponseWithFSO.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.response.s3.tagging; + +import jakarta.annotation.Nonnull; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; + +import java.io.IOException; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; + +/** + * Response for delete object tagging request for FSO bucket. + */ +@CleanupTableInfo(cleanupTables = {FILE_TABLE}) +public class S3DeleteObjectTaggingResponseWithFSO extends S3DeleteObjectTaggingResponse { + + private long volumeId; + private long bucketId; + + public S3DeleteObjectTaggingResponseWithFSO(@Nonnull OMResponse omResponse, + @Nonnull OmKeyInfo omKeyInfo, + @Nonnull long volumeId, + @Nonnull long bucketId) { + super(omResponse, omKeyInfo); + this.volumeId = volumeId; + this.bucketId = bucketId; + } + + /** + * For when the request is not successful. + * For a successful request, the other constructor should be used. + */ + public S3DeleteObjectTaggingResponseWithFSO(@Nonnull OMResponse omResponse, + @Nonnull BucketLayout bucketLayout) { + super(omResponse, bucketLayout); + } + + @Override + protected void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + String ozoneDbKey = omMetadataManager.getOzonePathKey(volumeId, bucketId, + getOmKeyInfo().getParentObjectID(), getOmKeyInfo().getFileName()); + omMetadataManager.getKeyTable(getBucketLayout()) + .putWithBatch(batchOperation, ozoneDbKey, getOmKeyInfo()); + } + + @Override + public BucketLayout getBucketLayout() { + return BucketLayout.FILE_SYSTEM_OPTIMIZED; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3PutObjectTaggingResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3PutObjectTaggingResponse.java new file mode 100644 index 00000000000..2acefe2ec6e --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3PutObjectTaggingResponse.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.response.s3.tagging; + +import jakarta.annotation.Nonnull; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.om.response.key.OmKeyResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; + +import java.io.IOException; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; + +/** + * Response for put object tagging request. + */ +@CleanupTableInfo(cleanupTables = {KEY_TABLE}) +public class S3PutObjectTaggingResponse extends OmKeyResponse { + + private OmKeyInfo omKeyInfo; + + public S3PutObjectTaggingResponse(@Nonnull OMResponse omResponse, + @Nonnull OmKeyInfo omKeyinfo) { + super(omResponse); + this.omKeyInfo = omKeyinfo; + } + + /** + * For when the request is not successful. + * For a successful request, the other constructor should be used. + */ + public S3PutObjectTaggingResponse(@Nonnull OMResponse omResponse, + @Nonnull BucketLayout bucketLayout) { + super(omResponse, bucketLayout); + checkStatusNotOK(); + } + + @Override + protected void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + omMetadataManager.getKeyTable(getBucketLayout()).putWithBatch(batchOperation, + omMetadataManager.getOzoneKey( + omKeyInfo.getVolumeName(), + omKeyInfo.getBucketName(), + omKeyInfo.getKeyName()), + omKeyInfo + ); + } + + protected OmKeyInfo getOmKeyInfo() { + return omKeyInfo; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3PutObjectTaggingResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3PutObjectTaggingResponseWithFSO.java new file mode 100644 index 00000000000..6152fbabe89 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3PutObjectTaggingResponseWithFSO.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.response.s3.tagging; + +import jakarta.annotation.Nonnull; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; + +import java.io.IOException; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; + +/** + * Response for put object tagging request for FSO bucket. + */ +@CleanupTableInfo(cleanupTables = {FILE_TABLE}) +public class S3PutObjectTaggingResponseWithFSO extends S3PutObjectTaggingResponse { + + private long volumeId; + private long bucketId; + + public S3PutObjectTaggingResponseWithFSO(@Nonnull OMResponse omResponse, + @Nonnull OmKeyInfo omKeyInfo, + @Nonnull long volumeId, + @Nonnull long bucketId) { + super(omResponse, omKeyInfo); + this.volumeId = volumeId; + this.bucketId = bucketId; + } + + /** + * For when the request is not successful. + * For a successful request, the other constructor should be used. + */ + public S3PutObjectTaggingResponseWithFSO(@Nonnull OMResponse omResponse, + @Nonnull BucketLayout bucketLayout) { + super(omResponse, bucketLayout); + } + + @Override + protected void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + String ozoneDbKey = omMetadataManager.getOzonePathKey(volumeId, bucketId, + getOmKeyInfo().getParentObjectID(), getOmKeyInfo().getFileName()); + omMetadataManager.getKeyTable(getBucketLayout()) + .putWithBatch(batchOperation, ozoneDbKey, getOmKeyInfo()); + } + + @Override + public BucketLayout getBucketLayout() { + return BucketLayout.FILE_SYSTEM_OPTIMIZED; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/package-info.java new file mode 100644 index 00000000000..9a104c4663a --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/package-info.java @@ -0,0 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +/** + * Package contains classes related to S3 tagging responses. + */ +package org.apache.hadoop.ozone.om.response.s3.tagging; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java index d5da77ca0aa..0ac6c986606 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java @@ -252,7 +252,7 @@ private int submitPurgeKeysRequest(List results, try { OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, runCount.get()); } catch (ServiceException e) { - LOG.error("PurgeKey request failed. Will retry at next run."); + LOG.error("PurgeKey request failed. Will retry at next run.", e); return 0; } @@ -280,7 +280,7 @@ private void addToMap(Map, List> map, String object protected void submitPurgePaths(List requests, String snapTableKey, - UUID expectedPreviousSnapshotId) { + UUID expectedPreviousSnapshotId, long rnCnt) { OzoneManagerProtocolProtos.PurgeDirectoriesRequest.Builder purgeDirRequest = OzoneManagerProtocolProtos.PurgeDirectoriesRequest.newBuilder(); @@ -305,9 +305,9 @@ protected void submitPurgePaths(List requests, // Submit Purge paths request to OM try { - OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, runCount.get()); + OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, rnCnt); } catch (ServiceException e) { - LOG.error("PurgePaths request failed. Will retry at next run."); + LOG.error("PurgePaths request failed. 
Will retry at next run.", e); } } @@ -400,7 +400,7 @@ public long optimizeDirDeletesAndSubmitRequest(long remainNum, List purgePathRequestList, String snapTableKey, long startTime, int remainingBufLimit, KeyManager keyManager, - UUID expectedPreviousSnapshotId) { + UUID expectedPreviousSnapshotId, long rnCnt) { // Optimization to handle delete sub-dir and keys to remove quickly // This case will be useful to handle when depth of directory is high @@ -422,6 +422,8 @@ public long optimizeDirDeletesAndSubmitRequest(long remainNum, } consumedSize += request.getSerializedSize(); purgePathRequestList.add(request); + // reduce remain count for self, sub-files, and sub-directories + remainNum = remainNum - 1; remainNum = remainNum - request.getDeletedSubFilesCount(); remainNum = remainNum - request.getMarkDeletedSubDirsCount(); // Count up the purgeDeletedDir, subDirs and subFiles @@ -440,7 +442,7 @@ public long optimizeDirDeletesAndSubmitRequest(long remainNum, } if (!purgePathRequestList.isEmpty()) { - submitPurgePaths(purgePathRequestList, snapTableKey, expectedPreviousSnapshotId); + submitPurgePaths(purgePathRequestList, snapTableKey, expectedPreviousSnapshotId, rnCnt); } if (dirNum != 0 || subDirNum != 0 || subFileNum != 0) { @@ -453,7 +455,7 @@ public long optimizeDirDeletesAndSubmitRequest(long remainNum, "DeletedDirectoryTable, iteration elapsed: {}ms," + " totalRunCount: {}", dirNum, subdirDelNum, subFileNum, (subDirNum - subdirDelNum), - Time.monotonicNow() - startTime, getRunCount()); + Time.monotonicNow() - startTime, rnCnt); } return remainNum; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java index b3000515998..a8270f92f2b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.utils.BackgroundTask; import org.apache.hadoop.hdds.utils.BackgroundTaskQueue; import org.apache.hadoop.hdds.utils.BackgroundTaskResult; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.Table.KeyValue; import org.apache.hadoop.hdds.utils.db.TableIterator; @@ -49,6 +50,7 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT; @@ -74,10 +76,10 @@ public class DirectoryDeletingService extends AbstractKeyDeletingService { public static final Logger LOG = LoggerFactory.getLogger(DirectoryDeletingService.class); - // Use only a single thread for DirDeletion. Multiple threads would read - // or write to same tables and can send deletion requests for same key - // multiple times. - private static final int DIR_DELETING_CORE_POOL_SIZE = 1; + // Using multi thread for DirDeletion. Multiple threads would read + // from parent directory info from deleted directory table concurrently + // and send deletion requests. 
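The DeletedDirSupplier introduced here hands entries from a single deleted-directory iterator to several DirDeletingTask workers. A stand-alone sketch of that pattern (plain Java, not Ozone code) shows why synchronizing the supplier's get() is enough to guarantee that each pending directory is handed to exactly one worker:

    import java.util.Iterator;
    import java.util.List;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    public final class SharedSupplierSketch {
      private final Iterator<String> iterator;

      SharedSupplierSketch(Iterator<String> iterator) {
        this.iterator = iterator;
      }

      // All workers call this; synchronization prevents two workers from
      // receiving the same entry or racing on the shared iterator.
      synchronized String get() {
        return iterator.hasNext() ? iterator.next() : null;
      }

      public static void main(String[] args) throws InterruptedException {
        SharedSupplierSketch supplier = new SharedSupplierSketch(
            List.of("dir1", "dir2", "dir3", "dir4", "dir5").iterator());
        ExecutorService pool = Executors.newFixedThreadPool(3);
        for (int i = 0; i < 3; i++) {
          pool.submit(() -> {
            String dir;
            while ((dir = supplier.get()) != null) {
              // Each "deleted directory" is processed by exactly one worker.
              System.out.println(Thread.currentThread().getName() + " -> " + dir);
            }
          });
        }
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
      }
    }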
+ private final int dirDeletingCorePoolSize; private static final int MIN_ERR_LIMIT_PER_TASK = 1000; // Number of items(dirs/files) to be batched in an iteration. @@ -86,11 +88,15 @@ public class DirectoryDeletingService extends AbstractKeyDeletingService { private final AtomicBoolean suspended; private AtomicBoolean isRunningOnAOS; + private final DeletedDirSupplier deletedDirSupplier; + + private AtomicInteger taskCount = new AtomicInteger(0); + public DirectoryDeletingService(long interval, TimeUnit unit, long serviceTimeout, OzoneManager ozoneManager, - OzoneConfiguration configuration) { + OzoneConfiguration configuration, int dirDeletingServiceCorePoolSize) { super(DirectoryDeletingService.class.getSimpleName(), interval, unit, - DIR_DELETING_CORE_POOL_SIZE, serviceTimeout, ozoneManager, null); + dirDeletingServiceCorePoolSize, serviceTimeout, ozoneManager, null); this.pathLimitPerTask = configuration .getInt(OZONE_PATH_DELETING_LIMIT_PER_TASK, OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT); @@ -102,6 +108,9 @@ public DirectoryDeletingService(long interval, TimeUnit unit, this.ratisByteLimit = (int) (limit * 0.9); this.suspended = new AtomicBoolean(false); this.isRunningOnAOS = new AtomicBoolean(false); + this.dirDeletingCorePoolSize = dirDeletingServiceCorePoolSize; + deletedDirSupplier = new DeletedDirSupplier(); + taskCount.set(0); } private boolean shouldRun() { @@ -116,6 +125,10 @@ public boolean isRunningOnAOS() { return isRunningOnAOS.get(); } + public AtomicInteger getTaskCount() { + return taskCount; + } + /** * Suspend the service. */ @@ -135,10 +148,55 @@ public void resume() { @Override public BackgroundTaskQueue getTasks() { BackgroundTaskQueue queue = new BackgroundTaskQueue(); - queue.add(new DirectoryDeletingService.DirDeletingTask(this)); + if (taskCount.get() > 0) { + LOG.info("{} Directory deleting task(s) already in progress.", + taskCount.get()); + return queue; + } + try { + deletedDirSupplier.reInitItr(); + } catch (IOException ex) { + LOG.error("Unable to get the iterator.", ex); + return queue; + } + taskCount.set(dirDeletingCorePoolSize); + for (int i = 0; i < dirDeletingCorePoolSize; i++) { + queue.add(new DirectoryDeletingService.DirDeletingTask(this)); + } return queue; } + @Override + public void shutdown() { + super.shutdown(); + deletedDirSupplier.closeItr(); + } + + private final class DeletedDirSupplier { + private TableIterator> + deleteTableIterator; + + private synchronized Table.KeyValue get() + throws IOException { + if (deleteTableIterator.hasNext()) { + return deleteTableIterator.next(); + } + return null; + } + + private synchronized void closeItr() { + IOUtils.closeQuietly(deleteTableIterator); + deleteTableIterator = null; + } + + private synchronized void reInitItr() throws IOException { + closeItr(); + deleteTableIterator = + getOzoneManager().getMetadataManager().getDeletedDirTable() + .iterator(); + } + } + private final class DirDeletingTask implements BackgroundTask { private final DirectoryDeletingService directoryDeletingService; @@ -153,87 +211,93 @@ public int getPriority() { @Override public BackgroundTaskResult call() { - if (shouldRun()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Running DirectoryDeletingService"); - } - isRunningOnAOS.set(true); - getRunCount().incrementAndGet(); - long dirNum = 0L; - long subDirNum = 0L; - long subFileNum = 0L; - long remainNum = pathLimitPerTask; - int consumedSize = 0; - List purgePathRequestList = new ArrayList<>(); - List> allSubDirList - = new ArrayList<>((int) remainNum); - - 
Table.KeyValue pendingDeletedDirInfo; - - try (TableIterator> - deleteTableIterator = getOzoneManager().getMetadataManager(). - getDeletedDirTable().iterator()) { + try { + if (shouldRun()) { + if (LOG.isDebugEnabled()) { + LOG.debug("Running DirectoryDeletingService"); + } + isRunningOnAOS.set(true); + long rnCnt = getRunCount().incrementAndGet(); + long dirNum = 0L; + long subDirNum = 0L; + long subFileNum = 0L; + long remainNum = pathLimitPerTask; + int consumedSize = 0; + List purgePathRequestList = new ArrayList<>(); + List> allSubDirList = + new ArrayList<>((int) remainNum); + + Table.KeyValue pendingDeletedDirInfo; // This is to avoid race condition b/w purge request and snapshot chain updation. For AOS taking the global // snapshotId since AOS could process multiple buckets in one iteration. - UUID expectedPreviousSnapshotId = - ((OmMetadataManagerImpl)getOzoneManager().getMetadataManager()).getSnapshotChainManager() - .getLatestGlobalSnapshotId(); - - long startTime = Time.monotonicNow(); - while (remainNum > 0 && deleteTableIterator.hasNext()) { - pendingDeletedDirInfo = deleteTableIterator.next(); - // Do not reclaim if the directory is still being referenced by - // the previous snapshot. - if (previousSnapshotHasDir(pendingDeletedDirInfo)) { - continue; - } + try { + UUID expectedPreviousSnapshotId = + ((OmMetadataManagerImpl) getOzoneManager().getMetadataManager()).getSnapshotChainManager() + .getLatestGlobalSnapshotId(); - PurgePathRequest request = prepareDeleteDirRequest( - remainNum, pendingDeletedDirInfo.getValue(), - pendingDeletedDirInfo.getKey(), allSubDirList, - getOzoneManager().getKeyManager()); - if (isBufferLimitCrossed(ratisByteLimit, consumedSize, - request.getSerializedSize())) { - if (purgePathRequestList.size() != 0) { - // if message buffer reaches max limit, avoid sending further - remainNum = 0; + long startTime = Time.monotonicNow(); + while (remainNum > 0) { + pendingDeletedDirInfo = getPendingDeletedDirInfo(); + if (pendingDeletedDirInfo == null) { break; } - // if directory itself is having a lot of keys / files, - // reduce capacity to minimum level - remainNum = MIN_ERR_LIMIT_PER_TASK; - request = prepareDeleteDirRequest( - remainNum, pendingDeletedDirInfo.getValue(), + // Do not reclaim if the directory is still being referenced by + // the previous snapshot. 
+ if (previousSnapshotHasDir(pendingDeletedDirInfo)) { + continue; + } + + PurgePathRequest request = prepareDeleteDirRequest(remainNum, + pendingDeletedDirInfo.getValue(), pendingDeletedDirInfo.getKey(), allSubDirList, getOzoneManager().getKeyManager()); + if (isBufferLimitCrossed(ratisByteLimit, consumedSize, + request.getSerializedSize())) { + if (purgePathRequestList.size() != 0) { + // if message buffer reaches max limit, avoid sending further + remainNum = 0; + break; + } + // if directory itself is having a lot of keys / files, + // reduce capacity to minimum level + remainNum = MIN_ERR_LIMIT_PER_TASK; + request = prepareDeleteDirRequest(remainNum, + pendingDeletedDirInfo.getValue(), + pendingDeletedDirInfo.getKey(), allSubDirList, + getOzoneManager().getKeyManager()); + } + consumedSize += request.getSerializedSize(); + purgePathRequestList.add(request); + // reduce remain count for self, sub-files, and sub-directories + remainNum = remainNum - 1; + remainNum = remainNum - request.getDeletedSubFilesCount(); + remainNum = remainNum - request.getMarkDeletedSubDirsCount(); + // Count up the purgeDeletedDir, subDirs and subFiles + if (request.getDeletedDir() != null && !request.getDeletedDir() + .isEmpty()) { + dirNum++; + } + subDirNum += request.getMarkDeletedSubDirsCount(); + subFileNum += request.getDeletedSubFilesCount(); } - consumedSize += request.getSerializedSize(); - purgePathRequestList.add(request); - remainNum = remainNum - request.getDeletedSubFilesCount(); - remainNum = remainNum - request.getMarkDeletedSubDirsCount(); - // Count up the purgeDeletedDir, subDirs and subFiles - if (request.getDeletedDir() != null - && !request.getDeletedDir().isEmpty()) { - dirNum++; - } - subDirNum += request.getMarkDeletedSubDirsCount(); - subFileNum += request.getDeletedSubFilesCount(); - } + optimizeDirDeletesAndSubmitRequest(remainNum, dirNum, subDirNum, + subFileNum, allSubDirList, purgePathRequestList, null, + startTime, ratisByteLimit - consumedSize, + getOzoneManager().getKeyManager(), expectedPreviousSnapshotId, + rnCnt); - optimizeDirDeletesAndSubmitRequest( - remainNum, dirNum, subDirNum, subFileNum, - allSubDirList, purgePathRequestList, null, startTime, - ratisByteLimit - consumedSize, - getOzoneManager().getKeyManager(), expectedPreviousSnapshotId); - - } catch (IOException e) { - LOG.error("Error while running delete directories and files " + - "background task. Will retry at next run.", e); - } - isRunningOnAOS.set(false); - synchronized (directoryDeletingService) { - this.directoryDeletingService.notify(); + } catch (IOException e) { + LOG.error( + "Error while running delete directories and files " + "background task. Will retry at next run.", + e); + } + isRunningOnAOS.set(false); + synchronized (directoryDeletingService) { + this.directoryDeletingService.notify(); + } } + } finally { + taskCount.getAndDecrement(); } // place holder by returning empty results of this call back. 
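The new scheduling model above is essentially a shared-supplier fan-out: getTasks() enqueues a fresh batch of DirDeletingTask workers only when taskCount has dropped back to zero, every worker pulls entries from the single synchronized DeletedDirSupplier iterator, and each worker decrements the counter in a finally block. A minimal, self-contained sketch of that pattern, using simplified stand-in types rather than the real Ozone classes:

    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;
    import java.util.concurrent.atomic.AtomicInteger;

    // Sketch of the fan-out used by DirectoryDeletingService: one synchronized
    // supplier feeds N workers; a counter blocks re-enqueue until all workers finish.
    class SharedSupplierSketch {
      private final AtomicInteger taskCount = new AtomicInteger(0);
      private final int poolSize;
      private Iterator<String> pendingDirs;      // stands in for the deleted-dir table iterator

      SharedSupplierSketch(int poolSize) {
        this.poolSize = poolSize;
      }

      private synchronized String next() {       // analogous to DeletedDirSupplier.get()
        return (pendingDirs != null && pendingDirs.hasNext()) ? pendingDirs.next() : null;
      }

      synchronized List<Runnable> getTasks(List<String> dirsSnapshot) {
        if (taskCount.get() > 0) {
          return List.of();                      // previous batch still draining: schedule nothing
        }
        pendingDirs = dirsSnapshot.iterator();   // analogous to reInitItr()
        taskCount.set(poolSize);
        List<Runnable> tasks = new ArrayList<>();
        for (int i = 0; i < poolSize; i++) {
          tasks.add(() -> {
            try {
              for (String dir = next(); dir != null; dir = next()) {
                // process `dir` (deletion logic omitted)
              }
            } finally {
              taskCount.decrementAndGet();       // mirrors the finally block in DirDeletingTask.call()
            }
          });
        }
        return tasks;
      }
    }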
return BackgroundTaskResult.EmptyTaskResult.newResult(); @@ -299,4 +363,9 @@ private boolean previousSnapshotHasDir( } } + public KeyValue getPendingDeletedDirInfo() + throws IOException { + return deletedDirSupplier.get(); + } + } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OMRangerBGSyncService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OMRangerBGSyncService.java index 768c77ad16e..fc6fe2b0c45 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OMRangerBGSyncService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OMRangerBGSyncService.java @@ -390,7 +390,7 @@ public void setOMDBRangerServiceVersion(long version) OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, CLIENT_ID, runCount.get()); } catch (ServiceException e) { LOG.error("SetRangerServiceVersion request failed. " - + "Will retry at next run."); + + "Will retry at next run.", e); throw e; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OpenKeyCleanupService.java index c0d958f6121..6d53e48a0fd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OpenKeyCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OpenKeyCleanupService.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.utils.BackgroundTask; import org.apache.hadoop.hdds.utils.BackgroundTaskQueue; import org.apache.hadoop.hdds.utils.BackgroundTaskResult; +import org.apache.hadoop.ozone.ClientVersion; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.om.ExpiredOpenKeys; import org.apache.hadoop.ozone.om.KeyManager; @@ -77,7 +78,7 @@ public class OpenKeyCleanupService extends BackgroundService { private final Duration leaseThreshold; private final int cleanupLimitPerTask; private final AtomicLong submittedOpenKeyCount; - private final AtomicLong runCount; + private final AtomicLong callId; private final AtomicBoolean suspended; public OpenKeyCleanupService(long interval, TimeUnit unit, long timeout, @@ -112,20 +113,10 @@ public OpenKeyCleanupService(long interval, TimeUnit unit, long timeout, OMConfigKeys.OZONE_OM_OPEN_KEY_CLEANUP_LIMIT_PER_TASK_DEFAULT); this.submittedOpenKeyCount = new AtomicLong(0); - this.runCount = new AtomicLong(0); + this.callId = new AtomicLong(0); this.suspended = new AtomicBoolean(false); } - /** - * Returns the number of times this Background service has run. - * - * @return Long, run count. - */ - @VisibleForTesting - public long getRunCount() { - return runCount.get(); - } - /** * Suspend the service (for testing). 
*/ @@ -189,7 +180,6 @@ public BackgroundTaskResult call() throws Exception { if (!shouldRun()) { return BackgroundTaskResult.EmptyTaskResult.newResult(); } - runCount.incrementAndGet(); long startTime = Time.monotonicNow(); final ExpiredOpenKeys expiredOpenKeys; try { @@ -244,6 +234,7 @@ private OMRequest createCommitKeyRequest( .setCmdType(Type.CommitKey) .setCommitKeyRequest(request) .setClientId(clientId.toString()) + .setVersion(ClientVersion.CURRENT_VERSION) .build(); } @@ -265,7 +256,7 @@ private OMRequest createDeleteOpenKeysRequest( private OMResponse submitRequest(OMRequest omRequest) { try { - return OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, runCount.get()); + return OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, callId.incrementAndGet()); } catch (ServiceException e) { LOG.error("Open key " + omRequest.getCmdType() + " request failed. Will retry at next run.", e); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java index db6d9b7b908..8add87f0633 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java @@ -284,7 +284,8 @@ private boolean initNativeLibraryForEfficientDiff(final OzoneConfiguration conf) try { return ManagedRawSSTFileReader.loadLibrary(); } catch (NativeLibraryNotLoadedException e) { - LOG.error("Native Library for raw sst file reading loading failed.", e); + LOG.warn("Native Library for raw sst file reading loading failed." + + " Fallback to performing a full diff instead. {}", e.getMessage()); return false; } } @@ -1031,8 +1032,10 @@ private void getDeltaFilesAndDiffKeysToObjectIdToKeyMap( // tombstone is not loaded. // TODO: [SNAPSHOT] Update Rocksdb SSTFileIterator to read tombstone if (skipNativeDiff || !isNativeLibsLoaded) { - deltaFiles.addAll(getSSTFileListForSnapshot(fromSnapshot, - tablesToLookUp)); + Set inputFiles = getSSTFileListForSnapshot(fromSnapshot, tablesToLookUp); + ManagedRocksDB fromDB = ((RDBStore)fromSnapshot.getMetadataManager().getStore()).getDb().getManagedRocksDb(); + RocksDiffUtils.filterRelevantSstFiles(inputFiles, tablePrefixes, fromDB); + deltaFiles.addAll(inputFiles); } addToObjectIdMap(fsTable, tsTable, deltaFiles, !skipNativeDiff && isNativeLibsLoaded, @@ -1124,7 +1127,7 @@ Set getDeltaFiles(OmSnapshot fromSnapshot, String diffDir) throws IOException { // TODO: [SNAPSHOT] Refactor the parameter list - final Set deltaFiles = new HashSet<>(); + Optional> deltaFiles = Optional.empty(); // Check if compaction DAG is available, use that if so if (differ != null && fsInfo != null && tsInfo != null && !useFullDiff) { @@ -1138,40 +1141,36 @@ Set getDeltaFiles(OmSnapshot fromSnapshot, LOG.debug("Calling RocksDBCheckpointDiffer"); try { - List sstDiffList = differ.getSSTDiffListWithFullPath(toDSI, - fromDSI, diffDir); - deltaFiles.addAll(sstDiffList); + deltaFiles = differ.getSSTDiffListWithFullPath(toDSI, fromDSI, diffDir).map(HashSet::new); } catch (Exception exception) { LOG.warn("Failed to get SST diff file using RocksDBCheckpointDiffer. 
" + "It will fallback to full diff now.", exception); } } - if (useFullDiff || deltaFiles.isEmpty()) { + if (useFullDiff || !deltaFiles.isPresent()) { // If compaction DAG is not available (already cleaned up), fall back to // the slower approach. if (!useFullDiff) { LOG.warn("RocksDBCheckpointDiffer is not available, falling back to" + " slow path"); } - - Set fromSnapshotFiles = - RdbUtil.getSSTFilesForComparison( - ((RDBStore)fromSnapshot.getMetadataManager().getStore()) - .getDb().getManagedRocksDb(), - tablesToLookUp); - Set toSnapshotFiles = - RdbUtil.getSSTFilesForComparison( - ((RDBStore)toSnapshot.getMetadataManager().getStore()).getDb() - .getManagedRocksDb(), - tablesToLookUp); - - deltaFiles.addAll(fromSnapshotFiles); - deltaFiles.addAll(toSnapshotFiles); - RocksDiffUtils.filterRelevantSstFiles(deltaFiles, tablePrefixes); + ManagedRocksDB fromDB = ((RDBStore)fromSnapshot.getMetadataManager().getStore()) + .getDb().getManagedRocksDb(); + ManagedRocksDB toDB = ((RDBStore)toSnapshot.getMetadataManager().getStore()) + .getDb().getManagedRocksDb(); + Set fromSnapshotFiles = getSSTFileListForSnapshot(fromSnapshot, tablesToLookUp); + Set toSnapshotFiles = getSSTFileListForSnapshot(toSnapshot, tablesToLookUp); + Set diffFiles = new HashSet<>(); + diffFiles.addAll(fromSnapshotFiles); + diffFiles.addAll(toSnapshotFiles); + RocksDiffUtils.filterRelevantSstFiles(diffFiles, tablePrefixes, fromDB, toDB); + deltaFiles = Optional.of(diffFiles); } - return deltaFiles; + return deltaFiles.orElseThrow(() -> + new IOException("Error getting diff files b/w " + fromSnapshot.getSnapshotTableKey() + " and " + + toSnapshot.getSnapshotTableKey())); } private void validateEstimatedKeyChangesAreInLimits( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java index 201a9fe0c9c..0ac504246f6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java @@ -46,12 +46,10 @@ import java.util.UUID; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_SNAPSHOT_ERROR; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TIMEOUT; /** @@ -88,7 +86,7 @@ public static SnapshotInfo getSnapshotInfo(final OzoneManager ozoneManager, } if (snapshotInfo == null) { throw new OMException("Snapshot '" + snapshotKey + "' is not found.", - KEY_NOT_FOUND); + FILE_NOT_FOUND); } return snapshotInfo; } @@ -164,7 +162,7 @@ public static SnapshotInfo getNextSnapshot(OzoneManager ozoneManager, // is removed in-memory but OMDoubleBuffer has not flushed yet. if (snapInfo == null) { throw new OMException("Provided Snapshot Info argument is null. 
Cannot get the next snapshot for a null value", - INVALID_SNAPSHOT_ERROR); + FILE_NOT_FOUND); } try { if (chainManager.hasNextPathSnapshot(snapInfo.getSnapshotPath(), @@ -201,7 +199,7 @@ private static UUID getPreviousSnapshotId(SnapshotInfo snapInfo, SnapshotChainMa // is removed in-memory but OMDoubleBuffer has not flushed yet. if (snapInfo == null) { throw new OMException("Provided Snapshot Info argument is null. Cannot get the previous snapshot for a null " + - "value", INVALID_SNAPSHOT_ERROR); + "value", FILE_NOT_FOUND); } try { if (chainManager.hasPreviousPathSnapshot(snapInfo.getSnapshotPath(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java index 5a62a7cfc62..e5d9901fda1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java @@ -46,7 +46,8 @@ public enum OMLayoutFeature implements LayoutFeature { FILESYSTEM_SNAPSHOT(5, "Ozone version supporting snapshot"), QUOTA(6, "Ozone quota re-calculate"), - HBASE_SUPPORT(7, "Full support of hsync, lease recovery and listOpenFiles APIs for HBase"); + HBASE_SUPPORT(7, "Full support of hsync, lease recovery and listOpenFiles APIs for HBase"), + DELEGATION_TOKEN_SYMMETRIC_SIGN(8, "Delegation token signed by symmetric key"); /////////////////////////////// ///////////////////////////// // Example OM Layout Feature with Actions diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java index 4506337e54d..6b55b7384bd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java @@ -176,9 +176,7 @@ public OMResponse processRequest(OMRequest request) throws ServiceException { return response; } - private OMResponse internalProcessRequest(OMRequest request) throws - ServiceException { - OMClientRequest omClientRequest = null; + private OMResponse internalProcessRequest(OMRequest request) throws ServiceException { boolean s3Auth = false; try { @@ -207,7 +205,16 @@ private OMResponse internalProcessRequest(OMRequest request) throws if (!s3Auth) { OzoneManagerRatisUtils.checkLeaderStatus(ozoneManager); } - OMRequest requestToSubmit; + + // check retry cache + final OMResponse cached = omRatisServer.checkRetryCache(); + if (cached != null) { + return cached; + } + + // process new request + OMClientRequest omClientRequest = null; + final OMRequest requestToSubmit; try { omClientRequest = createClientRequest(request, ozoneManager); // TODO: Note: Due to HDDS-6055, createClientRequest() could now @@ -215,6 +222,7 @@ private OMResponse internalProcessRequest(OMRequest request) throws // Added the assertion. 
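The reordered write path above can be read as: consult the Ratis retry cache first, and only build, pre-execute, and submit a new client request when no cached response exists for the call. A rough, hedged sketch of that control flow using placeholder types (the real checkRetryCache and submitRequest live on the OM's Ratis server class):

    // Hypothetical, simplified shape of internalProcessRequest after the change:
    // a cached response short-circuits request creation and pre-execution.
    interface RatisServerSketch {
      ResponseSketch checkRetryCache();          // non-null when this call id was already applied
      ResponseSketch submitRequest(RequestSketch request);
    }

    final class WritePathSketch {
      private final RatisServerSketch ratis;

      WritePathSketch(RatisServerSketch ratis) {
        this.ratis = ratis;
      }

      ResponseSketch process(RequestSketch incoming) {
        ResponseSketch cached = ratis.checkRetryCache();
        if (cached != null) {
          return cached;                         // duplicate retry: reuse the previously applied result
        }
        RequestSketch prepared = preExecute(incoming);   // stands in for createClientRequest + preExecute
        return ratis.submitRequest(prepared);
      }

      private RequestSketch preExecute(RequestSketch request) {
        return request;                          // validation / normalization before consensus (omitted)
      }
    }

    final class RequestSketch { }
    final class ResponseSketch { }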
assert (omClientRequest != null); OMClientRequest finalOmClientRequest = omClientRequest; + requestToSubmit = preExecute(finalOmClientRequest); this.lastRequestToSubmit = requestToSubmit; } catch (IOException ex) { @@ -225,7 +233,7 @@ private OMResponse internalProcessRequest(OMRequest request) throws return createErrorResponse(request, ex); } - OMResponse response = submitRequestToRatis(requestToSubmit); + final OMResponse response = omRatisServer.submitRequest(requestToSubmit); if (!response.getSuccess()) { omClientRequest.handleRequestFailure(ozoneManager); } @@ -246,14 +254,6 @@ public OMRequest getLastRequestToSubmit() { return lastRequestToSubmit; } - /** - * Submits request to OM's Ratis server. - */ - private OMResponse submitRequestToRatis(OMRequest request) - throws ServiceException { - return omRatisServer.submitRequest(request); - } - private OMResponse submitReadRequestToOM(OMRequest request) throws ServiceException { // Check if this OM is the leader. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index 5682b040e85..ab1f68d9928 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -21,6 +21,7 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.UUID; import java.util.stream.Collectors; @@ -42,6 +43,9 @@ import org.apache.hadoop.hdds.utils.FaultInjector; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.helpers.OMAuditLogger; +import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetObjectTaggingRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetObjectTaggingResponse; import org.apache.hadoop.ozone.util.PayloadUtils; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.OzoneManagerPrepareState; @@ -142,6 +146,8 @@ import com.google.common.collect.Lists; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE_DEFAULT; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.HBASE_SUPPORT; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.FILESYSTEM_SNAPSHOT; @@ -177,9 +183,16 @@ public class OzoneManagerRequestHandler implements RequestHandler { LoggerFactory.getLogger(OzoneManagerRequestHandler.class); private final OzoneManager impl; private FaultInjector injector; + private long maxKeyListSize; + public OzoneManagerRequestHandler(OzoneManager om) { this.impl = om; + this.maxKeyListSize = om.getConfiguration().getLong(OZONE_OM_SERVER_LIST_MAX_SIZE, + OZONE_OM_SERVER_LIST_MAX_SIZE_DEFAULT); + if (this.maxKeyListSize <= 0) { + this.maxKeyListSize = OZONE_OM_SERVER_LIST_MAX_SIZE_DEFAULT; + } } //TODO simplify it to make it shorter @@ -385,6 +398,11 @@ public OMResponse handleReadRequest(OMRequest request) { startQuotaRepair(request.getStartQuotaRepairRequest()); responseBuilder.setStartQuotaRepairResponse(startQuotaRepairRsp); break; + case 
GetObjectTagging: + OzoneManagerProtocolProtos.GetObjectTaggingResponse getObjectTaggingResponse = + getObjectTagging(request.getGetObjectTaggingRequest()); + responseBuilder.setGetObjectTaggingResponse(getObjectTaggingResponse); + break; default: responseBuilder.setSuccess(false); responseBuilder.setMessage("Unrecognized Command Type: " + cmdType); @@ -736,7 +754,7 @@ private ListKeysResponse listKeys(ListKeysRequest request, int clientVersion) request.getBucketName(), request.getStartKey(), request.getPrefix(), - request.getCount()); + (int)Math.min(this.maxKeyListSize, request.getCount())); for (OmKeyInfo key : listKeysResult.getKeys()) { resp.addKeyInfo(key.getProtobuf(true, clientVersion)); } @@ -754,7 +772,7 @@ private ListKeysLightResponse listKeysLight(ListKeysRequest request) request.getBucketName(), request.getStartKey(), request.getPrefix(), - request.getCount()); + (int)Math.min(this.maxKeyListSize, request.getCount())); for (BasicOmKeyInfo key : listKeysLightResult.getKeys()) { resp.addBasicKeyInfo(key.getProtobuf()); } @@ -1225,7 +1243,7 @@ private ListStatusResponse listStatus( request.hasAllowPartialPrefix() && request.getAllowPartialPrefix(); List statuses = impl.listStatus(omKeyArgs, request.getRecursive(), - request.getStartKey(), request.getNumEntries(), + request.getStartKey(), Math.min(this.maxKeyListSize, request.getNumEntries()), allowPartialPrefixes); ListStatusResponse.Builder listStatusResponseBuilder = @@ -1251,7 +1269,7 @@ private ListStatusLightResponse listStatusLight( request.hasAllowPartialPrefix() && request.getAllowPartialPrefix(); List statuses = impl.listStatusLight(omKeyArgs, request.getRecursive(), - request.getStartKey(), request.getNumEntries(), + request.getStartKey(), Math.min(this.maxKeyListSize, request.getNumEntries()), allowPartialPrefixes); ListStatusLightResponse.Builder listStatusLightResponseBuilder = @@ -1479,7 +1497,7 @@ private OzoneManagerProtocolProtos.ListSnapshotResponse getSnapshots( throws IOException { ListSnapshotResponse implResponse = impl.listSnapshot( request.getVolumeName(), request.getBucketName(), request.getPrefix(), - request.getPrevSnapshot(), request.getMaxListResult()); + request.getPrevSnapshot(), (int)Math.min(request.getMaxListResult(), maxKeyListSize)); List snapshotInfoList = implResponse.getSnapshotInfos() .stream().map(SnapshotInfo::getProtobuf).collect(Collectors.toList()); @@ -1508,6 +1526,24 @@ private SetSafeModeResponse setSafeMode( .build(); } + private GetObjectTaggingResponse getObjectTagging(GetObjectTaggingRequest request) + throws IOException { + KeyArgs keyArgs = request.getKeyArgs(); + OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() + .setVolumeName(keyArgs.getVolumeName()) + .setBucketName(keyArgs.getBucketName()) + .setKeyName(keyArgs.getKeyName()) + .build(); + + GetObjectTaggingResponse.Builder resp = + GetObjectTaggingResponse.newBuilder(); + + Map result = impl.getObjectTagging(omKeyArgs); + + resp.addAllTags(KeyValueUtil.toProtobuf(result)); + return resp.build(); + } + private SafeModeAction toSafeModeAction( OzoneManagerProtocolProtos.SafeMode safeMode) { switch (safeMode) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java index a6fe61eb480..420cb6c6dcb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java @@ -25,13 +25,19 @@ import java.security.cert.X509Certificate; import java.util.Iterator; import java.util.Map; +import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; +import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.security.OzoneSecretManager; import org.apache.hadoop.hdds.security.SecurityConfig; +import org.apache.hadoop.hdds.security.exception.SCMSecurityException; +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyClient; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.security.x509.exception.CertificateException; import org.apache.hadoop.io.Text; @@ -41,6 +47,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMLeaderNotReadyException; import org.apache.hadoop.ozone.om.exceptions.OMNotLeaderException; +import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature; import org.apache.hadoop.ozone.security.OzoneSecretStore.OzoneManagerSecretState; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier.TokenInfo; import org.apache.hadoop.security.AccessControlException; @@ -64,7 +71,7 @@ public class OzoneDelegationTokenSecretManager extends OzoneSecretManager { - private static final Logger LOG = LoggerFactory + public static final Logger LOG = LoggerFactory .getLogger(OzoneDelegationTokenSecretManager.class); private final Map currentTokens; private final OzoneSecretStore store; @@ -73,6 +80,7 @@ public class OzoneDelegationTokenSecretManager private final long tokenRemoverScanInterval; private final String omServiceId; private final OzoneManager ozoneManager; + private SecretKeyClient secretKeyClient; /** * If the delegation token update thread holds this lock, it will not get @@ -100,8 +108,8 @@ public OzoneDelegationTokenSecretManager(Builder b) throws IOException { isRatisEnabled = b.ozoneConf.getBoolean( OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, OMConfigKeys.OZONE_OM_RATIS_ENABLE_DEFAULT); + this.secretKeyClient = b.secretKeyClient; loadTokenSecretState(store.loadState()); - } /** @@ -117,6 +125,7 @@ public static class Builder { private CertificateClient certClient; private String omServiceId; private OzoneManager ozoneManager; + private SecretKeyClient secretKeyClient; public OzoneDelegationTokenSecretManager build() throws IOException { return new OzoneDelegationTokenSecretManager(this); @@ -157,6 +166,11 @@ public Builder setCertificateClient(CertificateClient certificateClient) { return this; } + public Builder setSecretKeyClient(SecretKeyClient client) { + this.secretKeyClient = client; + return this; + } + public Builder setOmServiceId(String serviceId) { this.omServiceId = serviceId; return this; @@ -195,9 +209,15 @@ public Token createToken(Text owner, Text renewer, OzoneTokenIdentifier identifier = createIdentifier(owner, renewer, realUser); updateIdentifierDetails(identifier); - - byte[] password = createPassword(identifier.getBytes(), - getCurrentKey().getPrivateKey()); + byte[] password; + if (ozoneManager.getVersionManager().isAllowed(OMLayoutFeature.DELEGATION_TOKEN_SYMMETRIC_SIGN)) { + 
ManagedSecretKey currentSecretKey = secretKeyClient.getCurrentSecretKey(); + identifier.setSecretKeyId(currentSecretKey.getId().toString()); + password = currentSecretKey.sign(identifier.getBytes()); + } else { + identifier.setOmCertSerialId(getCertSerialId()); + password = createPassword(identifier.getBytes(), getCurrentKey().getPrivateKey()); + } long expiryTime = identifier.getIssueDate() + getTokenRenewInterval(); // For HA ratis will take care of updating. @@ -252,7 +272,6 @@ private void updateIdentifierDetails(OzoneTokenIdentifier identifier) { identifier.setMasterKeyId(getCurrentKey().getKeyId()); identifier.setSequenceNumber(sequenceNum); identifier.setMaxDate(now + getTokenMaxLifetime()); - identifier.setOmCertSerialId(getCertSerialId()); identifier.setOmServiceId(getOmServiceId()); } @@ -433,9 +452,29 @@ private TokenInfo validateToken(OzoneTokenIdentifier identifier) /** * Validates if given hash is valid. + * HDDS-8829 changes the delegation token from being signed by OM's RSA private key to being signed by a secret key supported by SCM. + * The default delegation token lifetime is 7 days. + * In the 7-day period after OM is upgraded from a version without HDDS-8829 to a version with HDDS-8829, tokens + * signed by the RSA private key and tokens signed by the secret key will coexist. After 7 days, only + * tokens signed by the secret key will still be valid. The following logic handles both types of tokens. */ public boolean verifySignature(OzoneTokenIdentifier identifier, byte[] password) { + String secretKeyId = identifier.getSecretKeyId(); + if (StringUtils.isNotEmpty(secretKeyId)) { + try { + ManagedSecretKey verifyKey = secretKeyClient.getSecretKey(UUID.fromString(secretKeyId)); + return verifyKey.isValidSignature(identifier.getBytes(), password); + } catch (SCMSecurityException e) { + LOG.error("verifySignature for identifier {} failed", identifier, e); + return false; + } + } + + if (LOG.isDebugEnabled()) { + LOG.debug("Verify an asymmetric key signed Token {}", identifier); + } + X509Certificate signerCert; try { signerCert = getCertClient().getCertificate( @@ -511,6 +550,14 @@ private byte[] validateS3AuthInfo(OzoneTokenIdentifier identifier) } + /** + * Load delegation tokens from DB into memory. + * HDDS-8829 changes the delegation token from being signed by OM's RSA private key to being signed by a secret key supported by SCM. + * The default delegation token lifetime is 7 days. After OM is upgraded from a version without HDDS-8829 to a + * version with HDDS-8829 and restarts, tokens signed by the RSA private key will be loaded from DB into memory. + * On later restarts, once the 7-day window has passed, only tokens signed by the secret key will remain to be loaded. + * Both types of token loading should be supported.
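Put differently, verifySignature now branches on whether the token identifier carries a secret-key id: if it does, the signature is checked against the SCM-managed symmetric key; otherwise the pre-HDDS-8829 certificate-based path is used. A compact sketch of that branch with simplified stand-ins for the key client (the real types are SecretKeyClient and ManagedSecretKey):

    import java.util.UUID;

    // Simplified stand-ins for the symmetric-key side of token verification.
    interface SymmetricKeySketch {
      boolean isValidSignature(byte[] data, byte[] signature);
    }
    interface SymmetricKeyClientSketch {
      SymmetricKeySketch getSecretKey(UUID id);  // may throw if the key id is unknown
    }

    final class TokenVerifierSketch {
      private final SymmetricKeyClientSketch secretKeys;

      TokenVerifierSketch(SymmetricKeyClientSketch secretKeys) {
        this.secretKeys = secretKeys;
      }

      boolean verify(String secretKeyId, byte[] identifierBytes, byte[] password) {
        if (secretKeyId != null && !secretKeyId.isEmpty()) {
          try {
            // New path: the token was signed with an SCM-issued symmetric secret key.
            return secretKeys.getSecretKey(UUID.fromString(secretKeyId))
                .isValidSignature(identifierBytes, password);
          } catch (RuntimeException e) {
            return false;                        // unknown, expired, or malformed key id
          }
        }
        // Legacy path: the token predates HDDS-8829 and was signed with OM's RSA private key.
        return verifyWithOmCertificate(identifierBytes, password);
      }

      private boolean verifyWithOmCertificate(byte[] data, byte[] signature) {
        return false;                            // placeholder for the existing X.509-based check
      }
    }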
+ */ private void loadTokenSecretState( OzoneManagerSecretState state) throws IOException { LOG.info("Loading token state into token manager."); @@ -528,8 +575,17 @@ private void addPersistedDelegationToken(OzoneTokenIdentifier identifier, "Can't add persisted delegation token to a running SecretManager."); } - byte[] password = createPassword(identifier.getBytes(), - getCertClient().getPrivateKey()); + byte[] password; + if (StringUtils.isNotEmpty(identifier.getSecretKeyId())) { + ManagedSecretKey signKey = secretKeyClient.getSecretKey(UUID.fromString(identifier.getSecretKeyId())); + password = signKey.sign(identifier.getBytes()); + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("Load an asymmetric key signed Token {}", identifier); + } + password = createPassword(identifier.getBytes(), getCertClient().getPrivateKey()); + } + if (identifier.getSequenceNumber() > getDelegationTokenSeqNum()) { setDelegationTokenSeqNum(identifier.getSequenceNumber()); } @@ -588,6 +644,11 @@ public void stop() throws IOException { } } + @VisibleForTesting + public void setSecretKeyClient(SecretKeyClient client) { + this.secretKeyClient = client; + } + /** * Remove expired delegation tokens from cache and persisted store. */ diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java index edffd5ed74e..c7a14bb6eed 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; @@ -52,6 +53,7 @@ public final class OmTestManagers { private final BucketManager bucketManager; private final PrefixManager prefixManager; private final ScmBlockLocationProtocol scmBlockClient; + private final OzoneClient rpcClient; public OzoneManager getOzoneManager() { return om; @@ -77,6 +79,9 @@ public KeyManager getKeyManager() { public ScmBlockLocationProtocol getScmBlockClient() { return scmBlockClient; } + public OzoneClient getRpcClient() { + return rpcClient; + } public OmTestManagers(OzoneConfiguration conf) throws AuthenticationException, IOException, InterruptedException, TimeoutException { @@ -121,7 +126,8 @@ public OmTestManagers(OzoneConfiguration conf, waitFor(() -> om.getOmRatisServer().checkLeaderStatus() == RaftServerStatus.LEADER_AND_READY, 10, 10_000); - writeClient = OzoneClientFactory.getRpcClient(conf) + rpcClient = OzoneClientFactory.getRpcClient(conf); + writeClient = rpcClient .getObjectStore().getClientProxy().getOzoneManagerClient(); metadataManager = (OmMetadataManagerImpl) HddsWhiteboxTestUtils .getInternalState(om, "metadataManager"); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java index 36245dc8741..680853cdc32 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java +++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java @@ -27,13 +27,14 @@ import org.junit.jupiter.api.io.TempDir; import java.io.File; +import java.io.IOException; import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; +import java.util.List; /** - * Test that all the tables are covered both by OMDBDefinition - * as well as OmMetadataManagerImpl. + * Test that all the tables are covered both by OMDBDefinition and OmMetadataManagerImpl. */ public class TestOMDBDefinition { @@ -41,33 +42,33 @@ public class TestOMDBDefinition { private Path folder; @Test - public void testDBDefinition() throws Exception { + public void testDBDefinition() throws IOException { OzoneConfiguration configuration = new OzoneConfiguration(); File metaDir = folder.toFile(); - DBStore store = OmMetadataManagerImpl.loadDB(configuration, metaDir); - OMDBDefinition dbDef = new OMDBDefinition(); + OMDBDefinition dbDef = OMDBDefinition.get(); // Get list of tables from DB Definitions - final Collection> columnFamilyDefinitions - = dbDef.getColumnFamilies(); + final Collection> columnFamilyDefinitions = dbDef.getColumnFamilies(); final int countOmDefTables = columnFamilyDefinitions.size(); - ArrayList missingDBDefTables = new ArrayList<>(); + List missingDBDefTables = new ArrayList<>(); - // Get list of tables from the RocksDB Store - final Collection missingOmDBTables = new ArrayList<>(store.getTableNames().values()); - missingOmDBTables.remove("default"); - int countOmDBTables = missingOmDBTables.size(); - // Remove the file if it is found in both the datastructures - for (DBColumnFamilyDefinition definition : columnFamilyDefinitions) { - if (!missingOmDBTables.remove(definition.getName())) { - missingDBDefTables.add(definition.getName()); + try (DBStore store = OmMetadataManagerImpl.loadDB(configuration, metaDir, -1)) { + // Get list of tables from the RocksDB Store + final Collection missingOmDBTables = new ArrayList<>(store.getTableNames().values()); + missingOmDBTables.remove("default"); + int countOmDBTables = missingOmDBTables.size(); + // Remove the file if it is found in both the datastructures + for (DBColumnFamilyDefinition definition : columnFamilyDefinitions) { + if (!missingOmDBTables.remove(definition.getName())) { + missingDBDefTables.add(definition.getName()); + } } - } - assertEquals(0, missingDBDefTables.size(), - "Tables in OmMetadataManagerImpl are:" + missingDBDefTables); - assertEquals(0, missingOmDBTables.size(), - "Tables missing in OMDBDefinition are:" + missingOmDBTables); - assertEquals(countOmDBTables, countOmDefTables); + assertEquals(0, missingDBDefTables.size(), + "Tables in OmMetadataManagerImpl are:" + missingDBDefTables); + assertEquals(0, missingOmDBTables.size(), + "Tables missing in OMDBDefinition are:" + missingOmDBTables); + assertEquals(countOmDBTables, countOmDefTables); + } } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java index a4ced424522..57ac3f29078 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java @@ -131,7 +131,8 @@ public void testMultiTenancyRequestsWhenDisabled() throws IOException { final OzoneManager ozoneManager = mock(OzoneManager.class); 
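The TestOMDBDefinition rewrite above is mostly about resource handling: the RocksDB-backed DBStore is now opened in a try-with-resources block so the native handle is released even when an assertion fails. The same pattern in generic form (stand-in interfaces, not the actual DBStore API):

    import java.io.File;
    import java.io.IOException;
    import java.util.Collection;
    import java.util.Set;

    // Generic shape of the change: open the store, run the checks inside the try
    // block, and rely on close() running unconditionally.
    final class ClosableStoreSketch {

      interface Store extends AutoCloseable {
        Collection<String> tableNames() throws IOException;
        @Override
        void close() throws IOException;
      }

      interface StoreFactory {
        Store open(File dir) throws IOException;    // stands in for OmMetadataManagerImpl.loadDB(...)
      }

      static void verifyTables(StoreFactory factory, File dir, Set<String> expected) throws IOException {
        try (Store store = factory.open(dir)) {
          if (!store.tableNames().containsAll(expected)) {
            throw new AssertionError("Missing tables: " + expected);
          }
        } // close() always runs here, even if the assertion above throws
      }
    }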
doCallRealMethod().when(ozoneManager).checkS3MultiTenancyEnabled(); - + final OzoneConfiguration conf = new OzoneConfiguration(); + when(ozoneManager.getConfiguration()).thenReturn(conf); when(ozoneManager.isS3MultiTenancyEnabled()).thenReturn(false); final String tenantId = "test-tenant"; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java index ebc6bc6cb6c..1d00ec614cd 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.RDBBatchOperation; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -72,6 +73,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.timeout; import static org.mockito.Mockito.times; @@ -314,6 +316,29 @@ public void testHardLinkCreation() throws IOException { getINode(f1FileLink.toPath()), "link matches original file"); } + + @Test + public void testGetSnapshotInfo() throws IOException { + SnapshotInfo s1 = createSnapshotInfo("vol", "buck"); + UUID latestGlobalSnapId = + ((OmMetadataManagerImpl) om.getMetadataManager()).getSnapshotChainManager() + .getLatestGlobalSnapshotId(); + UUID latestPathSnapId = + ((OmMetadataManagerImpl) om.getMetadataManager()).getSnapshotChainManager() + .getLatestPathSnapshotId(String.join("/", "vol", "buck")); + s1.setPathPreviousSnapshotId(latestPathSnapId); + s1.setGlobalPreviousSnapshotId(latestGlobalSnapId); + ((OmMetadataManagerImpl) om.getMetadataManager()).getSnapshotChainManager() + .addSnapshot(s1); + OMException ome = assertThrows(OMException.class, + () -> om.getOmSnapshotManager().getSnapshot(s1.getSnapshotId())); + assertEquals(OMException.ResultCodes.FILE_NOT_FOUND, ome.getResult()); + // not present in snapshot chain too + SnapshotInfo s2 = createSnapshotInfo("vol", "buck"); + ome = assertThrows(OMException.class, + () -> om.getOmSnapshotManager().getSnapshot(s2.getSnapshotId())); + assertEquals(OMException.ResultCodes.FILE_NOT_FOUND, ome.getResult()); + } /* * Test that exclude list is generated correctly. 
*/ diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneConfigUtil.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneConfigUtil.java index 0bd99d49499..41d6c28e2b9 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneConfigUtil.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneConfigUtil.java @@ -20,16 +20,10 @@ import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import java.io.IOException; -import java.util.Arrays; - -import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -119,42 +113,4 @@ public void testResolveClientSideRepConfigWhenBucketHasEC3() // should return ratis. assertEquals(ratisReplicationConfig, replicationConfig); } - - @Test - public void testS3AdminExtraction() throws IOException { - OzoneConfiguration configuration = new OzoneConfiguration(); - configuration.set(OzoneConfigKeys.OZONE_S3_ADMINISTRATORS, "alice,bob"); - - assertThat(OzoneConfigUtil.getS3AdminsFromConfig(configuration)) - .containsAll(Arrays.asList("alice", "bob")); - } - - @Test - public void testS3AdminExtractionWithFallback() throws IOException { - OzoneConfiguration configuration = new OzoneConfiguration(); - configuration.set(OzoneConfigKeys.OZONE_ADMINISTRATORS, "alice,bob"); - - assertThat(OzoneConfigUtil.getS3AdminsFromConfig(configuration)) - .containsAll(Arrays.asList("alice", "bob")); - } - - @Test - public void testS3AdminGroupExtraction() { - OzoneConfiguration configuration = new OzoneConfiguration(); - configuration.set(OzoneConfigKeys.OZONE_S3_ADMINISTRATORS_GROUPS, - "test1, test2"); - - assertThat(OzoneConfigUtil.getS3AdminsGroupsFromConfig(configuration)) - .containsAll(Arrays.asList("test1", "test2")); - } - - @Test - public void testS3AdminGroupExtractionWithFallback() { - OzoneConfiguration configuration = new OzoneConfiguration(); - configuration.set(OzoneConfigKeys.OZONE_ADMINISTRATORS_GROUPS, - "test1, test2"); - - assertThat(OzoneConfigUtil.getS3AdminsGroupsFromConfig(configuration)) - .containsAll(Arrays.asList("test1", "test2")); - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java index 54b04260d55..eb13f97d237 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java @@ -40,6 +40,7 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.ratis.server.protocol.TermIndex; import 
org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -107,6 +108,7 @@ public void setup() throws IOException { when(ozoneManager.getMaxUserVolumeCount()).thenReturn(10L); auditLogger = mock(AuditLogger.class); when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); + when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); doubleBuffer = OzoneManagerDoubleBuffer.newBuilder() .setOmMetadataManager(omMetadataManager) @@ -450,6 +452,11 @@ private OMClientResponse createVolume(String volumeName, OMVolumeCreateRequest omVolumeCreateRequest = new OMVolumeCreateRequest(omRequest); + try { + omVolumeCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); + } catch (IOException e) { + throw new RuntimeException(e); + } final TermIndex termIndex = TransactionInfo.getTermIndex(transactionId); OMClientResponse omClientResponse = omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, termIndex); @@ -462,7 +469,7 @@ private OMClientResponse createVolume(String volumeName, * @return OMBucketCreateResponse */ private OMBucketCreateResponse createBucket(String volumeName, - String bucketName, long transactionID) { + String bucketName, long transactionID) { BucketInfo.Builder bucketInfo = newBucketInfoBuilder(bucketName, volumeName) @@ -472,6 +479,10 @@ private OMBucketCreateResponse createBucket(String volumeName, OMBucketCreateRequest omBucketCreateRequest = new OMBucketCreateRequest(omRequest); + try { + omBucketCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); + } catch (IOException e) { + } final TermIndex termIndex = TermIndex.valueOf(term, transactionID); OMClientResponse omClientResponse = omBucketCreateRequest.validateAndUpdateCache(ozoneManager, termIndex); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestBucketLayoutAwareOMKeyFactory.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestBucketLayoutAwareOMKeyFactory.java index bb3e3930059..c0f63e4d559 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestBucketLayoutAwareOMKeyFactory.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestBucketLayoutAwareOMKeyFactory.java @@ -110,9 +110,9 @@ public void testGetRequestInstanceFromMap() { LOG.info("Validated request class instantiation for cmdType " + k); }); - assertEquals(13, omKeyReqsFSO.size()); - assertEquals(14, omKeyReqsLegacy.size()); - assertEquals(14, omKeyReqsOBS.size()); + assertEquals(15, omKeyReqsFSO.size()); + assertEquals(16, omKeyReqsLegacy.size()); + assertEquals(16, omKeyReqsOBS.size()); // Check if the number of instantiated OMKeyRequest classes is equal to // the number of keys in the mapping. 
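A pattern repeated in the test changes above and below: a mocked OzoneManager now stubs getConfiguration(), and each request object gets an explicit UGI via setUGI(UserGroupInformation.getCurrentUser()) before validateAndUpdateCache runs. A condensed sketch of that setup, assuming the usual Ozone test dependencies and omitting the rest of the fixture (metadata manager, metrics, audit logger):

    import java.io.IOException;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.om.OzoneManager;
    import org.apache.hadoop.ozone.om.request.bucket.OMBucketCreateRequest;
    import org.apache.hadoop.ozone.om.response.OMClientResponse;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
    import org.apache.hadoop.security.UserGroupInformation;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    final class RequestTestSetupSketch {
      // Highlights only the two additions made across the request tests in this patch.
      static OMClientResponse execute(OMRequest omRequest) throws IOException {
        OzoneConfiguration conf = new OzoneConfiguration();
        OzoneManager ozoneManager = mock(OzoneManager.class);
        when(ozoneManager.getConfiguration()).thenReturn(conf);   // requests now read config from the OM

        OMBucketCreateRequest request = new OMBucketCreateRequest(omRequest);
        request.setUGI(UserGroupInformation.getCurrentUser());    // UGI is set explicitly on the request
        return request.validateAndUpdateCache(ozoneManager, 1L);
      }
    }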
assertEquals( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java index 201c2a759fc..59debe08a61 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java @@ -26,6 +26,7 @@ import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.Test; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -41,6 +42,7 @@ import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.newBucketInfoBuilder; import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.newCreateBucketRequest; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -336,7 +338,7 @@ protected void doValidateAndUpdateCache(String volumeName, String bucketName, assertNull(omMetadataManager.getBucketTable().get(bucketKey)); OMBucketCreateRequest omBucketCreateRequest = new OMBucketCreateRequest(modifiedRequest); - + omBucketCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 1); @@ -355,8 +357,7 @@ protected void doValidateAndUpdateCache(String volumeName, String bucketName, dbBucketInfo.getCreationTime()); assertEquals(bucketInfoFromProto.getModificationTime(), dbBucketInfo.getModificationTime()); - assertEquals(bucketInfoFromProto.getAcls(), - dbBucketInfo.getAcls()); + assertTrue(dbBucketInfo.getAcls().containsAll(bucketInfoFromProto.getAcls())); assertEquals(bucketInfoFromProto.getIsVersionEnabled(), dbBucketInfo.getIsVersionEnabled()); assertEquals(bucketInfoFromProto.getStorageType(), diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java index 3a1d22f08a8..029b1f9082b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java @@ -26,6 +26,7 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -34,6 +35,7 @@ import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.newBucketInfoBuilder; import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.newCreateBucketRequest; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketLayoutProto.FILE_SYSTEM_OPTIMIZED; +import static org.junit.jupiter.api.Assertions.assertTrue; 
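The assertion changes in these bucket-create tests (assertEquals on ACL lists replaced by containsAll checks) allow the persisted bucket to carry extra ACL entries beyond those in the request, presumably entries derived from the now-explicit UGI. In effect the check becomes subset containment rather than exact equality, roughly:

    import java.util.List;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    // Illustration only: "expected" is what the request carried, "actual" is what was persisted.
    final class AclAssertionSketch {
      static void assertAclsPreserved(List<String> expectedFromRequest, List<String> actualFromDb) {
        // Old style: assertEquals(expectedFromRequest, actualFromDb) fails if the
        // store added any extra (e.g. default or user-derived) entries.
        // New style: every requested ACL must be present; extras are allowed.
        assertTrue(actualFromDb.containsAll(expectedFromRequest),
            "Persisted ACLs are missing entries from the request");
      }
    }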
import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; @@ -143,7 +145,7 @@ protected void doValidateAndUpdateCache(String volumeName, String bucketName, assertNull(omMetadataManager.getBucketTable().get(bucketKey)); OMBucketCreateRequest omBucketCreateRequest = new OMBucketCreateRequest(modifiedRequest); - + omBucketCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 1); @@ -166,8 +168,7 @@ protected void doValidateAndUpdateCache(String volumeName, String bucketName, dbBucketInfo.getCreationTime()); assertEquals(bucketInfoFromProto.getModificationTime(), dbBucketInfo.getModificationTime()); - assertEquals(bucketInfoFromProto.getAcls(), - dbBucketInfo.getAcls()); + assertTrue(dbBucketInfo.getAcls().containsAll(bucketInfoFromProto.getAcls())); assertEquals(bucketInfoFromProto.getIsVersionEnabled(), dbBucketInfo.getIsVersionEnabled()); assertEquals(bucketInfoFromProto.getStorageType(), diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java index 7af60c18d94..9df26293d0e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java @@ -38,6 +38,7 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -68,6 +69,7 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.doNothing; @@ -97,6 +99,7 @@ public void setup() throws Exception { folder.toAbsolutePath().toString()); omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); + when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); when(ozoneManager.getMetrics()).thenReturn(omMetrics); when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); AuditLogger auditLogger = mock(AuditLogger.class); @@ -182,7 +185,7 @@ public void testValidateAndUpdateCache() throws Exception { omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest, getBucketLayout()); - + omDirectoryCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L); @@ -221,6 +224,7 @@ public void testValidateAndUpdateCacheWithNamespaceQuotaExceed() omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest, getBucketLayout()); + omDirectoryCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L); @@ 
-309,7 +313,7 @@ public void testValidateAndUpdateCacheWithSubDirectoryInPath() omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest, getBucketLayout()); - + omDirectoryCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L); @@ -429,6 +433,7 @@ public void testCreateDirectoryOMMetric() omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest, getBucketLayout()); + omDirectoryCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); assertEquals(0L, omMetrics.getNumKeys()); OMClientResponse omClientResponse = @@ -479,7 +484,7 @@ public void testCreateDirectoryInheritParentDefaultAcls() throws Exception { omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest, getBucketLayout()); - + omDirectoryCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L); @@ -509,7 +514,7 @@ private void verifyDirectoriesInheritAcls(String volumeName, List omKeyAcls = omKeyInfo.getAcls(); - assertEquals(expectedInheritAcls, omKeyAcls, "Failed to inherit parent acls!,"); + assertTrue(omKeyAcls.containsAll(expectedInheritAcls), "Failed to inherit parent acls!,"); prefix = dirName + OZONE_URI_DELIMITER; expectedInheritAcls = omKeyAcls; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java index e0460ba81a9..fca7efba169 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java @@ -47,6 +47,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import jakarta.annotation.Nonnull; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -97,6 +98,7 @@ public void setup() throws Exception { OMRequestTestUtils.configureFSOptimizedPaths(ozoneConfiguration, true); omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); + when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); when(ozoneManager.getMetrics()).thenReturn(omMetrics); when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); auditLogger = mock(AuditLogger.class); @@ -168,7 +170,7 @@ public void testValidateAndUpdateCache() throws Exception { omDirCreateRequestFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq, BucketLayout.FILE_SYSTEM_OPTIMIZED); - + omDirCreateRequestFSO.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omDirCreateRequestFSO.validateAndUpdateCache(ozoneManager, 100L); @@ -208,6 +210,7 @@ public void testValidateAndUpdateCacheWithNamespaceQuotaExceeded() omDirCreateRequestFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq, BucketLayout.FILE_SYSTEM_OPTIMIZED); + omDirCreateRequestFSO.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omDirCreateRequestFSO.validateAndUpdateCache(ozoneManager, 100L); 
assertSame(omClientResponse.getOMResponse().getStatus(), @@ -316,7 +319,7 @@ public void testValidateAndUpdateCacheWithSubDirectoryInPath() omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq, BucketLayout.FILE_SYSTEM_OPTIMIZED); - + omDirCreateReqFSO.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omDirCreateReqFSO.validateAndUpdateCache(ozoneManager, 100L); @@ -569,6 +572,7 @@ public void testCreateDirectoryUptoLimitOfMaxLevels255() throws Exception { omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq, BucketLayout.FILE_SYSTEM_OPTIMIZED); + omDirCreateReqFSO.setUGI(UserGroupInformation.getCurrentUser()); assertEquals(0L, omMetrics.getNumKeys()); OMClientResponse omClientResponse = @@ -603,7 +607,7 @@ public void testCreateDirectoryExceedLimitOfMaxLevels255() throws Exception { omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq, BucketLayout.FILE_SYSTEM_OPTIMIZED); - + omDirCreateReqFSO.setUGI(UserGroupInformation.getCurrentUser()); assertEquals(0L, omMetrics.getNumKeys()); OMClientResponse omClientResponse = omDirCreateReqFSO.validateAndUpdateCache(ozoneManager, 100L); @@ -642,6 +646,7 @@ public void testCreateDirectoryOMMetric() throws Exception { omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq, BucketLayout.FILE_SYSTEM_OPTIMIZED); + omDirCreateReqFSO.setUGI(UserGroupInformation.getCurrentUser()); assertEquals(0L, omMetrics.getNumKeys()); OMClientResponse omClientResponse = @@ -694,7 +699,7 @@ public void testCreateDirectoryInheritParentDefaultAcls() throws Exception { omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq, BucketLayout.FILE_SYSTEM_OPTIMIZED); - + omDirCreateReqFSO.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse omClientResponse = omDirCreateReqFSO.validateAndUpdateCache(ozoneManager, 100L); assertSame(omClientResponse.getOMResponse().getStatus(), @@ -729,7 +734,7 @@ private void verifyDirectoriesInheritAcls(List dirs, System.out.println( " subdir acls : " + omDirInfo + " ==> " + omDirAcls); - assertEquals(expectedInheritAcls, omDirAcls, + assertTrue(omDirAcls.containsAll(expectedInheritAcls), "Failed to inherit parent DEFAULT acls!"); parentID = omDirInfo.getObjectID(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java index 20da9d3e5dc..cdad3bcb18e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java @@ -35,6 +35,7 @@ import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import jakarta.annotation.Nonnull; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.Test; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -481,7 +482,7 @@ protected void verifyInheritAcls(List dirs, OmKeyInfo omKeyInfo, System.out.println( " subdir acls : " + omDirInfo + " ==> " + omDirAcls); - assertEquals(expectedInheritAcls, omDirAcls, + assertTrue(omDirAcls.containsAll(expectedInheritAcls), "Failed to inherit parent DEFAULT acls!"); parentID = omDirInfo.getObjectID(); @@ -513,9 +514,9 @@ protected void verifyInheritAcls(List dirs, OmKeyInfo omKeyInfo, // Should inherit parent 
DEFAULT acls // [user:newUser:rw[ACCESS], group:newGroup:rwl[ACCESS]] - assertEquals(parentDefaultAcl.stream() + assertTrue(keyAcls.containsAll(parentDefaultAcl.stream() .map(acl -> acl.withScope(OzoneAcl.AclScope.ACCESS)) - .collect(Collectors.toList()), keyAcls, + .collect(Collectors.toList())), "Failed to inherit bucket DEFAULT acls!"); // Should not inherit parent ACCESS acls assertThat(keyAcls).doesNotContain(parentAccessAcl); @@ -529,7 +530,7 @@ protected void verifyInheritAcls(List dirs, OmKeyInfo omKeyInfo, ".snapshot/a/b/keyName,Cannot create key under path reserved for snapshot: .snapshot/", ".snapshot,Cannot create key with reserved name: .snapshot"}) public void testPreExecuteWithInvalidKeyPrefix(String invalidKeyName, - String expectedErrorMessage) { + String expectedErrorMessage) throws IOException { OMRequest omRequest = createFileRequest(volumeName, bucketName, invalidKeyName, HddsProtos.ReplicationFactor.ONE, @@ -644,8 +645,10 @@ protected OMRequest createFileRequest( * @return OMFileCreateRequest reference */ @Nonnull - protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest) { - return new OMFileCreateRequest(omRequest, getBucketLayout()); + protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest) throws IOException { + OMFileCreateRequest request = new OMFileCreateRequest(omRequest, getBucketLayout()); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java index e988949c5b8..5a8c638141f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java @@ -28,9 +28,11 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; import org.junit.jupiter.api.Test; +import java.io.IOException; import java.util.UUID; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; @@ -236,9 +238,11 @@ private OmDirectoryInfo getDirInfo(String key) } @Override - protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest) { - return new OMFileCreateRequestWithFSO(omRequest, + protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest) throws IOException { + OMFileCreateRequest request = new OMFileCreateRequestWithFSO(omRequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java index 4bfdd333296..b9b7c30744e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java @@ -47,6 +47,7 @@ import 
org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.lock.OzoneLockProvider; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; @@ -501,6 +502,7 @@ public void testOverwritingExistingMetadata( createKeyRequest(false, 0, keyName, initialMetadata); OMKeyCreateRequest initialOmKeyCreateRequest = new OMKeyCreateRequest(initialRequest, getBucketLayout()); + initialOmKeyCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse initialResponse = initialOmKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L); verifyMetadataInResponse(initialResponse, initialMetadata); @@ -519,6 +521,7 @@ public void testOverwritingExistingMetadata( createKeyRequest(false, 0, keyName, updatedMetadata); OMKeyCreateRequest updatedOmKeyCreateRequest = new OMKeyCreateRequest(updatedRequest, getBucketLayout()); + updatedOmKeyCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); OMClientResponse updatedResponse = updatedOmKeyCreateRequest.validateAndUpdateCache(ozoneManager, 101L); @@ -562,6 +565,7 @@ public void testCreationWithoutMetadataFollowedByOverwriteWithMetadata( createKeyRequest(false, 0, keyName, overwriteMetadata, emptyMap(), emptyList()); OMKeyCreateRequest overwriteOmKeyCreateRequest = new OMKeyCreateRequest(overwriteRequestWithMetadata, getBucketLayout()); + overwriteOmKeyCreateRequest.setUGI(UserGroupInformation.getCurrentUser()); // Perform the overwrite operation and capture the response OMClientResponse overwriteResponse = @@ -989,7 +993,7 @@ public void testAtomicRewrite( // Retrieve the committed key info OmKeyInfo existingKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(getOzoneKey()); List existingAcls = existingKeyInfo.getAcls(); - assertEquals(acls, existingAcls); + assertThat(existingAcls.containsAll(acls)); // Create a request with a generation which doesn't match the current key omRequest = createKeyRequest(false, 0, 100, @@ -1039,9 +1043,9 @@ private void verifyKeyInheritAcls(List keyAcls, .findAny().orElse(null); // Should inherit parent DEFAULT Acls - assertEquals(parentDefaultAcl.stream() + assertTrue(keyAcls.containsAll(parentDefaultAcl.stream() .map(acl -> acl.withScope(OzoneAcl.AclScope.ACCESS)) - .collect(Collectors.toList()), keyAcls, + .collect(Collectors.toList())), "Failed to inherit parent DEFAULT acls!,"); // Should not inherit parent ACCESS Acls @@ -1054,7 +1058,7 @@ protected void addToKeyTable(String keyName) throws Exception { } - private void checkNotAValidPath(String keyName) { + private void checkNotAValidPath(String keyName) throws IOException { OMRequest omRequest = createKeyRequest(false, 0, keyName); OMKeyCreateRequest omKeyCreateRequest = getOMKeyCreateRequest(omRequest); OMException ex = @@ -1137,13 +1141,16 @@ protected String getOzoneKey() throws IOException { return omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); } - protected OMKeyCreateRequest getOMKeyCreateRequest(OMRequest omRequest) { - return new OMKeyCreateRequest(omRequest, getBucketLayout()); + protected OMKeyCreateRequest getOMKeyCreateRequest(OMRequest omRequest) throws IOException { + OMKeyCreateRequest request = new OMKeyCreateRequest(omRequest, getBucketLayout()); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } protected OMKeyCreateRequest getOMKeyCreateRequest( - 
OMRequest omRequest, BucketLayout layout) { - return new OMKeyCreateRequest(omRequest, layout); + OMRequest omRequest, BucketLayout layout) throws IOException { + OMKeyCreateRequest request = new OMKeyCreateRequest(omRequest, layout); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java index a5181b25a0e..8f8cc025436 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java @@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; @@ -210,15 +211,19 @@ protected String getOzoneKey() throws IOException { } @Override - protected OMKeyCreateRequest getOMKeyCreateRequest(OMRequest omRequest) { - return new OMKeyCreateRequestWithFSO(omRequest, + protected OMKeyCreateRequest getOMKeyCreateRequest(OMRequest omRequest) throws IOException { + OMKeyCreateRequest request = new OMKeyCreateRequestWithFSO(omRequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override protected OMKeyCreateRequest getOMKeyCreateRequest( - OMRequest omRequest, BucketLayout layout) { - return new OMKeyCreateRequestWithFSO(omRequest, layout); + OMRequest omRequest, BucketLayout layout) throws IOException { + OMKeyCreateRequest request = new OMKeyCreateRequestWithFSO(omRequest, layout); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java index e2219d5fcc1..c18e1ee7c3f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java @@ -60,6 +60,7 @@ import jakarta.annotation.Nonnull; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; import org.apache.hadoop.hdds.client.ContainerBlockID; @@ -85,6 +86,10 @@ import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.setupReplicationConfigValidation; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.anyLong; @@ -322,4 +327,21 @@ protected SnapshotInfo createSnapshot(String snapshotName) throws Exception { return snapshotInfo; } + @Test + public void 
testValidateKeyArgs() { + OMKeyRequest.ValidateKeyArgs validateKeyArgs1 = new OMKeyRequest.ValidateKeyArgs.Builder() + .setKeyName("tmpKey").setSnapshotReservedWord("tmpSnapshotReservedWord").build(); + assertEquals("tmpSnapshotReservedWord", validateKeyArgs1.getSnapshotReservedWord()); + assertEquals("tmpKey", validateKeyArgs1.getKeyName()); + assertTrue(validateKeyArgs1.isValidateKeyName()); + assertTrue(validateKeyArgs1.isValidateSnapshotReserved()); + + OMKeyRequest.ValidateKeyArgs validateKeyArgs2 = new OMKeyRequest.ValidateKeyArgs.Builder() + .setKeyName("tmpKey2").build(); + assertNull(validateKeyArgs2.getSnapshotReservedWord()); + assertEquals("tmpKey2", validateKeyArgs2.getKeyName()); + assertTrue(validateKeyArgs2.isValidateKeyName()); + assertFalse(validateKeyArgs2.isValidateSnapshotReserved()); + } + } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java index f02e1ee2367..0220afbc60c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java @@ -53,6 +53,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadsExpiredAbortRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; @@ -447,6 +448,7 @@ private List createMPUsWithFSO(String volume, String bucket, S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest = new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + s3InitiateMultipartUploadRequest.setUGI(UserGroupInformation.getLoginUser()); OMClientResponse omClientResponse = s3InitiateMultipartUploadRequest .validateAndUpdateCache(ozoneManager, trxnLogIndex); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java index 30b76801d9e..f9006b852e4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java @@ -250,7 +250,7 @@ private void verifyKeyInheritAcls(List keyAcls, // Should inherit parent DEFAULT Acls // [user:newUser:rw[DEFAULT], group:newGroup:rwl[DEFAULT]] - assertEquals(parentDefaultAcl, keyAcls, + assertTrue(keyAcls.containsAll(parentDefaultAcl), "Failed to inherit parent DEFAULT acls!"); // Should not inherit parent ACCESS Acls diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java index 1d4eb5310e0..d92992edf58 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java @@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.Test; import java.io.IOException; @@ -166,9 +167,11 @@ private long verifyDirectoriesInDB(List dirs, final long volumeId, @Override protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq( - OMRequest initiateMPURequest) { - return new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, + OMRequest initiateMPURequest) throws IOException { + S3InitiateMultipartUploadRequest request = new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getLoginUser()); + return request; } @Test @@ -256,7 +259,7 @@ private void verifyKeyInheritAcls(List dirs, OmKeyInfo fileInfo, List omDirAcls = omDirInfo.getAcls(); System.out.println(" subdir acls : " + omDirInfo + " ==> " + omDirAcls); - assertEquals(expectedInheritAcls, omDirAcls, + assertTrue(omDirAcls.containsAll(expectedInheritAcls), "Failed to inherit parent DEFAULT acls!"); parentID = omDirInfo.getObjectID(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java index bd93fe176e9..ff920667539 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java @@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; import org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.io.TempDir; @@ -114,6 +115,7 @@ public void setup() throws Exception { when(lvm.getMetadataLayoutVersion()).thenReturn(0); when(ozoneManager.getVersionManager()).thenReturn(lvm); when(ozoneManager.isRatisEnabled()).thenReturn(true); + when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); } @@ -353,21 +355,27 @@ protected OMRequest doPreExecuteInitiateMPUWithFSO( } protected S3MultipartUploadCompleteRequest getS3MultipartUploadCompleteReq( - OMRequest omRequest) { - return new S3MultipartUploadCompleteRequest(omRequest, + OMRequest omRequest) throws IOException { + S3MultipartUploadCompleteRequest request = new S3MultipartUploadCompleteRequest(omRequest, BucketLayout.DEFAULT); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } protected S3MultipartUploadCommitPartRequest getS3MultipartUploadCommitReq( - OMRequest omRequest) { - 
return new S3MultipartUploadCommitPartRequest(omRequest, + OMRequest omRequest) throws IOException { + S3MultipartUploadCommitPartRequest request = new S3MultipartUploadCommitPartRequest(omRequest, BucketLayout.DEFAULT); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq( - OMRequest initiateMPURequest) { - return new S3InitiateMultipartUploadRequest(initiateMPURequest, + OMRequest initiateMPURequest) throws IOException { + S3InitiateMultipartUploadRequest request = new S3InitiateMultipartUploadRequest(initiateMPURequest, BucketLayout.DEFAULT); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } protected S3MultipartUploadAbortRequest getS3MultipartUploadAbortReq( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java index 3c710988a56..7e92cf042e7 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java @@ -22,6 +22,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.security.UserGroupInformation; import java.io.IOException; import java.util.UUID; @@ -45,9 +46,11 @@ protected S3MultipartUploadAbortRequest getS3MultipartUploadAbortReq( @Override protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq( - OMRequest initiateMPURequest) { - return new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, + OMRequest initiateMPURequest) throws IOException { + S3InitiateMultipartUploadRequest request = new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java index 014b4e021cb..fa901af6457 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java @@ -71,7 +71,7 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { bucketName, keyName); S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest = - getS3InitiateMultipartUploadReq(initiateMPURequest); + getS3InitiateMultipartUploadReq(initiateMPURequest); OMClientResponse omClientResponse = s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager, 1L); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java index 24480c249cc..eb2c82af172 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java @@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.security.UserGroupInformation; import java.io.IOException; import java.util.ArrayList; @@ -49,16 +50,20 @@ public class TestS3MultipartUploadCommitPartRequestWithFSO @Override protected S3MultipartUploadCommitPartRequest getS3MultipartUploadCommitReq( - OMRequest omRequest) { - return new S3MultipartUploadCommitPartRequestWithFSO(omRequest, + OMRequest omRequest) throws IOException { + S3MultipartUploadCommitPartRequest request = new S3MultipartUploadCommitPartRequestWithFSO(omRequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq( - OMRequest initiateMPURequest) { - return new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, + OMRequest initiateMPURequest) throws IOException { + S3InitiateMultipartUploadRequest request = new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java index 1762f38b44b..dc58254d7d3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java @@ -30,6 +30,7 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.security.UserGroupInformation; import java.io.IOException; import java.util.ArrayList; @@ -113,23 +114,29 @@ protected String getOzoneDBKey(String volumeName, String bucketName, @Override protected S3MultipartUploadCompleteRequest getS3MultipartUploadCompleteReq( - OMRequest omRequest) { - return new S3MultipartUploadCompleteRequestWithFSO(omRequest, + OMRequest omRequest) throws IOException { + S3MultipartUploadCompleteRequest request = new S3MultipartUploadCompleteRequestWithFSO(omRequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override protected S3MultipartUploadCommitPartRequest getS3MultipartUploadCommitReq( - OMRequest omRequest) { - return new S3MultipartUploadCommitPartRequestWithFSO(omRequest, + OMRequest omRequest) throws IOException { + S3MultipartUploadCommitPartRequest request = new 
S3MultipartUploadCommitPartRequestWithFSO(omRequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq( - OMRequest initiateMPURequest) { - return new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, + OMRequest initiateMPURequest) throws IOException { + S3InitiateMultipartUploadRequest request = new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); + request.setUGI(UserGroupInformation.getCurrentUser()); + return request; } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3DeleteObjectTaggingRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3DeleteObjectTaggingRequest.java new file mode 100644 index 00000000000..9c307d85671 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3DeleteObjectTaggingRequest.java @@ -0,0 +1,202 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.request.s3.tagging; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.request.key.TestOMKeyRequest; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteObjectTaggingRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; +import org.junit.jupiter.api.Test; + +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; + +/** + * Test delete object tagging request. + */ +public class TestS3DeleteObjectTaggingRequest extends TestOMKeyRequest { + + @Test + public void testPreExecute() throws Exception { + doPreExecute(volumeName, bucketName, keyName); + } + + @Test + public void testValidateAndUpdateCacheSuccess() throws Exception { + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); + Map tags = getTags(5); + String ozoneKey = addKeyToTable(tags); + + OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + assertNotNull(omKeyInfo); + assertEquals(tags.size(), omKeyInfo.getTags().size()); + + OMRequest originalRequest = createDeleteObjectTaggingRequest(volumeName, bucketName, keyName); + + S3DeleteObjectTaggingRequest request = getDeleteObjectTaggingRequest(originalRequest); + + OMRequest modifiedRequest = request.preExecute(ozoneManager); + + request = getDeleteObjectTaggingRequest(modifiedRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + OMResponse omResponse = omClientResponse.getOMResponse(); + + assertNotNull(omResponse.getDeleteObjectTaggingResponse()); + assertEquals(OzoneManagerProtocolProtos.Status.OK, omResponse.getStatus()); + assertEquals(Type.DeleteObjectTagging, omResponse.getCmdType()); + + OmKeyInfo updatedKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + assertNotNull(updatedKeyInfo); + assertEquals(omKeyInfo.getVolumeName(), updatedKeyInfo.getVolumeName()); + assertEquals(omKeyInfo.getBucketName(), updatedKeyInfo.getBucketName()); + assertEquals(omKeyInfo.getKeyName(), updatedKeyInfo.getKeyName()); + assertEquals(0, updatedKeyInfo.getTags().size()); + } + + @Test + public void testValidateAndUpdateCacheVolumeNotFound() throws Exception { + OMRequest modifiedOmRequest = + doPreExecute(volumeName, bucketName, keyName); + + 
S3DeleteObjectTaggingRequest request = getDeleteObjectTaggingRequest(modifiedOmRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + + assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, + omClientResponse.getOMResponse().getStatus()); + } + + @Test + public void testValidateAndUpdateCacheBucketNotFound() throws Exception { + OMRequestTestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE, omMetadataManager); + + OMRequest modifiedOmRequest = + doPreExecute(volumeName, bucketName, keyName); + + S3DeleteObjectTaggingRequest request = getDeleteObjectTaggingRequest(modifiedOmRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + + assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND, + omClientResponse.getOMResponse().getStatus()); + } + + @Test + public void testValidateAndUpdateCacheKeyNotFound() throws Exception { + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); + + OMRequest modifiedOmRequest = + doPreExecute(volumeName, bucketName, keyName); + + S3DeleteObjectTaggingRequest request = getDeleteObjectTaggingRequest(modifiedOmRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + + assertEquals(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND, + omClientResponse.getOMResponse().getStatus()); + } + + protected OMRequest doPreExecute(String volumeName, String bucketName, + String keyName) throws Exception { + OMRequest originalRequest = createDeleteObjectTaggingRequest( + volumeName, bucketName, keyName); + + S3DeleteObjectTaggingRequest request = getDeleteObjectTaggingRequest(originalRequest); + + OMRequest modifiedRequest = request.preExecute(ozoneManager); + verifyRequest(modifiedRequest, originalRequest); + + return modifiedRequest; + } + + public OMRequest createDeleteObjectTaggingRequest(String volumeName, + String bucketName, + String keyName) { + KeyArgs.Builder keyArgs = KeyArgs.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName); + + + DeleteObjectTaggingRequest deleteObjectTaggingRequest = + DeleteObjectTaggingRequest.newBuilder() + .setKeyArgs(keyArgs) + .build(); + + return OMRequest.newBuilder() + .setDeleteObjectTaggingRequest(deleteObjectTaggingRequest) + .setCmdType(Type.DeleteObjectTagging) + .setClientId(UUID.randomUUID().toString()) + .build(); + } + + private void verifyRequest(OMRequest modifiedRequest, OMRequest originalRequest) { + + KeyArgs original = originalRequest.getDeleteObjectTaggingRequest().getKeyArgs(); + + KeyArgs updated = modifiedRequest.getDeleteObjectTaggingRequest().getKeyArgs(); + + assertEquals(original.getVolumeName(), updated.getVolumeName()); + assertEquals(original.getBucketName(), updated.getBucketName()); + assertEquals(original.getKeyName(), updated.getKeyName()); + assertEquals(original.getTagsList(), updated.getTagsList()); + // Modification time will not be set for object tagging request + assertFalse(updated.hasModificationTime()); + } + + protected String addKeyToTable(Map tags) throws Exception { + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)) + .addAllTags(tags) + .build(); + OMRequestTestUtils.addKeyToTable(false, false, omKeyInfo, + clientID, 1L, omMetadataManager); + return omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); + } + + protected 
S3DeleteObjectTaggingRequest getDeleteObjectTaggingRequest(OMRequest originalRequest) { + return new S3DeleteObjectTaggingRequest(originalRequest, getBucketLayout()); + } + + protected Map getTags(int size) { + Map tags = new HashMap<>(); + for (int i = 0; i < size; i++) { + tags.put("tag-key-" + UUID.randomUUID(), "tag-value-" + UUID.randomUUID()); + } + return tags; + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3DeleteObjectTaggingRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3DeleteObjectTaggingRequestWithFSO.java new file mode 100644 index 00000000000..ca3010a9b29 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3DeleteObjectTaggingRequestWithFSO.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.request.s3.tagging; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; + +import java.util.Map; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; + +/** + * Test delete object tagging request for FSO bucket. + */ +public class TestS3DeleteObjectTaggingRequestWithFSO extends TestS3DeleteObjectTaggingRequest { + + private static final String PARENT_DIR = "c/d/e"; + private static final String FILE_NAME = "file1"; + private static final String FILE_KEY = PARENT_DIR + "/" + FILE_NAME; + + @Override + protected String addKeyToTable(Map tags) throws Exception { + keyName = FILE_KEY; // updated key name + + // Create parent dirs for the path + long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName, + bucketName, PARENT_DIR, omMetadataManager); + + OmKeyInfo omKeyInfo = + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, FILE_KEY, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(1L) + .addAllTags(tags) + .build(); + OMRequestTestUtils.addFileToKeyTable(false, false, + FILE_NAME, omKeyInfo, -1, 50, omMetadataManager); + final long volumeId = omMetadataManager.getVolumeId( + omKeyInfo.getVolumeName()); + final long bucketId = omMetadataManager.getBucketId( + omKeyInfo.getVolumeName(), omKeyInfo.getBucketName()); + return omMetadataManager.getOzonePathKey( + volumeId, bucketId, omKeyInfo.getParentObjectID(), + omKeyInfo.getFileName()); + } + + @Override + public BucketLayout getBucketLayout() { + return BucketLayout.FILE_SYSTEM_OPTIMIZED; + } + + @Override + protected S3DeleteObjectTaggingRequest getDeleteObjectTaggingRequest(OMRequest originalRequest) { + return new S3DeleteObjectTaggingRequestWithFSO(originalRequest, getBucketLayout()); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3PutObjectTaggingRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3PutObjectTaggingRequest.java new file mode 100644 index 00000000000..c70c2587332 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3PutObjectTaggingRequest.java @@ -0,0 +1,254 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.request.s3.tagging; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.request.key.TestOMKeyRequest; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; +import org.junit.jupiter.api.Test; + +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * Test put object tagging request. + */ +public class TestS3PutObjectTaggingRequest extends TestOMKeyRequest { + + @Test + public void testPreExecute() throws Exception { + Map tags = new HashMap<>(); + getTags(2); + doPreExecute(volumeName, bucketName, keyName, tags); + } + + @Test + public void testValidateAndUpdateCacheSuccess() throws Exception { + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); + String ozoneKey = addKeyToTable(); + + OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + + assertNotNull(omKeyInfo); + assertTrue(omKeyInfo.getTags().isEmpty()); + + Map tags = getTags(5); + + OMRequest originalRequest = createPutObjectTaggingRequest(volumeName, bucketName, keyName, tags); + + S3PutObjectTaggingRequest request = getPutObjectTaggingRequest(originalRequest); + + OMRequest modifiedRequest = request.preExecute(ozoneManager); + + request = getPutObjectTaggingRequest(modifiedRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + OMResponse omResponse = omClientResponse.getOMResponse(); + + assertNotNull(omResponse.getPutObjectTaggingResponse()); + assertEquals(OzoneManagerProtocolProtos.Status.OK, omResponse.getStatus()); + assertEquals(Type.PutObjectTagging, omResponse.getCmdType()); + + OmKeyInfo updatedKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + assertNotNull(updatedKeyInfo); + assertEquals(omKeyInfo.getVolumeName(), updatedKeyInfo.getVolumeName()); + assertEquals(omKeyInfo.getBucketName(), updatedKeyInfo.getBucketName()); + assertEquals(omKeyInfo.getKeyName(), updatedKeyInfo.getKeyName()); + assertEquals(tags.size(), updatedKeyInfo.getTags().size()); + for (Map.Entry tag: tags.entrySet()) { + String 
value = updatedKeyInfo.getTags().get(tag.getKey()); + assertNotNull(value); + assertEquals(tag.getValue(), value); + } + } + + @Test + public void testValidateAndUpdateCacheVolumeNotFound() throws Exception { + OMRequest modifiedOmRequest = + doPreExecute(volumeName, bucketName, keyName, getTags(2)); + + S3PutObjectTaggingRequest request = getPutObjectTaggingRequest(modifiedOmRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + + assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, + omClientResponse.getOMResponse().getStatus()); + } + + @Test + public void testValidateAndUpdateCacheBucketNotFound() throws Exception { + OMRequestTestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE, omMetadataManager); + + OMRequest modifiedOmRequest = + doPreExecute(volumeName, bucketName, keyName, getTags(2)); + + S3PutObjectTaggingRequest request = getPutObjectTaggingRequest(modifiedOmRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + + assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND, + omClientResponse.getOMResponse().getStatus()); + } + + @Test + public void testValidateAndUpdateCacheKeyNotFound() throws Exception { + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); + + OMRequest modifiedOmRequest = + doPreExecute(volumeName, bucketName, keyName, getTags(2)); + + S3PutObjectTaggingRequest request = getPutObjectTaggingRequest(modifiedOmRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + + assertEquals(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND, + omClientResponse.getOMResponse().getStatus()); + } + + @Test + public void testValidateAndUpdateCacheEmptyTagSet() throws Exception { + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); + String ozoneKey = addKeyToTable(); + + OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + assertNotNull(omKeyInfo); + assertTrue(omKeyInfo.getTags().isEmpty()); + + Map tags = getTags(0); + + OMRequest originalRequest = createPutObjectTaggingRequest(volumeName, bucketName, keyName, tags); + + S3PutObjectTaggingRequest request = getPutObjectTaggingRequest(originalRequest); + + OMRequest modifiedRequest = request.preExecute(ozoneManager); + + request = getPutObjectTaggingRequest(modifiedRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 1L); + OMResponse omResponse = omClientResponse.getOMResponse(); + + assertNotNull(omResponse.getPutObjectTaggingResponse()); + assertEquals(OzoneManagerProtocolProtos.Status.OK, omResponse.getStatus()); + assertEquals(Type.PutObjectTagging, omResponse.getCmdType()); + + OmKeyInfo updatedKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + assertEquals(omKeyInfo.getVolumeName(), updatedKeyInfo.getVolumeName()); + assertEquals(omKeyInfo.getBucketName(), updatedKeyInfo.getBucketName()); + assertEquals(omKeyInfo.getKeyName(), updatedKeyInfo.getKeyName()); + assertTrue(omKeyInfo.getTags().isEmpty()); + assertEquals(tags.size(), updatedKeyInfo.getTags().size()); + } + + + protected OMRequest doPreExecute(String volumeName, + String bucketName, + String keyName, + Map tags) throws Exception { + OMRequest originalRequest = createPutObjectTaggingRequest( + volumeName, bucketName, keyName, tags); + + S3PutObjectTaggingRequest request = 
getPutObjectTaggingRequest(originalRequest); + + OMRequest modifiedRequest = request.preExecute(ozoneManager); + verifyRequest(modifiedRequest, originalRequest); + + return modifiedRequest; + } + + private OMRequest createPutObjectTaggingRequest(String volumeName, + String bucketName, + String keyName, + Map tags) { + KeyArgs.Builder keyArgs = KeyArgs.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName); + + if (tags != null && !tags.isEmpty()) { + keyArgs.addAllTags(KeyValueUtil.toProtobuf(tags)); + } + + PutObjectTaggingRequest putObjectTaggingRequest = + PutObjectTaggingRequest.newBuilder() + .setKeyArgs(keyArgs) + .build(); + + return OMRequest.newBuilder() + .setPutObjectTaggingRequest(putObjectTaggingRequest) + .setCmdType(Type.PutObjectTagging) + .setClientId(UUID.randomUUID().toString()) + .build(); + } + + private void verifyRequest(OMRequest modifiedRequest, OMRequest originalRequest) { + + KeyArgs original = originalRequest.getPutObjectTaggingRequest().getKeyArgs(); + + KeyArgs updated = modifiedRequest.getPutObjectTaggingRequest().getKeyArgs(); + + assertEquals(original.getVolumeName(), updated.getVolumeName()); + assertEquals(original.getBucketName(), updated.getBucketName()); + assertEquals(original.getKeyName(), updated.getKeyName()); + assertEquals(original.getTagsList(), updated.getTagsList()); + // Modification time will not be set for object tagging request + assertFalse(updated.hasModificationTime()); + } + + protected String addKeyToTable() throws Exception { + OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucketName, + keyName, clientID, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), 1L, + omMetadataManager); + + return omMetadataManager.getOzoneKey(volumeName, bucketName, + keyName); + } + + protected S3PutObjectTaggingRequest getPutObjectTaggingRequest(OMRequest originalRequest) { + return new S3PutObjectTaggingRequest(originalRequest, getBucketLayout()); + } + + protected Map getTags(int size) { + Map tags = new HashMap<>(); + for (int i = 0; i < size; i++) { + tags.put("tag-key-" + UUID.randomUUID(), "tag-value-" + UUID.randomUUID()); + } + return tags; + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3PutObjectTaggingRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3PutObjectTaggingRequestWithFSO.java new file mode 100644 index 00000000000..38ea5facad2 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3PutObjectTaggingRequestWithFSO.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.request.s3.tagging; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.junit.jupiter.api.Test; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * Test put object tagging request for FSO bucket. + */ +public class TestS3PutObjectTaggingRequestWithFSO extends TestS3PutObjectTaggingRequest { + + private static final String PARENT_DIR = "c/d/e"; + private static final String FILE_NAME = "file1"; + private static final String FILE_KEY = PARENT_DIR + "/" + FILE_NAME; + + @Test + public void testValidateAndUpdateCachePutObjectTaggingToDir() throws Exception { + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); + + addKeyToTable(); + + OMRequest modifiedOmRequest = + doPreExecute(volumeName, bucketName, PARENT_DIR, getTags(2)); + + S3PutObjectTaggingRequest request = getPutObjectTaggingRequest(modifiedOmRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + + assertEquals(OzoneManagerProtocolProtos.Status.NOT_SUPPORTED_OPERATION, + omClientResponse.getOMResponse().getStatus()); + } + + @Override + protected String addKeyToTable() throws Exception { + keyName = FILE_KEY; // updated key name + + // Create parent dirs for the path + long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName, + bucketName, PARENT_DIR, omMetadataManager); + + OmKeyInfo omKeyInfo = + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, FILE_KEY, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(1L) + .build(); + OMRequestTestUtils.addFileToKeyTable(false, false, + FILE_NAME, omKeyInfo, -1, 50, omMetadataManager); + final long volumeId = omMetadataManager.getVolumeId( + omKeyInfo.getVolumeName()); + final long bucketId = omMetadataManager.getBucketId( + omKeyInfo.getVolumeName(), omKeyInfo.getBucketName()); + return omMetadataManager.getOzonePathKey( + volumeId, bucketId, omKeyInfo.getParentObjectID(), + omKeyInfo.getFileName()); + } + + @Override + public BucketLayout getBucketLayout() { + return BucketLayout.FILE_SYSTEM_OPTIMIZED; + } + + @Override + protected S3PutObjectTaggingRequest getPutObjectTaggingRequest(OMRequest originalRequest) { + return new S3PutObjectTaggingRequestWithFSO(originalRequest, getBucketLayout()); + } + + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java index af904382256..b7b7ff0a464 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java @@ -18,15 +18,18 @@ package org.apache.hadoop.ozone.om.request.snapshot; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponse; @@ -86,6 +89,29 @@ public void testPreExecute(String snapshotName) throws Exception { doPreExecute(omRequest); } + @ValueSource(strings = { + // '-' is allowed. + "9cdf0e8a-6946-41ad-a2d1-9eb724fab126", + // 3 chars name is allowed. + "sn1", + // less than or equal to 63 chars are allowed. + "snap75795657617173401188448010125899089001363595171500499231286" + }) + @ParameterizedTest + public void testPreExecuteWithLinkedBucket(String snapshotName) throws Exception { + when(getOzoneManager().isOwner(any(), any())).thenReturn(true); + String resolvedBucketName = getBucketName() + "1"; + String resolvedVolumeName = getVolumeName() + "1"; + when(getOzoneManager().resolveBucketLink(any(Pair.class), any(OMClientRequest.class))) + .thenAnswer(i -> new ResolvedBucket(i.getArgument(0), Pair.of(resolvedVolumeName, resolvedBucketName), + "owner", BucketLayout.FILE_SYSTEM_OPTIMIZED)); + OMRequest omRequest = createSnapshotRequest(getVolumeName(), + getBucketName(), snapshotName); + OMSnapshotCreateRequest omSnapshotCreateRequest = doPreExecute(omRequest); + assertEquals(resolvedVolumeName, omSnapshotCreateRequest.getOmRequest().getCreateSnapshotRequest().getVolumeName()); + assertEquals(resolvedBucketName, omSnapshotCreateRequest.getOmRequest().getCreateSnapshotRequest().getBucketName()); + } + @ValueSource(strings = { // ? is not allowed in snapshot name. 
"a?b", diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java index 4c5dc2e77f0..9e19e594843 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java @@ -19,10 +19,14 @@ package org.apache.hadoop.ozone.om.request.snapshot; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -81,6 +85,29 @@ public void testPreExecute(String deleteSnapshotName) throws Exception { doPreExecute(omRequest); } + @ValueSource(strings = { + // '-' is allowed. + "9cdf0e8a-6946-41ad-a2d1-9eb724fab126", + // 3 chars name is allowed. + "sn1", + // less than or equal to 63 chars are allowed. + "snap75795657617173401188448010125899089001363595171500499231286" + }) + @ParameterizedTest + public void testPreExecuteWithLinkedBuckets(String deleteSnapshotName) throws Exception { + when(getOzoneManager().isOwner(any(), any())).thenReturn(true); + String resolvedBucketName = getBucketName() + "1"; + String resolvedVolumeName = getVolumeName() + "1"; + when(getOzoneManager().resolveBucketLink(any(Pair.class), any(OMClientRequest.class))) + .thenAnswer(i -> new ResolvedBucket(i.getArgument(0), Pair.of(resolvedVolumeName, resolvedBucketName), + "owner", BucketLayout.FILE_SYSTEM_OPTIMIZED)); + OMRequest omRequest = deleteSnapshotRequest(getVolumeName(), + getBucketName(), deleteSnapshotName); + OMSnapshotDeleteRequest omSnapshotDeleteRequest = doPreExecute(omRequest); + assertEquals(resolvedVolumeName, omSnapshotDeleteRequest.getOmRequest().getDeleteSnapshotRequest().getVolumeName()); + assertEquals(resolvedBucketName, omSnapshotDeleteRequest.getOmRequest().getDeleteSnapshotRequest().getBucketName()); + } + @ValueSource(strings = { // ? is not allowed in snapshot name. 
"a?b", diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java index a746597288a..8059c3ce501 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java @@ -16,14 +16,18 @@ */ package org.apache.hadoop.ozone.om.request.snapshot; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; @@ -83,6 +87,30 @@ public void testPreExecute(String toSnapshotName) throws Exception { doPreExecute(omRequest); } + @ValueSource(strings = { + // '-' is allowed. + "9cdf0e8a-6946-41ad-a2d1-9eb724fab126", + // 3 chars name is allowed. + "sn1", + // less than or equal to 63 chars are allowed. + "snap75795657617173401188448010125899089001363595171500499231286" + }) + @ParameterizedTest + public void testPreExecuteWithLinkedBucket(String toSnapshotName) throws Exception { + when(getOzoneManager().isOwner(any(), any())).thenReturn(true); + String resolvedBucketName = getBucketName() + "1"; + String resolvedVolumeName = getVolumeName() + "1"; + when(getOzoneManager().resolveBucketLink(any(Pair.class), any(OMClientRequest.class))) + .thenAnswer(i -> new ResolvedBucket(i.getArgument(0), Pair.of(resolvedVolumeName, resolvedBucketName), + "owner", BucketLayout.FILE_SYSTEM_OPTIMIZED)); + String currentSnapshotName = "current"; + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(getVolumeName(), + getBucketName(), currentSnapshotName, toSnapshotName); + OMSnapshotRenameRequest omSnapshotRenameRequest = doPreExecute(omRequest); + assertEquals(resolvedVolumeName, omSnapshotRenameRequest.getOmRequest().getRenameSnapshotRequest().getVolumeName()); + assertEquals(resolvedBucketName, omSnapshotRenameRequest.getOmRequest().getRenameSnapshotRequest().getBucketName()); + } + @ValueSource(strings = { // ? is not allowed in snapshot name. 
"a?b", diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java index 14f1438b78b..5e0d2db17c9 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java @@ -81,6 +81,7 @@ public void setup() throws Exception { auditLogger = mock(AuditLogger.class); when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); + when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); } @AfterEach diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3DeleteObjectTaggingResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3DeleteObjectTaggingResponse.java new file mode 100644 index 00000000000..26daacf6f28 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3DeleteObjectTaggingResponse.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.response.s3.tagging; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.response.key.TestOMKeyResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNotSame; + +/** + * Test delete object tagging response. + */ +public class TestS3DeleteObjectTaggingResponse extends TestOMKeyResponse { + + @Test + public void testAddToBatch() throws Exception { + OzoneManagerProtocolProtos.OMResponse omResponse = + OzoneManagerProtocolProtos.OMResponse.newBuilder().setDeleteObjectTaggingResponse( + OzoneManagerProtocolProtos.DeleteObjectTaggingResponse.getDefaultInstance()) + .setStatus(OzoneManagerProtocolProtos.Status.OK) + .setCmdType(OzoneManagerProtocolProtos.Type.DeleteObjectTagging) + .build(); + + Map tags = new HashMap<>(); + tags.put("tag-key1", "tag-value1"); + tags.put("tag-key2", "tag-value2"); + + String ozoneKey = addKeyToTable(tags); + OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + assertNotNull(omKeyInfo); + assertEquals(2, omKeyInfo.getTags().size()); + + omKeyInfo.getTags().clear(); + + S3DeleteObjectTaggingResponse deleteObjectTaggingResponse = getDeleteObjectTaggingResponse(omKeyInfo, omResponse); + + deleteObjectTaggingResponse.addToDBBatch(omMetadataManager, batchOperation); + + // Do manual commit and see whether addToBatch is successful or not. 
+ omMetadataManager.getStore().commitBatchOperation(batchOperation); + + OmKeyInfo updatedOmKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + assertNotSame(omKeyInfo, updatedOmKeyInfo); + assertNotNull(updatedOmKeyInfo); + assertEquals(0, updatedOmKeyInfo.getTags().size()); + } + + protected String addKeyToTable(Map tags) throws Exception { + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)) + .addAllTags(tags) + .build(); + OMRequestTestUtils.addKeyToTable(false, false, omKeyInfo, + clientID, 1L, omMetadataManager); + return omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); + } + + protected S3DeleteObjectTaggingResponse getDeleteObjectTaggingResponse(OmKeyInfo omKeyInfo, + OzoneManagerProtocolProtos.OMResponse omResponse) + throws IOException { + return new S3DeleteObjectTaggingResponse(omResponse, omKeyInfo); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3DeleteObjectTaggingResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3DeleteObjectTaggingResponseWithFSO.java new file mode 100644 index 00000000000..923ff441e98 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3DeleteObjectTaggingResponseWithFSO.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.response.s3.tagging; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; + +import java.io.IOException; +import java.util.Map; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; + +/** + * Test delete object tagging response for FSO bucket. + */ +public class TestS3DeleteObjectTaggingResponseWithFSO extends TestS3DeleteObjectTaggingResponse { + + @Override + public BucketLayout getBucketLayout() { + return BucketLayout.FILE_SYSTEM_OPTIMIZED; + } + + @Override + protected String addKeyToTable(Map tags) throws Exception { + // Add volume, bucket and key entries to OM DB. + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + + // Create parent dirs for the path + long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName, + bucketName, "", omMetadataManager); + + OmKeyInfo omKeyInfo = + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1) + .setParentObjectID(parentId) + .setUpdateID(1L) + .addAllTags(tags) + .build(); + OMRequestTestUtils.addFileToKeyTable(false, false, + keyName, omKeyInfo, -1, 50, omMetadataManager); + return omMetadataManager.getOzonePathKey( + omMetadataManager.getVolumeId(volumeName), + omMetadataManager.getBucketId(volumeName, bucketName), + omKeyInfo.getParentObjectID(), keyName); + } + + @Override + protected S3DeleteObjectTaggingResponse getDeleteObjectTaggingResponse(OmKeyInfo omKeyInfo, + OzoneManagerProtocolProtos.OMResponse omResponse) + throws IOException { + return new S3DeleteObjectTaggingResponseWithFSO(omResponse, omKeyInfo, + omMetadataManager.getVolumeId(volumeName), omBucketInfo.getObjectID()); + } + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3PutObjectTaggingResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3PutObjectTaggingResponse.java new file mode 100644 index 00000000000..af6565a447f --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3PutObjectTaggingResponse.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.response.s3.tagging; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.response.key.TestOMKeyResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNotSame; + +/** + * Test put object tagging response. + */ +public class TestS3PutObjectTaggingResponse extends TestOMKeyResponse { + + @Test + public void testAddToDBBatch() throws Exception { + OzoneManagerProtocolProtos.OMResponse omResponse = + OzoneManagerProtocolProtos.OMResponse.newBuilder().setPutObjectTaggingResponse( + OzoneManagerProtocolProtos.PutObjectTaggingResponse.getDefaultInstance()) + .setStatus(OzoneManagerProtocolProtos.Status.OK) + .setCmdType(OzoneManagerProtocolProtos.Type.PutObjectTagging) + .build(); + + String ozoneKey = addKeyToTable(); + OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + assertNotNull(omKeyInfo); + assertEquals(0, omKeyInfo.getTags().size()); + + Map tags = new HashMap<>(); + tags.put("tag-key1", "tag-value1"); + tags.put("tag-key2", "tag-value2"); + + omKeyInfo.setTags(tags); + + S3PutObjectTaggingResponse putObjectTaggingResponse = getPutObjectTaggingResponse(omKeyInfo, omResponse); + + putObjectTaggingResponse.addToDBBatch(omMetadataManager, batchOperation); + + // Do manual commit and see whether addToBatch is successful or not. 
+ omMetadataManager.getStore().commitBatchOperation(batchOperation); + + OmKeyInfo updatedOmKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + assertNotSame(omKeyInfo, updatedOmKeyInfo); + assertNotNull(updatedOmKeyInfo); + assertEquals(tags.size(), updatedOmKeyInfo.getTags().size()); + } + + protected String addKeyToTable() throws Exception { + OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucketName, + keyName, clientID, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), + omMetadataManager); + + return omMetadataManager.getOzoneKey(volumeName, bucketName, + keyName); + } + + protected S3PutObjectTaggingResponse getPutObjectTaggingResponse(OmKeyInfo omKeyInfo, + OzoneManagerProtocolProtos.OMResponse omResponse) + throws IOException { + return new S3PutObjectTaggingResponse(omResponse, omKeyInfo); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3PutObjectTaggingResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3PutObjectTaggingResponseWithFSO.java new file mode 100644 index 00000000000..1c93a527711 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3PutObjectTaggingResponseWithFSO.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.response.s3.tagging; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; + +import java.io.IOException; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; + +/** + * Test put object tagging response for FSO bucket. + */ +public class TestS3PutObjectTaggingResponseWithFSO extends TestS3PutObjectTaggingResponse { + + @Override + public BucketLayout getBucketLayout() { + return BucketLayout.FILE_SYSTEM_OPTIMIZED; + } + + @Override + protected String addKeyToTable() throws Exception { + // Add volume, bucket and key entries to OM DB. + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + + // Create parent dirs for the path + long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName, + bucketName, "", omMetadataManager); + + OmKeyInfo omKeyInfo = + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1) + .setParentObjectID(parentId) + .setUpdateID(1L) + .build(); + OMRequestTestUtils.addFileToKeyTable(false, false, + keyName, omKeyInfo, -1, 50, omMetadataManager); + return omMetadataManager.getOzonePathKey( + omMetadataManager.getVolumeId(volumeName), + omMetadataManager.getBucketId(volumeName, bucketName), + omKeyInfo.getParentObjectID(), keyName); + } + + @Override + protected S3PutObjectTaggingResponse getPutObjectTaggingResponse(OmKeyInfo omKeyInfo, OMResponse omResponse) + throws IOException { + return new S3PutObjectTaggingResponseWithFSO(omResponse, omKeyInfo, + omMetadataManager.getVolumeId(volumeName), omBucketInfo.getObjectID()); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java index 9d8de4bbb20..075dad5ee03 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java @@ -18,10 +18,8 @@ package org.apache.hadoop.ozone.om.response.volume; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -31,13 +29,8 @@ import org.apache.hadoop.ozone.storage.proto. 
OzoneManagerStorageProtos.PersistedUserVolumeInfo; import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; -import java.nio.file.Path; import java.util.UUID; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -45,33 +38,12 @@ /** * This class tests OMVolumeCreateResponse. */ -public class TestOMVolumeCreateResponse { - - @TempDir - private Path folder; - - private OMMetadataManager omMetadataManager; - private BatchOperation batchOperation; - - @BeforeEach - public void setup() throws Exception { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.toAbsolutePath().toString()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, null); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - } - - @AfterEach - public void tearDown() { - if (batchOperation != null) { - batchOperation.close(); - } - } +public class TestOMVolumeCreateResponse extends TestOMVolumeResponse { @Test public void testAddToDBBatch() throws Exception { - + OMMetadataManager omMetadataManager = getOmMetadataManager(); + BatchOperation batchOperation = getBatchOperation(); String volumeName = UUID.randomUUID().toString(); String userName = "user1"; PersistedUserVolumeInfo volumeList = PersistedUserVolumeInfo.newBuilder() @@ -79,10 +51,10 @@ public void testAddToDBBatch() throws Exception { .addVolumeNames(volumeName).build(); OMResponse omResponse = OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setSuccess(true) - .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance()) + .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) + .setStatus(OzoneManagerProtocolProtos.Status.OK) + .setSuccess(true) + .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance()) .build(); OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder() @@ -109,7 +81,8 @@ public void testAddToDBBatch() throws Exception { @Test void testAddToDBBatchNoOp() throws Exception { - + OMMetadataManager omMetadataManager = getOmMetadataManager(); + BatchOperation batchOperation = getBatchOperation(); OMResponse omResponse = OMResponse.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) .setStatus(OzoneManagerProtocolProtos.Status.VOLUME_ALREADY_EXISTS) @@ -125,6 +98,4 @@ void testAddToDBBatchNoOp() throws Exception { assertEquals(0, omMetadataManager.countRowsInTable( omMetadataManager.getVolumeTable())); } - - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java index 70dd23a7b04..e4b93881137 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java @@ -18,10 +18,8 @@ package org.apache.hadoop.ozone.om.response.volume; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.ozone.om.OMMetadataManager; -import 
org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -30,14 +28,9 @@ .OMResponse; import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos.PersistedUserVolumeInfo; import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; import java.util.UUID; -import java.nio.file.Path; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertNull; @@ -45,33 +38,12 @@ /** * This class tests OMVolumeCreateResponse. */ -public class TestOMVolumeDeleteResponse { - - @TempDir - private Path folder; - - private OMMetadataManager omMetadataManager; - private BatchOperation batchOperation; - - @BeforeEach - public void setup() throws Exception { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.toAbsolutePath().toString()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, null); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - } - - @AfterEach - public void tearDown() { - if (batchOperation != null) { - batchOperation.close(); - } - } +public class TestOMVolumeDeleteResponse extends TestOMVolumeResponse { @Test public void testAddToDBBatch() throws Exception { - + OMMetadataManager omMetadataManager = getOmMetadataManager(); + BatchOperation batchOperation = getBatchOperation(); String volumeName = UUID.randomUUID().toString(); String userName = "user1"; PersistedUserVolumeInfo volumeList = PersistedUserVolumeInfo.newBuilder() @@ -95,7 +67,7 @@ public void testAddToDBBatch() throws Exception { // As we are deleting updated volume list should be empty. 
PersistedUserVolumeInfo updatedVolumeList = PersistedUserVolumeInfo.newBuilder() - .setObjectID(1).setUpdateID(1).build(); + .setObjectID(1).setUpdateID(1).build(); OMVolumeDeleteResponse omVolumeDeleteResponse = new OMVolumeDeleteResponse(omResponse, volumeName, userName, updatedVolumeList); @@ -107,7 +79,7 @@ public void testAddToDBBatch() throws Exception { omMetadataManager.getStore().commitBatchOperation(batchOperation); assertNull(omMetadataManager.getVolumeTable().get( - omMetadataManager.getVolumeKey(volumeName))); + omMetadataManager.getVolumeKey(volumeName))); assertNull(omMetadataManager.getUserTable().get( omMetadataManager.getUserKey(userName))); @@ -115,7 +87,8 @@ public void testAddToDBBatch() throws Exception { @Test public void testAddToDBBatchNoOp() { - + OMMetadataManager omMetadataManager = getOmMetadataManager(); + BatchOperation batchOperation = getBatchOperation(); OMResponse omResponse = OMResponse.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.DeleteVolume) .setStatus(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND) @@ -127,5 +100,4 @@ public void testAddToDBBatchNoOp() { omResponse); assertDoesNotThrow(() -> omVolumeDeleteResponse.checkAndUpdateDB(omMetadataManager, batchOperation)); } - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeResponse.java new file mode 100644 index 00000000000..7edbaedf2dd --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeResponse.java @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.volume; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.io.TempDir; + +import java.nio.file.Path; + +/** + * Base test class for OM volume response. + */ +public class TestOMVolumeResponse { + @TempDir + private Path folder; + + private OMMetadataManager omMetadataManager; + private BatchOperation batchOperation; + + @BeforeEach + public void setup() throws Exception { + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + folder.toAbsolutePath().toString()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, null); + batchOperation = omMetadataManager.getStore().initBatchOperation(); + } + + @AfterEach + public void tearDown() { + if (batchOperation != null) { + batchOperation.close(); + } + } + + protected OMMetadataManager getOmMetadataManager() { + return omMetadataManager; + } + protected BatchOperation getBatchOperation() { + return batchOperation; + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java index aa640067ca4..00da2029c1e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java @@ -18,10 +18,8 @@ package org.apache.hadoop.ozone.om.response.volume; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -30,14 +28,9 @@ .OMResponse; import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos.PersistedUserVolumeInfo; import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; -import java.nio.file.Path; import java.util.UUID; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -45,34 +38,12 @@ /** * This class tests OMVolumeCreateResponse. 
*/ -public class TestOMVolumeSetOwnerResponse { - - @TempDir - private Path folder; - - private OMMetadataManager omMetadataManager; - private BatchOperation batchOperation; - - @BeforeEach - public void setup() throws Exception { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.toAbsolutePath().toString()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, null); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - } - - @AfterEach - public void tearDown() { - if (batchOperation != null) { - batchOperation.close(); - } - } - +public class TestOMVolumeSetOwnerResponse extends TestOMVolumeResponse { @Test public void testAddToDBBatch() throws Exception { - + OMMetadataManager omMetadataManager = getOmMetadataManager(); + BatchOperation batchOperation = getBatchOperation(); String volumeName = UUID.randomUUID().toString(); String oldOwner = "user1"; PersistedUserVolumeInfo volumeList = PersistedUserVolumeInfo.newBuilder() @@ -94,25 +65,24 @@ public void testAddToDBBatch() throws Exception { new OMVolumeCreateResponse(omResponse, omVolumeArgs, volumeList); - String newOwner = "user2"; PersistedUserVolumeInfo newOwnerVolumeList = PersistedUserVolumeInfo.newBuilder() - .setObjectID(1) - .setUpdateID(1) - .addVolumeNames(volumeName).build(); + .setObjectID(1) + .setUpdateID(1) + .addVolumeNames(volumeName).build(); PersistedUserVolumeInfo oldOwnerVolumeList = PersistedUserVolumeInfo.newBuilder() - .setObjectID(2) - .setUpdateID(2) - .build(); + .setObjectID(2) + .setUpdateID(2) + .build(); OmVolumeArgs newOwnerVolumeArgs = OmVolumeArgs.newBuilder() .setOwnerName(newOwner).setAdminName(newOwner) .setVolume(volumeName).setCreationTime(omVolumeArgs.getCreationTime()) .build(); OMVolumeSetOwnerResponse omVolumeSetOwnerResponse = - new OMVolumeSetOwnerResponse(omResponse, oldOwner, oldOwnerVolumeList, + new OMVolumeSetOwnerResponse(omResponse, oldOwner, oldOwnerVolumeList, newOwnerVolumeList, newOwnerVolumeArgs); omVolumeCreateResponse.addToDBBatch(omMetadataManager, batchOperation); @@ -139,7 +109,8 @@ public void testAddToDBBatch() throws Exception { @Test void testAddToDBBatchNoOp() throws Exception { - + OMMetadataManager omMetadataManager = getOmMetadataManager(); + BatchOperation batchOperation = getBatchOperation(); OMResponse omResponse = OMResponse.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.SetVolumeProperty) .setStatus(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND) @@ -155,6 +126,4 @@ void testAddToDBBatchNoOp() throws Exception { assertEquals(0, omMetadataManager.countRowsInTable( omMetadataManager.getVolumeTable())); } - - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java index fbc8e3c944d..c33e9d174a9 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java @@ -18,10 +18,8 @@ package org.apache.hadoop.ozone.om.response.volume; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.ozone.om.OMMetadataManager; -import 
org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -29,14 +27,9 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMResponse; import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; -import java.nio.file.Path; import java.util.UUID; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -44,34 +37,12 @@ /** * This class tests OMVolumeCreateResponse. */ -public class TestOMVolumeSetQuotaResponse { - - @TempDir - private Path folder; - - private OMMetadataManager omMetadataManager; - private BatchOperation batchOperation; - - @BeforeEach - public void setup() throws Exception { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.toAbsolutePath().toString()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, null); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - } - - @AfterEach - public void tearDown() { - if (batchOperation != null) { - batchOperation.close(); - } - } - +public class TestOMVolumeSetQuotaResponse extends TestOMVolumeResponse { @Test public void testAddToDBBatch() throws Exception { - + OMMetadataManager omMetadataManager = getOmMetadataManager(); + BatchOperation batchOperation = getBatchOperation(); String volumeName = UUID.randomUUID().toString(); String userName = "user1"; @@ -107,7 +78,8 @@ public void testAddToDBBatch() throws Exception { @Test void testAddToDBBatchNoOp() throws Exception { - + OMMetadataManager omMetadataManager = getOmMetadataManager(); + BatchOperation batchOperation = getBatchOperation(); OMResponse omResponse = OMResponse.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) .setStatus(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND) @@ -123,6 +95,4 @@ void testAddToDBBatchNoOp() throws Exception { assertEquals(0, omMetadataManager.countRowsInTable( omMetadataManager.getVolumeTable())); } - - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java index 8dcb030d637..681b24b8e42 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java @@ -50,6 +50,10 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_THREAD_NUMBER_DIR_DELETION_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_THREAD_NUMBER_DIR_DELETION; import static org.assertj.core.api.Assertions.assertThat; import static 
org.junit.jupiter.api.Assertions.assertTrue; @@ -161,4 +165,60 @@ public void testDeleteDirectoryCrossingSizeLimit() throws Exception { 500, 60000); assertThat(dirDeletingService.getRunCount().get()).isGreaterThanOrEqualTo(1); } + + @Test + public void testDeleteDirectoryFlatDirsHavingNoChilds() throws Exception { + OzoneConfiguration conf = createConfAndInitValues(); + OmTestManagers omTestManagers + = new OmTestManagers(conf); + KeyManager keyManager = omTestManagers.getKeyManager(); + writeClient = omTestManagers.getWriteClient(); + om = omTestManagers.getOzoneManager(); + + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + om.getMetadataManager(), BucketLayout.FILE_SYSTEM_OPTIMIZED); + String bucketKey = om.getMetadataManager().getBucketKey(volumeName, bucketName); + OmBucketInfo bucketInfo = om.getMetadataManager().getBucketTable().get(bucketKey); + + int dirCreatesCount = OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT * 2 + 100; + long parentId = 1; + OmDirectoryInfo baseDir = new OmDirectoryInfo.Builder().setName("dir_base") + .setCreationTime(Time.now()).setModificationTime(Time.now()) + .setObjectID(parentId).setParentObjectID(bucketInfo.getObjectID()) + .setUpdateID(0).build(); + OMRequestTestUtils.addDirKeyToDirTable(true, baseDir, volumeName, bucketName, + 1L, om.getMetadataManager()); + for (int i = 0; i < dirCreatesCount; ++i) { + OmDirectoryInfo dir1 = new OmDirectoryInfo.Builder().setName("dir" + i) + .setCreationTime(Time.now()).setModificationTime(Time.now()).setParentObjectID(parentId) + .setObjectID(i + 100).setUpdateID(i).build(); + OMRequestTestUtils.addDirKeyToDirTable(true, dir1, volumeName, bucketName, + 1L, om.getMetadataManager()); + } + + DirectoryDeletingService dirDeletingService = keyManager.getDirDeletingService(); + long[] delDirCnt = new long[2]; + delDirCnt[0] = dirDeletingService.getDeletedDirsCount(); + + OmKeyArgs delArgs = new OmKeyArgs.Builder() + .setVolumeName(volumeName).setBucketName(bucketName).setKeyName("dir_base") + .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)) + .setDataSize(0).setRecursive(true).build(); + writeClient.deleteKey(delArgs); + int pathDelLimit = conf.getInt(OZONE_PATH_DELETING_LIMIT_PER_TASK, + OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT); + int numThread = conf.getInt(OZONE_THREAD_NUMBER_DIR_DELETION, + OZONE_THREAD_NUMBER_DIR_DELETION_DEFAULT); + + // check if difference between each run should not cross the directory deletion limit + // and wait till all dir is removed + GenericTestUtils.waitFor(() -> { + delDirCnt[1] = dirDeletingService.getDeletedDirsCount(); + assertTrue( + delDirCnt[1] - delDirCnt[0] <= ((long) pathDelLimit * numThread), + "base: " + delDirCnt[0] + ", new: " + delDirCnt[1]); + delDirCnt[0] = delDirCnt[1]; + return dirDeletingService.getDeletedDirsCount() >= dirCreatesCount; + }, 500, 300000); + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java index 014865f919f..ab22b353bd7 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java @@ -166,8 +166,7 @@ public void testCleanupExpiredOpenKeys( // wait for submitted tasks to complete Thread.sleep(SERVICE_INTERVAL); final long oldkeyCount = 
openKeyCleanupService.getSubmittedOpenKeyCount(); - final long oldrunCount = openKeyCleanupService.getRunCount(); - LOG.info("oldkeyCount={}, oldrunCount={}", oldkeyCount, oldrunCount); + LOG.info("oldkeyCount={}", oldkeyCount); final OMMetrics metrics = om.getMetrics(); long numKeyHSyncs = metrics.getNumKeyHSyncs(); @@ -189,9 +188,6 @@ public void testCleanupExpiredOpenKeys( GenericTestUtils.waitFor( () -> openKeyCleanupService.getSubmittedOpenKeyCount() >= oldkeyCount + keyCount, SERVICE_INTERVAL, WAIT_TIME); - GenericTestUtils.waitFor( - () -> openKeyCleanupService.getRunCount() >= oldrunCount + 2, - SERVICE_INTERVAL, WAIT_TIME); waitForOpenKeyCleanup(false, BucketLayout.DEFAULT); waitForOpenKeyCleanup(hsync, BucketLayout.FILE_SYSTEM_OPTIMIZED); @@ -332,8 +328,7 @@ public void testExcludeMPUOpenKeys( // wait for submitted tasks to complete Thread.sleep(SERVICE_INTERVAL); final long oldkeyCount = openKeyCleanupService.getSubmittedOpenKeyCount(); - final long oldrunCount = openKeyCleanupService.getRunCount(); - LOG.info("oldMpuKeyCount={}, oldMpuRunCount={}", oldkeyCount, oldrunCount); + LOG.info("oldMpuKeyCount={}", oldkeyCount); final OMMetrics metrics = om.getMetrics(); long numKeyHSyncs = metrics.getNumKeyHSyncs(); @@ -353,13 +348,8 @@ public void testExcludeMPUOpenKeys( BucketLayout.FILE_SYSTEM_OPTIMIZED); openKeyCleanupService.resume(); - - GenericTestUtils.waitFor( - () -> openKeyCleanupService.getRunCount() >= oldrunCount + 2, - SERVICE_INTERVAL, WAIT_TIME); - - // wait for requests to complete - Thread.sleep(SERVICE_INTERVAL); + // wait for openKeyCleanupService to complete at least once + Thread.sleep(SERVICE_INTERVAL * 2); // No expired open keys fetched assertEquals(openKeyCleanupService.getSubmittedOpenKeyCount(), oldkeyCount); @@ -397,8 +387,7 @@ public void testCleanupExpiredOpenMPUPartKeys( // wait for submitted tasks to complete Thread.sleep(SERVICE_INTERVAL); final long oldkeyCount = openKeyCleanupService.getSubmittedOpenKeyCount(); - final long oldrunCount = openKeyCleanupService.getRunCount(); - LOG.info("oldMpuKeyCount={}, oldMpuRunCount={}", oldkeyCount, oldrunCount); + LOG.info("oldMpuKeyCount={},", oldkeyCount); final OMMetrics metrics = om.getMetrics(); long numOpenKeysCleaned = metrics.getNumOpenKeysCleaned(); @@ -423,9 +412,6 @@ public void testCleanupExpiredOpenMPUPartKeys( GenericTestUtils.waitFor( () -> openKeyCleanupService.getSubmittedOpenKeyCount() >= oldkeyCount + partCount, SERVICE_INTERVAL, WAIT_TIME); - GenericTestUtils.waitFor( - () -> openKeyCleanupService.getRunCount() >= oldrunCount + 2, - SERVICE_INTERVAL, WAIT_TIME); // No expired MPU parts fetched waitForOpenKeyCleanup(false, BucketLayout.DEFAULT); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java index 0f2ab615066..037e54d0008 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -69,6 +69,7 @@ import org.apache.ratis.util.TimeDuration; import jakarta.annotation.Nonnull; import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -82,6 +83,7 @@ import org.mockito.Mock; import 
org.mockito.MockedConstruction; import org.mockito.MockedStatic; +import org.mockito.Mockito; import org.mockito.junit.jupiter.MockitoExtension; import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; @@ -92,7 +94,6 @@ import org.rocksdb.RocksIterator; import java.io.File; -import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -411,7 +412,7 @@ private SnapshotInfo getMockedSnapshotInfo(UUID snapshotId) { } @ParameterizedTest - @ValueSource(ints = {1, 2, 5, 10, 100, 1000, 10000}) + @ValueSource(ints = {0, 1, 2, 5, 10, 100, 1000, 10000}) public void testGetDeltaFilesWithDag(int numberOfFiles) throws IOException { UUID snap1 = UUID.randomUUID(); UUID snap2 = UUID.randomUUID(); @@ -429,7 +430,7 @@ public void testGetDeltaFilesWithDag(int numberOfFiles) throws IOException { any(DifferSnapshotInfo.class), any(DifferSnapshotInfo.class), eq(diffDir)) - ).thenReturn(Lists.newArrayList(randomStrings)); + ).thenReturn(Optional.of(Lists.newArrayList(randomStrings))); ReferenceCounted rcFromSnapshot = omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap1.toString()); @@ -441,14 +442,20 @@ public void testGetDeltaFilesWithDag(int numberOfFiles) throws IOException { SnapshotInfo fromSnapshotInfo = getMockedSnapshotInfo(snap1); SnapshotInfo toSnapshotInfo = getMockedSnapshotInfo(snap2); when(jobTableIterator.isValid()).thenReturn(false); - Set deltaFiles = snapshotDiffManager.getDeltaFiles( - fromSnapshot, - toSnapshot, - Arrays.asList("cf1", "cf2"), fromSnapshotInfo, - toSnapshotInfo, false, - Collections.emptyMap(), diffDir); - assertEquals(randomStrings, deltaFiles); - + try (MockedStatic mockedRdbUtil = Mockito.mockStatic(RdbUtil.class, Mockito.CALLS_REAL_METHODS); + MockedStatic mockedRocksDiffUtils = Mockito.mockStatic(RocksDiffUtils.class, + Mockito.CALLS_REAL_METHODS)) { + mockedRdbUtil.when(() -> RdbUtil.getSSTFilesForComparison(any(), any())) + .thenReturn(Collections.singleton(RandomStringUtils.randomAlphabetic(10))); + mockedRocksDiffUtils.when(() -> RocksDiffUtils.filterRelevantSstFiles(any(), any())).thenAnswer(i -> null); + Set deltaFiles = snapshotDiffManager.getDeltaFiles( + fromSnapshot, + toSnapshot, + Arrays.asList("cf1", "cf2"), fromSnapshotInfo, + toSnapshotInfo, false, + Collections.emptyMap(), diffDir); + assertEquals(randomStrings, deltaFiles); + } rcFromSnapshot.close(); rcToSnapshot.close(); } @@ -476,7 +483,8 @@ public void testGetDeltaFilesWithFullDiff(int numberOfFiles, }); mockedRocksDiffUtils.when(() -> - RocksDiffUtils.filterRelevantSstFiles(anySet(), anyMap())) + RocksDiffUtils.filterRelevantSstFiles(anySet(), anyMap(), anyMap(), any(ManagedRocksDB.class), + any(ManagedRocksDB.class))) .thenAnswer((Answer) invocationOnMock -> { invocationOnMock.getArgument(0, Set.class).stream() .findAny().ifPresent(val -> { @@ -497,7 +505,7 @@ public void testGetDeltaFilesWithFullDiff(int numberOfFiles, any(DifferSnapshotInfo.class), any(DifferSnapshotInfo.class), anyString())) - .thenReturn(Collections.emptyList()); + .thenReturn(Optional.ofNullable(Collections.emptyList())); } ReferenceCounted rcFromSnapshot = @@ -543,7 +551,8 @@ public void testGetDeltaFilesWithDifferThrowException(int numberOfFiles) }); mockedRocksDiffUtils.when(() -> - RocksDiffUtils.filterRelevantSstFiles(anySet(), anyMap())) + RocksDiffUtils.filterRelevantSstFiles(anySet(), anyMap(), anyMap(), any(ManagedRocksDB.class), + any(ManagedRocksDB.class))) .thenAnswer((Answer) invocationOnMock -> { 
invocationOnMock.getArgument(0, Set.class).stream() .findAny().ifPresent(val -> { @@ -560,7 +569,7 @@ public void testGetDeltaFilesWithDifferThrowException(int numberOfFiles) when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap2.toString()))) .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap2.toString(), snap2)); - doThrow(new FileNotFoundException("File not found exception.")) + doThrow(new RuntimeException("File not found exception.")) .when(differ) .getSSTDiffListWithFullPath( any(DifferSnapshotInfo.class), @@ -1511,6 +1520,27 @@ private void setupMocksForRunningASnapDiff( when(bucketInfoTable.get(bucketKey)).thenReturn(bucketInfo); } + @Test + public void testGetDeltaFilesWithFullDiff() throws IOException { + SnapshotDiffManager spy = spy(snapshotDiffManager); + OmSnapshot fromSnapshot = getMockedOmSnapshot(UUID.randomUUID()); + OmSnapshot toSnapshot = getMockedOmSnapshot(UUID.randomUUID()); + Mockito.doAnswer(invocation -> { + OmSnapshot snapshot = invocation.getArgument(0); + if (snapshot == fromSnapshot) { + return Sets.newHashSet("1", "2", "3"); + } + if (snapshot == toSnapshot) { + return Sets.newHashSet("3", "4", "5"); + } + return Sets.newHashSet("6", "7", "8"); + }).when(spy).getSSTFileListForSnapshot(Mockito.any(OmSnapshot.class), + Mockito.anyList()); + Set deltaFiles = spy.getDeltaFiles(fromSnapshot, toSnapshot, Collections.emptyList(), snapshotInfo, + snapshotInfo, true, Collections.emptyMap(), null); + Assertions.assertEquals(Sets.newHashSet("1", "2", "3", "4", "5"), deltaFiles); + } + @Test public void testGetSnapshotDiffReportHappyCase() throws Exception { SnapshotInfo fromSnapInfo = snapshotInfo; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java index e60e23de22a..b037b68fd72 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java @@ -32,9 +32,12 @@ import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.ResolvedBucket; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotCreateRequest; import org.apache.hadoop.ozone.om.request.snapshot.TestOMSnapshotCreateRequest; @@ -140,6 +143,9 @@ public void baseSetup() throws Exception { omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); + when(ozoneManager.resolveBucketLink(any(Pair.class), any(OMClientRequest.class))) + .thenAnswer(i -> new ResolvedBucket(i.getArgument(0), + i.getArgument(0), "dummyBucketOwner", BucketLayout.FILE_SYSTEM_OPTIMIZED)); when(ozoneManager.getMetrics()).thenReturn(omMetrics); when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); 
when(ozoneManager.isRatisEnabled()).thenReturn(true); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/protocolPB/TestOzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/protocolPB/TestOzoneManagerRequestHandler.java new file mode 100644 index 00000000000..996cab08277 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/protocolPB/TestOzoneManagerRequestHandler.java @@ -0,0 +1,175 @@ +package org.apache.hadoop.ozone.protocolPB; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.helpers.BasicOmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.ListKeysLightResult; +import org.apache.hadoop.ozone.om.helpers.ListKeysResult; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; +import org.mockito.Mockito; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE; + +/** + * Test class to test out OzoneManagerRequestHandler. 
+ */ +public class TestOzoneManagerRequestHandler { + + + private OzoneManagerRequestHandler getRequestHandler(int limitListKeySize) { + OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt(OZONE_OM_SERVER_LIST_MAX_SIZE, limitListKeySize); + OzoneManager ozoneManager = Mockito.mock(OzoneManager.class); + Mockito.when(ozoneManager.getConfiguration()).thenReturn(conf); + return new OzoneManagerRequestHandler(ozoneManager); + } + + private OmKeyInfo getMockedOmKeyInfo() { + OmKeyInfo keyInfo = Mockito.mock(OmKeyInfo.class); + OzoneManagerProtocolProtos.KeyInfo info = + OzoneManagerProtocolProtos.KeyInfo.newBuilder().setBucketName("bucket").setKeyName("key").setVolumeName( + "volume").setDataSize(0).setType(HddsProtos.ReplicationType.RATIS).setCreationTime(0) + .setModificationTime(0).build(); + Mockito.when(keyInfo.getProtobuf(Mockito.anyBoolean(), Mockito.anyInt())).thenReturn(info); + Mockito.when(keyInfo.getProtobuf(Mockito.anyInt())).thenReturn(info); + return keyInfo; + } + + private BasicOmKeyInfo getMockedBasicOmKeyInfo() { + BasicOmKeyInfo keyInfo = Mockito.mock(BasicOmKeyInfo.class); + Mockito.when(keyInfo.getProtobuf()).thenReturn( + OzoneManagerProtocolProtos.BasicKeyInfo.newBuilder().setKeyName("key").setDataSize(0) + .setType(HddsProtos.ReplicationType.RATIS).setCreationTime(0).setModificationTime(0) + .build()); + return keyInfo; + } + + private OzoneFileStatus getMockedOzoneFileStatus() { + return new OzoneFileStatus(getMockedOmKeyInfo(), 256, false); + } + + private void mockOmRequest(OzoneManagerProtocolProtos.OMRequest request, + OzoneManagerProtocolProtos.Type cmdType, + int requestSize) { + Mockito.when(request.getTraceID()).thenReturn("traceId"); + Mockito.when(request.getCmdType()).thenReturn(cmdType); + switch (cmdType) { + case ListKeysLight: + case ListKeys: + Mockito.when(request.getListKeysRequest()).thenReturn(OzoneManagerProtocolProtos.ListKeysRequest.newBuilder() + .setCount(requestSize).setBucketName("bucket").setVolumeName("volume").setPrefix("").setStartKey("") + .build()); + break; + case ListStatus: + Mockito.when(request.getListStatusRequest()).thenReturn(OzoneManagerProtocolProtos.ListStatusRequest.newBuilder() + .setNumEntries(requestSize).setKeyArgs(OzoneManagerProtocolProtos.KeyArgs.newBuilder().setBucketName( + "bucket").setVolumeName("volume").setKeyName("keyName") + .setLatestVersionLocation(true).setHeadOp(true)).setRecursive(true).setStartKey("") + .build()); + break; + default: + break; + } + } + + @ParameterizedTest + @ValueSource(ints = {0, 9, 10, 11, 50}) + public void testListKeysResponseSize(int resultSize) throws IOException { + List keyInfos = IntStream.range(0, resultSize).mapToObj(i -> getMockedOmKeyInfo()).collect( + Collectors.toList()); + OzoneManagerRequestHandler requestHandler = getRequestHandler(10); + OzoneManager ozoneManager = requestHandler.getOzoneManager(); + Mockito.when(ozoneManager.listKeys(Mockito.anyString(), Mockito.anyString(), Mockito.anyString(), + Mockito.anyString(), Mockito.anyInt())).thenAnswer(i -> { + int maxSize = Math.max(Math.min(resultSize, i.getArgument(4)), 0); + return new ListKeysResult(keyInfos.isEmpty() ? 
keyInfos : keyInfos.subList(0, maxSize), + maxSize < resultSize); + }); + OzoneManagerProtocolProtos.OMRequest request = Mockito.mock(OzoneManagerProtocolProtos.OMRequest.class); + for (int requestSize : Arrays.asList(0, resultSize - 1, resultSize, resultSize + 1, Integer.MAX_VALUE)) { + mockOmRequest(request, OzoneManagerProtocolProtos.Type.ListKeys, requestSize); + OzoneManagerProtocolProtos.OMResponse omResponse = requestHandler.handleReadRequest(request); + int expectedSize = Math.max(Math.min(Math.min(10, requestSize), resultSize), 0); + Assertions.assertEquals(expectedSize, omResponse.getListKeysResponse().getKeyInfoList().size()); + Assertions.assertEquals(expectedSize < resultSize, omResponse.getListKeysResponse().getIsTruncated()); + } + } + + @ParameterizedTest + @ValueSource(ints = {0, 9, 10, 11, 50}) + public void testListLightKeysResponseSize(int resultSize) throws IOException { + List keyInfos = IntStream.range(0, resultSize).mapToObj(i -> getMockedBasicOmKeyInfo()).collect( + Collectors.toList()); + OzoneManagerRequestHandler requestHandler = getRequestHandler(10); + OzoneManager ozoneManager = requestHandler.getOzoneManager(); + Mockito.when(ozoneManager.listKeysLight(Mockito.anyString(), Mockito.anyString(), Mockito.anyString(), + Mockito.anyString(), Mockito.anyInt())).thenAnswer(i -> { + int maxSize = Math.max(Math.min(resultSize, i.getArgument(4)), 0); + return new ListKeysLightResult(keyInfos.isEmpty() ? keyInfos : keyInfos.subList(0, maxSize), + maxSize < resultSize); + }); + OzoneManagerProtocolProtos.OMRequest request = Mockito.mock(OzoneManagerProtocolProtos.OMRequest.class); + for (int requestSize : Arrays.asList(0, resultSize - 1, resultSize, resultSize + 1, Integer.MAX_VALUE)) { + mockOmRequest(request, OzoneManagerProtocolProtos.Type.ListKeysLight, requestSize); + OzoneManagerProtocolProtos.OMResponse omResponse = requestHandler.handleReadRequest(request); + int expectedSize = Math.max(Math.min(Math.min(10, requestSize), resultSize), 0); + Assertions.assertEquals(expectedSize, omResponse.getListKeysLightResponse().getBasicKeyInfoList().size()); + Assertions.assertEquals(expectedSize < resultSize, + omResponse.getListKeysLightResponse().getIsTruncated()); + } + } + + @ParameterizedTest + @ValueSource(ints = {0, 9, 10, 11, 50}) + public void testListStatusResponseSize(int resultSize) throws IOException { + List statusList = IntStream.range(0, resultSize).mapToObj(i -> getMockedOzoneFileStatus()) + .collect(Collectors.toList()); + OzoneManagerRequestHandler requestHandler = getRequestHandler(10); + OzoneManager ozoneManager = requestHandler.getOzoneManager(); + Mockito.when(ozoneManager.listStatus(Mockito.any(OmKeyArgs.class), Mockito.anyBoolean(), Mockito.anyString(), + Mockito.anyLong(), Mockito.anyBoolean())).thenAnswer(i -> { + long maxSize = i.getArgument(3); + maxSize = Math.max(Math.min(resultSize, maxSize), 0); + return statusList.isEmpty() ? 
statusList : statusList.subList(0, (int) maxSize); + }); + OzoneManagerProtocolProtos.OMRequest request = Mockito.mock(OzoneManagerProtocolProtos.OMRequest.class); + for (int requestSize : Arrays.asList(0, resultSize - 1, resultSize, resultSize + 1, Integer.MAX_VALUE)) { + mockOmRequest(request, OzoneManagerProtocolProtos.Type.ListStatus, requestSize); + OzoneManagerProtocolProtos.OMResponse omResponse = requestHandler.handleReadRequest(request); + int expectedSize = Math.max(Math.min(Math.min(10, requestSize), resultSize), 0); + Assertions.assertEquals(expectedSize, omResponse.getListStatusResponse().getStatusesList().size()); + } + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/SecretKeyTestClient.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/SecretKeyTestClient.java new file mode 100644 index 00000000000..32ef5988e10 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/SecretKeyTestClient.java @@ -0,0 +1,73 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.security; + +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyClient; + +import javax.crypto.KeyGenerator; +import javax.crypto.SecretKey; +import java.security.NoSuchAlgorithmException; +import java.time.Duration; +import java.time.Instant; +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; + +/** + * Test implementation of {@link SecretKeyClient}. + */ +public class SecretKeyTestClient implements SecretKeyClient { + private final Map keysMap = new HashMap<>(); + private ManagedSecretKey current; + + public SecretKeyTestClient() { + rotate(); + } + + public void rotate() { + this.current = generateKey(); + keysMap.put(current.getId(), current); + } + + @Override + public ManagedSecretKey getCurrentSecretKey() { + return current; + } + + @Override + public ManagedSecretKey getSecretKey(UUID id) { + return keysMap.get(id); + } + + private ManagedSecretKey generateKey() { + KeyGenerator keyGen = null; + try { + keyGen = KeyGenerator.getInstance("HmacSHA256"); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException("Should never happen", e); + } + SecretKey secretKey = keyGen.generateKey(); + return new ManagedSecretKey( + UUID.randomUUID(), + Instant.now(), + Instant.now().plus(Duration.ofHours(1)), + secretKey + ); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java index d94f59b8fb8..c0fdb7a8c21 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java @@ -24,7 +24,6 @@ import java.security.KeyPair; import java.security.PrivateKey; import java.security.PublicKey; -import java.security.Signature; import java.security.cert.CertPath; import java.security.cert.CertificateFactory; import java.security.cert.X509Certificate; @@ -35,6 +34,8 @@ import com.google.common.collect.ImmutableList; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.security.SecurityConfig; +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyClient; import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.io.Text; @@ -50,6 +51,8 @@ import org.apache.hadoop.ozone.om.exceptions.OMLeaderNotReadyException; import org.apache.hadoop.ozone.om.exceptions.OMNotLeaderException; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; +import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.upgrade.LayoutFeature; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.security.token.SecretManager; @@ -64,11 +67,14 @@ import static 
org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import org.slf4j.event.Level; +import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.protocol.RaftPeerId; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -87,6 +93,7 @@ public class TestOzoneDelegationTokenSecretManager { private OzoneDelegationTokenSecretManager secretManager; private SecurityConfig securityConfig; private OMCertificateClient certificateClient; + private SecretKeyClient secretKeyClient; private long expiryTime; private Text serviceRpcAdd; private OzoneConfiguration conf; @@ -102,6 +109,7 @@ public void setUp() throws Exception { securityConfig = new SecurityConfig(conf); certificateClient = setupCertificateClient(); certificateClient.init(); + secretKeyClient = new SecretKeyTestClient(); expiryTime = Time.monotonicNow() + 60 * 60 * 24; serviceRpcAdd = new Text("localhost"); final Map s3Secrets = new HashMap<>(); @@ -112,6 +120,9 @@ public void setUp() throws Exception { om = mock(OzoneManager.class); OMMetadataManager metadataManager = new OmMetadataManagerImpl(conf, om); when(om.getMetadataManager()).thenReturn(metadataManager); + OMLayoutVersionManager versionManager = mock(OMLayoutVersionManager.class); + when(versionManager.isAllowed(any(LayoutFeature.class))).thenReturn(true); + when(om.getVersionManager()).thenReturn(versionManager); s3SecretManager = new S3SecretLockedManager( new S3SecretManagerImpl(new S3SecretStoreMap(s3Secrets), mock(S3SecretCache.class)), @@ -368,12 +379,28 @@ public void testVerifySignatureSuccess() throws Exception { expiryTime, TOKEN_REMOVER_SCAN_INTERVAL); secretManager.start(certificateClient); OzoneTokenIdentifier id = new OzoneTokenIdentifier(); + id.setMaxDate(Time.now() + 60 * 60 * 24); + id.setOwner(new Text("test")); + id.setSecretKeyId(secretKeyClient.getCurrentSecretKey().getId().toString()); + assertTrue(secretManager.verifySignature(id, secretKeyClient.getCurrentSecretKey().sign(id.getBytes()))); + } + + @Test + public void testVerifyAsymmetricSignatureSuccess() throws Exception { + GenericTestUtils.setLogLevel(OzoneDelegationTokenSecretManager.LOG, Level.DEBUG); + GenericTestUtils.LogCapturer logCapturer = + GenericTestUtils.LogCapturer.captureLogs(OzoneDelegationTokenSecretManager.LOG); + secretManager = createSecretManager(conf, TOKEN_MAX_LIFETIME, + expiryTime, TOKEN_REMOVER_SCAN_INTERVAL); + secretManager.start(certificateClient); + OzoneTokenIdentifier id = new OzoneTokenIdentifier(); id.setOmCertSerialId(certificateClient.getCertificate() .getSerialNumber().toString()); id.setMaxDate(Time.now() + 60 * 60 * 24); id.setOwner(new Text("test")); - assertTrue(secretManager.verifySignature(id, - certificateClient.signData(id.getBytes()))); + assertTrue(secretManager.verifySignature(id, certificateClient.signData(id.getBytes()))); + assertTrue(logCapturer.getOutput().contains("Verify an asymmetric key signed Token")); + logCapturer.stopCapturing(); } @Test @@ -461,12 +488,9 @@ public void testValidateS3AUTHINFOFailure() throws Exception { * Validate hash using public key of KeyPair. 
*/ private void validateHash(byte[] hash, byte[] identifier) throws Exception { - Signature rsaSignature = - Signature.getInstance(securityConfig.getSignatureAlgo(), - securityConfig.getProvider()); - rsaSignature.initVerify(certificateClient.getPublicKey()); - rsaSignature.update(identifier); - assertTrue(rsaSignature.verify(hash)); + OzoneTokenIdentifier ozoneTokenIdentifier = OzoneTokenIdentifier.readProtoBuf(identifier); + ManagedSecretKey verifyKey = secretKeyClient.getSecretKey(UUID.fromString(ozoneTokenIdentifier.getSecretKeyId())); + verifyKey.isValidSignature(identifier, hash); } /** @@ -485,6 +509,7 @@ private void validateHash(byte[] hash, byte[] identifier) throws Exception { .setS3SecretManager(s3SecretManager) .setCertificateClient(certificateClient) .setOmServiceId(OzoneConsts.OM_SERVICE_ID_DEFAULT) + .setSecretKeyClient(secretKeyClient) .build(); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java index c354864a529..ab773f6d718 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java @@ -169,8 +169,7 @@ private void createKey(String volume, .setReplicationConfig(StandaloneReplicationConfig .getInstance(HddsProtos.ReplicationFactor.ONE)) .setDataSize(0) - .setAcls(OzoneAclUtil.getAclList(testUgi.getUserName(), - testUgi.getGroupNames(), ALL, ALL)) + .setAcls(OzoneAclUtil.getAclList(testUgi, ALL, ALL)) .setOwnerName(UserGroupInformation.getCurrentUser().getShortUserName()) .build(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java index c3ec7843a6f..a6e6f13ae34 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java @@ -344,8 +344,7 @@ private OzoneObjInfo createKey(String volume, String bucket, String keyName) HddsProtos.ReplicationFactor.ONE)) .setDataSize(0) // here we give test ugi full access - .setAcls(OzoneAclUtil.getAclList(testUgi.getUserName(), - testUgi.getGroupNames(), ALL, ALL)) + .setAcls(OzoneAclUtil.getAclList(testUgi, ALL, ALL)) .setOwnerName(UserGroupInformation.getCurrentUser().getShortUserName()) .build(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java index 7c1aad0723b..98e7ce7be85 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java @@ -142,11 +142,9 @@ private static void prepareTestKeys() throws IOException { UserGroupInformation.getCurrentUser().getShortUserName()) .setDataSize(0); if (k == 0) { - keyArgsBuilder.setAcls(OzoneAclUtil.getAclList( - testUgi.getUserName(), testUgi.getGroupNames(), ALL, ALL)); + keyArgsBuilder.setAcls(OzoneAclUtil.getAclList(testUgi, ALL, ALL)); } else { - keyArgsBuilder.setAcls(OzoneAclUtil.getAclList( - testUgi.getUserName(), 
testUgi.getGroupNames(), NONE, NONE)); + keyArgsBuilder.setAcls(OzoneAclUtil.getAclList(testUgi, NONE, NONE)); } OmKeyArgs keyArgs = keyArgsBuilder.build(); OpenKeySession keySession = writeClient.createFile(keyArgs, true, diff --git a/hadoop-ozone/ozonefs-common/pom.xml b/hadoop-ozone/ozonefs-common/pom.xml index 6132f9bc125..18839deaee5 100644 --- a/hadoop-ozone/ozonefs-common/pom.xml +++ b/hadoop-ozone/ozonefs-common/pom.xml @@ -19,12 +19,12 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-filesystem-common Apache Ozone FileSystem Common jar - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT UTF-8 true @@ -112,35 +112,10 @@ - - org.apache.ozone - hdds-container-service - test - org.apache.ozone hdds-hadoop-dependency-test test - - org.apache.ozone - hdds-server-framework - test - - - org.apache.ozone - hdds-server-scm - test - - - org.apache.ozone - hdds-test-utils - test - - - org.apache.ozone - ozone-manager - test - diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java index acab6d168cc..9f7551aa8f0 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java @@ -46,4 +46,13 @@ public BasicOzFs(URI theUri, Configuration conf) public int getUriDefaultPort() { return -1; } + + /** + * Close the file system; the FileContext API doesn't have an explicit close. + */ + @Override + protected void finalize() throws Throwable { + fsImpl.close(); + super.finalize(); + } } diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java index df8ece03486..689e340ff5d 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java @@ -29,6 +29,8 @@ import java.util.List; import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FileAlreadyExistsException; @@ -42,7 +44,6 @@ import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; @@ -68,21 +69,23 @@ import org.apache.hadoop.ozone.client.rpc.RpcClient; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.BasicOmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import 
org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; import org.apache.hadoop.security.token.Token; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.lang3.StringUtils; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; @@ -90,9 +93,6 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.DONE; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * Basic Implementation of the OzoneFileSystem calls. *
    @@ -193,18 +193,24 @@ public BasicOzoneClientAdapterImpl(String omHost, int omPort, OzoneClientFactory.getRpcClient(conf); } objectStore = ozoneClient.getObjectStore(); - this.volume = objectStore.getVolume(volumeStr); - this.bucket = volume.getBucket(bucketStr); - bucketReplicationConfig = this.bucket.getReplicationConfig(); - nextReplicationConfigRefreshTime = - clock.millis() + bucketRepConfigRefreshPeriodMS; + try { + this.volume = objectStore.getVolume(volumeStr); + this.bucket = volume.getBucket(bucketStr); + bucketReplicationConfig = this.bucket.getReplicationConfig(); + nextReplicationConfigRefreshTime = clock.millis() + bucketRepConfigRefreshPeriodMS; - // resolve the bucket layout in case of Link Bucket - BucketLayout resolvedBucketLayout = - OzoneClientUtils.resolveLinkBucketLayout(bucket, objectStore, - new HashSet<>()); + // resolve the bucket layout in case of Link Bucket + BucketLayout resolvedBucketLayout = + OzoneClientUtils.resolveLinkBucketLayout(bucket, objectStore, new HashSet<>()); - OzoneFSUtils.validateBucketLayout(bucket.getName(), resolvedBucketLayout); + OzoneFSUtils.validateBucketLayout(bucket.getName(), resolvedBucketLayout); + } catch (IOException | RuntimeException exception) { + // in case of exception, the adapter object will not be + // initialised making the client object unreachable, close the client + // to release resources in this case and rethrow. + ozoneClient.close(); + throw exception; + } this.configuredDnPort = conf.getInt( OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, @@ -432,15 +438,22 @@ public Iterator listKeys(String pathKey) throws IOException { @Override public List listStatus(String keyName, boolean recursive, String startKey, long numEntries, URI uri, - Path workingDir, String username) throws IOException { + Path workingDir, String username, boolean lite) throws IOException { try { incrementCounter(Statistic.OBJECTS_LIST, 1); - List statuses = bucket - .listStatus(keyName, recursive, startKey, numEntries); - List result = new ArrayList<>(); - for (OzoneFileStatus status : statuses) { - result.add(toFileStatusAdapter(status, username, uri, workingDir)); + if (lite) { + List statuses = bucket + .listStatusLight(keyName, recursive, startKey, numEntries); + for (OzoneFileStatusLight status : statuses) { + result.add(toFileStatusAdapter(status, username, uri, workingDir)); + } + } else { + List statuses = bucket + .listStatus(keyName, recursive, startKey, numEntries); + for (OzoneFileStatus status : statuses) { + result.add(toFileStatusAdapter(status, username, uri, workingDir)); + } } return result; } catch (OMException e) { @@ -545,6 +558,31 @@ private FileStatusAdapter toFileStatusAdapter(OzoneFileStatus status, ); } + private FileStatusAdapter toFileStatusAdapter(OzoneFileStatusLight status, + String owner, URI defaultUri, Path workingDir) { + BasicOmKeyInfo keyInfo = status.getKeyInfo(); + short replication = (short) keyInfo.getReplicationConfig() + .getRequiredNodes(); + return new FileStatusAdapter( + keyInfo.getDataSize(), + keyInfo.getReplicatedSize(), + new Path(OZONE_URI_DELIMITER + keyInfo.getKeyName()) + .makeQualified(defaultUri, workingDir), + status.isDirectory(), + replication, + status.getBlockSize(), + keyInfo.getModificationTime(), + keyInfo.getModificationTime(), + status.isDirectory() ? 
(short) 00777 : (short) 00666, + StringUtils.defaultIfEmpty(keyInfo.getOwnerName(), owner), + owner, + null, + getBlockLocations(null), + false, + OzoneClientUtils.isKeyErasureCode(keyInfo) + ); + } + /** * Helper method to get List of BlockLocation from OM Key info. * @param fileStatus Ozone key file status. @@ -581,16 +619,15 @@ private BlockLocation[] getBlockLocations(OzoneFileStatus fileStatus) { omKeyLocationInfo.getPipeline().getNodes() .forEach(dn -> { hostList.add(dn.getHostName()); - int port = dn.getPort( - DatanodeDetails.Port.Name.STANDALONE).getValue(); + int port = dn.getStandalonePort().getValue(); if (port == 0) { port = configuredDnPort; } nameList.add(dn.getHostName() + ":" + port); }); - String[] hosts = hostList.toArray(new String[hostList.size()]); - String[] names = nameList.toArray(new String[nameList.size()]); + String[] hosts = hostList.toArray(new String[0]); + String[] names = nameList.toArray(new String[0]); BlockLocation blockLocation = new BlockLocation( names, hosts, offsetOfBlockInFile, omKeyLocationInfo.getLength()); diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java index ed5574af32b..ed8d99d67fa 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.ozone; +import com.google.common.base.Function; import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; @@ -684,28 +685,30 @@ public FileStatus[] listStatus(Path f) throws IOException { LinkedList statuses = new LinkedList<>(); List tmpStatusList; String startKey = ""; - + int entriesAdded; do { tmpStatusList = adapter.listStatus(pathToKey(f), false, startKey, numEntries, uri, - workingDir, getUsername()) + workingDir, getUsername(), true) .stream() .map(this::convertFileStatus) .collect(Collectors.toList()); - + entriesAdded = 0; if (!tmpStatusList.isEmpty()) { if (startKey.isEmpty() || !statuses.getLast().getPath().toString() .equals(tmpStatusList.get(0).getPath().toString())) { statuses.addAll(tmpStatusList); + entriesAdded += tmpStatusList.size(); } else { statuses.addAll(tmpStatusList.subList(1, tmpStatusList.size())); + entriesAdded += tmpStatusList.size() - 1; } startKey = pathToKey(statuses.getLast().getPath()); } // listStatus returns entries numEntries in size if available. // Any lesser number of entries indicate that the required entries have // exhausted. - } while (tmpStatusList.size() == numEntries); + } while (entriesAdded > 0); return statuses.toArray(new FileStatus[0]); @@ -947,13 +950,15 @@ public RemoteIterator listFiles(Path f, boolean recursive) public RemoteIterator listLocatedStatus(Path f) throws IOException { incrementCounter(Statistic.INVOCATION_LIST_LOCATED_STATUS); - return super.listLocatedStatus(f); + return new OzoneFileStatusIterator<>(f, + (stat) -> stat instanceof LocatedFileStatus ? 
(LocatedFileStatus) stat : new LocatedFileStatus(stat, null), + false); } @Override public RemoteIterator listStatusIterator(Path f) throws IOException { - return new OzoneFileStatusIterator<>(f); + return new OzoneFileStatusIterator<>(f, stat -> stat, true); } @Override @@ -986,7 +991,6 @@ public void setTimes(Path f, long mtime, long atime) throws IOException { String key = pathToKey(qualifiedPath); adapter.setTimes(key, mtime, atime); } - /** * A private class implementation for iterating list of file status. * @@ -999,18 +1003,24 @@ private final class OzoneFileStatusIterator private Path p; private T curStat = null; private String startPath = ""; + private boolean lite; + private Function transformFunc; /** * Constructor to initialize OzoneFileStatusIterator. * Get the first batch of entry for iteration. * * @param p path to file/directory. + * @param transformFunc function to convert FileStatus into an expected type. + * @param lite if true it should look into fetching a lightweight keys from server. * @throws IOException */ - private OzoneFileStatusIterator(Path p) throws IOException { + private OzoneFileStatusIterator(Path p, Function transformFunc, boolean lite) throws IOException { this.p = p; + this.lite = lite; + this.transformFunc = transformFunc; // fetch the first batch of entries in the directory - thisListing = listFileStatus(p, startPath); + thisListing = listFileStatus(p, startPath, lite); if (thisListing != null && !thisListing.isEmpty()) { startPath = pathToKey( thisListing.get(thisListing.size() - 1).getPath()); @@ -1029,7 +1039,7 @@ public boolean hasNext() throws IOException { while (curStat == null && hasNextNoFilter()) { T next; FileStatus fileStat = thisListing.get(i++); - next = (T) (fileStat); + next = this.transformFunc.apply(fileStat); curStat = next; } return curStat != null; @@ -1047,10 +1057,9 @@ private boolean hasNextNoFilter() throws IOException { return false; } if (i >= thisListing.size()) { - if (startPath != null && (thisListing.size() == listingPageSize || - thisListing.size() == listingPageSize - 1)) { + if (startPath != null && (!thisListing.isEmpty())) { // current listing is exhausted & fetch a new listing - thisListing = listFileStatus(p, startPath); + thisListing = listFileStatus(p, startPath, lite); if (thisListing != null && !thisListing.isEmpty()) { startPath = pathToKey( thisListing.get(thisListing.size() - 1).getPath()); @@ -1085,10 +1094,11 @@ public T next() throws IOException { * * @param f * @param startPath + * @param lite if true return lightweight keys * @return list of file status. 
* @throws IOException */ - private List listFileStatus(Path f, String startPath) + private List listFileStatus(Path f, String startPath, boolean lite) throws IOException { incrementCounter(Statistic.INVOCATION_LIST_STATUS, 1); statistics.incrementReadOps(1); @@ -1096,7 +1106,7 @@ private List listFileStatus(Path f, String startPath) List statusList; statusList = adapter.listStatus(pathToKey(f), false, startPath, - listingPageSize, uri, workingDir, getUsername()) + listingPageSize, uri, workingDir, getUsername(), lite) .stream() .map(this::convertFileStatus) .collect(Collectors.toList()); diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java index 31889ed2a58..9896ab722de 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java @@ -32,6 +32,7 @@ import com.google.common.base.Preconditions; import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FileAlreadyExistsException; @@ -49,7 +50,6 @@ import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.OzoneClientConfig; @@ -65,50 +65,48 @@ import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneFsServerDefaults; +import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.OzoneSnapshot; +import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.rpc.RpcClient; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.BasicOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; import 
org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; - -import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes - .BUCKET_ALREADY_EXISTS; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes - .VOLUME_ALREADY_EXISTS; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.DONE; @@ -221,7 +219,15 @@ public BasicRootedOzoneClientAdapterImpl(String omHost, int omPort, OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); // Fetches the bucket layout to be used by OFS. - initDefaultFsBucketLayout(conf); + try { + initDefaultFsBucketLayout(conf); + } catch (IOException | RuntimeException exception) { + // in case of exception, the adapter object will not be + // initialised making the client object unreachable, close the client + // to release resources in this case and rethrow. + ozoneClient.close(); + throw exception; + } config = conf; } finally { @@ -780,7 +786,7 @@ public Iterator listKeys(String pathStr) throws IOException { */ private List listStatusRoot( boolean recursive, String startPath, long numEntries, - URI uri, Path workingDir, String username) throws IOException { + URI uri, Path workingDir, String username, boolean lite) throws IOException { OFSPath ofsStartPath = new OFSPath(startPath, config); // list volumes @@ -793,7 +799,7 @@ private List listStatusRoot( if (recursive) { String pathStrNextVolume = volume.getName(); res.addAll(listStatus(pathStrNextVolume, recursive, startPath, - numEntries - res.size(), uri, workingDir, username)); + numEntries - res.size(), uri, workingDir, username, lite)); } } return res; @@ -802,9 +808,10 @@ private List listStatusRoot( /** * Helper for OFS listStatus on a volume. 
*/ + @SuppressWarnings("checkstyle:ParameterNumber") private List listStatusVolume(String volumeStr, boolean recursive, String startPath, long numEntries, - URI uri, Path workingDir, String username) throws IOException { + URI uri, Path workingDir, String username, boolean lite) throws IOException { OFSPath ofsStartPath = new OFSPath(startPath, config); // list buckets in the volume @@ -818,7 +825,7 @@ private List listStatusVolume(String volumeStr, if (recursive) { String pathStrNext = volumeStr + OZONE_URI_DELIMITER + bucket.getName(); res.addAll(listStatus(pathStrNext, recursive, startPath, - numEntries - res.size(), uri, workingDir, username)); + numEntries - res.size(), uri, workingDir, username, lite)); } } return res; @@ -828,7 +835,7 @@ private List listStatusVolume(String volumeStr, * Helper for OFS listStatus on a bucket to get all snapshots. */ private List listStatusBucketSnapshot( - String volumeName, String bucketName, URI uri) throws IOException { + String volumeName, String bucketName, URI uri, String prevSnapshot, long numberOfEntries) throws IOException { OzoneBucket ozoneBucket = getBucket(volumeName, bucketName, false); UserGroupInformation ugi = @@ -837,9 +844,9 @@ private List listStatusBucketSnapshot( String group = getGroupName(ugi); List res = new ArrayList<>(); - Iterator snapshotIter = objectStore.listSnapshot(volumeName, bucketName, null, null); + Iterator snapshotIter = objectStore.listSnapshot(volumeName, bucketName, null, prevSnapshot); - while (snapshotIter.hasNext()) { + while (snapshotIter.hasNext() && res.size() < numberOfEntries) { OzoneSnapshot ozoneSnapshot = snapshotIter.next(); if (SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE.name().equals(ozoneSnapshot.getSnapshotStatus())) { res.add(getFileStatusAdapterForBucketSnapshot( @@ -870,13 +877,15 @@ private List listStatusBucketSnapshot( * Used in making the return path qualified. * @param username User name. * Used in making the return path qualified. + * @param lite true if lightweight response needs to be returned otherwise false. * @return A list of FileStatusAdapter. * @throws IOException Bucket exception or FileNotFoundException. 
*/ + @SuppressWarnings("checkstyle:ParameterNumber") @Override public List listStatus(String pathStr, boolean recursive, String startPath, long numEntries, URI uri, - Path workingDir, String username) throws IOException { + Path workingDir, String username, boolean lite) throws IOException { incrementCounter(Statistic.OBJECTS_LIST, 1); // Remove authority from startPath if it exists @@ -895,44 +904,53 @@ public List listStatus(String pathStr, boolean recursive, OFSPath ofsPath = new OFSPath(pathStr, config); if (ofsPath.isRoot()) { return listStatusRoot( - recursive, startPath, numEntries, uri, workingDir, username); + recursive, startPath, numEntries, uri, workingDir, username, lite); } OFSPath ofsStartPath = new OFSPath(startPath, config); if (ofsPath.isVolume()) { String startBucketPath = ofsStartPath.getNonKeyPath(); return listStatusVolume(ofsPath.getVolumeName(), - recursive, startBucketPath, numEntries, uri, workingDir, username); + recursive, startBucketPath, numEntries, uri, workingDir, username, lite); } if (ofsPath.isSnapshotPath()) { return listStatusBucketSnapshot(ofsPath.getVolumeName(), - ofsPath.getBucketName(), uri); + ofsPath.getBucketName(), uri, ofsStartPath.getSnapshotName(), numEntries); } - + List result = new ArrayList<>(); String keyName = ofsPath.getKeyName(); // Internally we need startKey to be passed into bucket.listStatus String startKey = ofsStartPath.getKeyName(); try { OzoneBucket bucket = getBucket(ofsPath, false); - List statuses; + List statuses = Collections.emptyList(); + List lightStatuses = Collections.emptyList(); if (bucket.isSourcePathExist()) { - statuses = bucket - .listStatus(keyName, recursive, startKey, numEntries); + if (lite) { + lightStatuses = bucket.listStatusLight(keyName, recursive, startKey, numEntries); + } else { + statuses = bucket.listStatus(keyName, recursive, startKey, numEntries); + } + } else { LOG.warn("Source Bucket does not exist, link bucket {} is orphan " + "and returning empty list of files inside it", bucket.getName()); - statuses = Collections.emptyList(); } // Note: result in statuses above doesn't have volume/bucket path since // they are from the server. String ofsPathPrefix = ofsPath.getNonKeyPath(); - List result = new ArrayList<>(); - for (OzoneFileStatus status : statuses) { - result.add(toFileStatusAdapter(status, username, uri, workingDir, - ofsPathPrefix)); + if (lite) { + for (OzoneFileStatusLight status : lightStatuses) { + result.add(toFileStatusAdapter(status, username, uri, workingDir, ofsPathPrefix)); + } + } else { + for (OzoneFileStatus status : statuses) { + result.add(toFileStatusAdapter(status, username, uri, workingDir, ofsPathPrefix)); + } } + return result; } catch (OMException e) { if (e.getResult() == OMException.ResultCodes.FILE_NOT_FOUND) { @@ -1035,6 +1053,31 @@ private FileStatusAdapter toFileStatusAdapter(OzoneFileStatus status, ); } + private FileStatusAdapter toFileStatusAdapter(OzoneFileStatusLight status, + String owner, URI defaultUri, Path workingDir, String ofsPathPrefix) { + BasicOmKeyInfo keyInfo = status.getKeyInfo(); + short replication = (short) keyInfo.getReplicationConfig() + .getRequiredNodes(); + return new FileStatusAdapter( + keyInfo.getDataSize(), + keyInfo.getReplicatedSize(), + new Path(ofsPathPrefix + OZONE_URI_DELIMITER + keyInfo.getKeyName()) + .makeQualified(defaultUri, workingDir), + status.isDirectory(), + replication, + status.getBlockSize(), + keyInfo.getModificationTime(), + keyInfo.getModificationTime(), + status.isDirectory() ? 
(short) 00777 : (short) 00666, + StringUtils.defaultIfEmpty(keyInfo.getOwnerName(), owner), + owner, + null, + getBlockLocations(null), + false, + OzoneClientUtils.isKeyErasureCode(keyInfo) + ); + } + /** * Helper method to get List of BlockLocation from OM Key info. * @param fileStatus Ozone key file status. @@ -1071,16 +1114,15 @@ private BlockLocation[] getBlockLocations(OzoneFileStatus fileStatus) { omKeyLocationInfo.getPipeline().getNodes() .forEach(dn -> { hostList.add(dn.getHostName()); - int port = dn.getPort( - DatanodeDetails.Port.Name.STANDALONE).getValue(); + int port = dn.getStandalonePort().getValue(); if (port == 0) { port = configuredDnPort; } nameList.add(dn.getHostName() + ":" + port); }); - String[] hosts = hostList.toArray(new String[hostList.size()]); - String[] names = nameList.toArray(new String[nameList.size()]); + String[] hosts = hostList.toArray(new String[0]); + String[] names = nameList.toArray(new String[0]); BlockLocation blockLocation = new BlockLocation( names, hosts, offsetOfBlockInFile, omKeyLocationInfo.getLength()); diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java index 3e0a3730627..66b0037cf33 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.fs.ozone; +import com.google.common.base.Function; import com.google.common.base.Preconditions; import io.opentracing.Span; import io.opentracing.util.GlobalTracer; @@ -915,7 +916,7 @@ private boolean o3Exists(final Path f) throws IOException { @Override public FileStatus[] listStatus(Path f) throws IOException { return TracingUtil.executeInNewSpan("ofs listStatus", - () -> convertFileStatusArr(listStatusAdapter(f))); + () -> convertFileStatusArr(listStatusAdapter(f, true))); } private FileStatus[] convertFileStatusArr( @@ -929,7 +930,7 @@ private FileStatus[] convertFileStatusArr( } - public List listStatusAdapter(Path f) throws IOException { + private List listStatusAdapter(Path f, boolean lite) throws IOException { incrementCounter(Statistic.INVOCATION_LIST_STATUS, 1); statistics.incrementReadOps(1); LOG.trace("listStatus() path:{}", f); @@ -937,25 +938,27 @@ public List listStatusAdapter(Path f) throws IOException { LinkedList statuses = new LinkedList<>(); List tmpStatusList; String startPath = ""; - + int entriesAdded; do { tmpStatusList = adapter.listStatus(pathToKey(f), false, startPath, - numEntries, uri, workingDir, getUsername()); - + numEntries, uri, workingDir, getUsername(), lite); + entriesAdded = 0; if (!tmpStatusList.isEmpty()) { if (startPath.isEmpty() || !statuses.getLast().getPath().toString() .equals(tmpStatusList.get(0).getPath().toString())) { statuses.addAll(tmpStatusList); + entriesAdded += tmpStatusList.size(); } else { statuses.addAll(tmpStatusList.subList(1, tmpStatusList.size())); + entriesAdded += tmpStatusList.size() - 1; } startPath = pathToKey(statuses.getLast().getPath()); } // listStatus returns entries numEntries in size if available. // Any lesser number of entries indicate that the required entries have // exhausted. 
- } while (tmpStatusList.size() == numEntries); + } while (entriesAdded > 0); return statuses; } @@ -1178,7 +1181,9 @@ public RemoteIterator listFiles(Path f, boolean recursive) public RemoteIterator listLocatedStatus(Path f) throws IOException { incrementCounter(Statistic.INVOCATION_LIST_LOCATED_STATUS); - return super.listLocatedStatus(f); + return new OzoneFileStatusIterator<>(f, + (stat) -> stat instanceof LocatedFileStatus ? (LocatedFileStatus) stat : new LocatedFileStatus(stat, null), + false); } @Override @@ -1193,7 +1198,7 @@ public RemoteIterator listStatusIterator(Path f) "Instead use 'ozone sh key list " + "' command"); } - return new OzoneFileStatusIterator<>(f); + return new OzoneFileStatusIterator<>(f, stat -> stat, true); } /** @@ -1203,23 +1208,29 @@ public RemoteIterator listStatusIterator(Path f) */ private final class OzoneFileStatusIterator implements RemoteIterator { + private final Function transformFunc; private List thisListing; private int i; private Path p; private T curStat = null; private String startPath = ""; + private boolean lite; /** * Constructor to initialize OzoneFileStatusIterator. * Get the first batch of entry for iteration. * * @param p path to file/directory. + * @param transformFunc function to convert FileStatus into an expected type. + * @param lite if true it should look into fetching a lightweight keys from server. * @throws IOException */ - private OzoneFileStatusIterator(Path p) throws IOException { + private OzoneFileStatusIterator(Path p, Function transformFunc, boolean lite) throws IOException { this.p = p; + this.lite = lite; + this.transformFunc = transformFunc; // fetch the first batch of entries in the directory - thisListing = listFileStatus(p, startPath); + thisListing = listFileStatus(p, startPath, lite); if (thisListing != null && !thisListing.isEmpty()) { startPath = pathToKey( thisListing.get(thisListing.size() - 1).getPath()); @@ -1238,7 +1249,7 @@ public boolean hasNext() throws IOException { while (curStat == null && hasNextNoFilter()) { T next; FileStatus fileStat = thisListing.get(i++); - next = (T) (fileStat); + next = transformFunc.apply(fileStat); curStat = next; } return curStat != null; @@ -1256,10 +1267,9 @@ private boolean hasNextNoFilter() throws IOException { return false; } if (i >= thisListing.size()) { - if (startPath != null && (thisListing.size() == listingPageSize || - thisListing.size() == listingPageSize - 1)) { + if (startPath != null && (!thisListing.isEmpty())) { // current listing is exhausted & fetch a new listing - thisListing = listFileStatus(p, startPath); + thisListing = listFileStatus(p, startPath, lite); if (thisListing != null && !thisListing.isEmpty()) { startPath = pathToKey( thisListing.get(thisListing.size() - 1).getPath()); @@ -1294,10 +1304,11 @@ public T next() throws IOException { * * @param f * @param startPath + * @param lite if true return lightweight keys * @return list of file status. 
* @throws IOException */ - private List listFileStatus(Path f, String startPath) + private List listFileStatus(Path f, String startPath, boolean lite) throws IOException { incrementCounter(Statistic.INVOCATION_LIST_STATUS, 1); statistics.incrementReadOps(1); @@ -1305,7 +1316,7 @@ private List listFileStatus(Path f, String startPath) List statusList; statusList = adapter.listStatus(pathToKey(f), false, startPath, - listingPageSize, uri, workingDir, getUsername()) + listingPageSize, uri, workingDir, getUsername(), lite) .stream() .map(this::convertFileStatus) .collect(Collectors.toList()); @@ -1442,7 +1453,7 @@ boolean iterate() throws IOException { ofsPath.getNonKeyPathNoPrefixDelim() + OZONE_URI_DELIMITER; if (isFSO) { List fileStatuses; - fileStatuses = listStatusAdapter(path); + fileStatuses = listStatusAdapter(path, true); for (FileStatusAdapter fileStatus : fileStatuses) { String keyName = new OFSPath(fileStatus.getPath().toString(), @@ -1567,7 +1578,7 @@ private ContentSummary getContentSummaryInSpan(Path f) throws IOException { // f is a directory long[] summary = {0, 0, 0, 1}; int i = 0; - for (FileStatusAdapter s : listStatusAdapter(f)) { + for (FileStatusAdapter s : listStatusAdapter(f, true)) { long length = s.getLength(); long spaceConsumed = s.getDiskConsumed(); ContentSummary c = s.isDir() ? getContentSummary(s.getPath()) : diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java index e468ac498c4..24ff692e1b4 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java @@ -69,9 +69,10 @@ OzoneFSDataStreamOutput createStreamFile(String key, short replication, Iterator listKeys(String pathKey) throws IOException; + @SuppressWarnings("checkstyle:ParameterNumber") List listStatus(String keyName, boolean recursive, String startKey, long numEntries, URI uri, - Path workingDir, String username) throws IOException; + Path workingDir, String username, boolean lite) throws IOException; Token getDelegationToken(String renewer) throws IOException; diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java index 383ad6db495..6c9fb3ccc7b 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java @@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.client.checksum.ChecksumHelperFactory; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.BasicOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -246,6 +247,11 @@ public static boolean isKeyErasureCode(OmKeyInfo keyInfo) { HddsProtos.ReplicationType.EC; } + public static boolean isKeyErasureCode(BasicOmKeyInfo keyInfo) { + return keyInfo.getReplicationConfig().getReplicationType() == + HddsProtos.ReplicationType.EC; + } + public static boolean isKeyEncrypted(OmKeyInfo keyInfo) { return !Objects.isNull(keyInfo.getFileEncryptionInfo()); } diff --git 
a/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsShell.java b/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsShell.java index a15da5228f3..96506933952 100644 --- a/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsShell.java +++ b/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsShell.java @@ -22,7 +22,6 @@ import org.apache.hadoop.util.ToolRunner; import java.io.ByteArrayOutputStream; -import java.io.IOException; import java.io.PrintStream; import java.util.Arrays; @@ -32,7 +31,6 @@ import org.junit.jupiter.api.Test; - /** * Tests the behavior of OzoneFsShell. */ @@ -40,7 +38,7 @@ public class TestOzoneFsShell { // tests command handler for FsShell bound to OzoneDelete class @Test - public void testOzoneFsShellRegisterDeleteCmd() throws IOException { + public void testOzoneFsShellRegisterDeleteCmd() throws Exception { final String rmCmdName = "rm"; final String rmCmd = "-" + rmCmdName; final String arg = "arg1"; @@ -52,16 +50,17 @@ public void testOzoneFsShellRegisterDeleteCmd() throws IOException { System.setErr(bytesPrintStream); try { ToolRunner.run(shell, argv); - } catch (Exception e) { - } finally { + // test command bindings for "rm" command handled by OzoneDelete class CommandFactory factory = shell.getCommandFactory(); + assertNotNull(factory); assertEquals(1, Arrays.stream(factory.getNames()) .filter(c -> c.equals(rmCmd)).count()); Command instance = factory.getInstance(rmCmd); assertNotNull(instance); assertEquals(OzoneFsDelete.Rm.class, instance.getClass()); assertEquals(rmCmdName, instance.getCommandName()); + } finally { shell.close(); System.setErr(oldErr); } diff --git a/hadoop-ozone/ozonefs-hadoop2/pom.xml b/hadoop-ozone/ozonefs-hadoop2/pom.xml index fad83ea86c1..8585a9dd544 100644 --- a/hadoop-ozone/ozonefs-hadoop2/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop2/pom.xml @@ -19,12 +19,12 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-filesystem-hadoop2 Apache Ozone FS Hadoop 2.x compatibility jar - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT org.apache.hadoop.ozone.shaded diff --git a/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java b/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java index b5012f95c4e..e1cb391da53 100644 --- a/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java +++ b/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java @@ -45,4 +45,13 @@ public OzFs(URI theUri, Configuration conf) public int getUriDefaultPort() { return -1; } + + /** + * Close the file system; the FileContext API doesn't have an explicit close. + */ + @Override + protected void finalize() throws Throwable { + fsImpl.close(); + super.finalize(); + } } diff --git a/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java b/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java index 4cd04da9c86..0f421a85523 100644 --- a/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java +++ b/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java @@ -46,4 +46,13 @@ public RootedOzFs(URI theUri, Configuration conf) public int getUriDefaultPort() { return -1; } + + /** + * Close the file system; the FileContext API doesn't have an explicit close. 
+ */ + @Override + protected void finalize() throws Throwable { + fsImpl.close(); + super.finalize(); + } } diff --git a/hadoop-ozone/ozonefs-hadoop3-client/pom.xml b/hadoop-ozone/ozonefs-hadoop3-client/pom.xml index f27bd411db7..2f23a5d318e 100644 --- a/hadoop-ozone/ozonefs-hadoop3-client/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop3-client/pom.xml @@ -19,7 +19,7 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT org.apache.hadoop.ozone.shaded diff --git a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java index 914832e2cfa..b1e046547fa 100644 --- a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java +++ b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java @@ -40,4 +40,13 @@ public OzFs(URI theUri, Configuration conf) super(theUri, new OzoneFileSystem(), conf, OzoneConsts.OZONE_URI_SCHEME, false); } + + /** + * Close the file system; the FileContext API doesn't have an explicit close. + */ + @Override + protected void finalize() throws Throwable { + fsImpl.close(); + super.finalize(); + } } diff --git a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java index 076287eaac1..81bbaacd7c8 100644 --- a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java +++ b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java @@ -41,4 +41,13 @@ public RootedOzFs(URI theUri, Configuration conf) super(theUri, new RootedOzoneFileSystem(), conf, OzoneConsts.OZONE_OFS_URI_SCHEME, false); } + + /** + * Close the file system; the FileContext API doesn't have an explicit close. + */ + @Override + protected void finalize() throws Throwable { + fsImpl.close(); + super.finalize(); + } } diff --git a/hadoop-ozone/ozonefs-shaded/pom.xml b/hadoop-ozone/ozonefs-shaded/pom.xml index df6c724883c..9e77ffd7c33 100644 --- a/hadoop-ozone/ozonefs-shaded/pom.xml +++ b/hadoop-ozone/ozonefs-shaded/pom.xml @@ -19,12 +19,12 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-filesystem-shaded Apache Ozone FileSystem Shaded jar - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT true @@ -78,6 +78,12 @@ + + com.google.protobuf + protobuf-java + 2.5.0 + compile + diff --git a/hadoop-ozone/ozonefs/pom.xml b/hadoop-ozone/ozonefs/pom.xml index 176f21b9860..aa554c422e5 100644 --- a/hadoop-ozone/ozonefs/pom.xml +++ b/hadoop-ozone/ozonefs/pom.xml @@ -19,12 +19,12 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-filesystem Apache Ozone FileSystem jar - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT UTF-8 true diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java index 346b994a3ae..548e11f5d48 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java @@ -41,4 +41,13 @@ public OzFs(URI theUri, Configuration conf) super(theUri, new OzoneFileSystem(), conf, OzoneConsts.OZONE_URI_SCHEME, false); } + + /** + * Close the file system; the FileContext API doesn't have an explicit close. 
+ */ + @Override + protected void finalize() throws Throwable { + fsImpl.close(); + super.finalize(); + } } diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java index 076287eaac1..81bbaacd7c8 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java @@ -41,4 +41,13 @@ public RootedOzFs(URI theUri, Configuration conf) super(theUri, new RootedOzoneFileSystem(), conf, OzoneConsts.OZONE_OFS_URI_SCHEME, false); } + + /** + * Close the file system; the FileContext API doesn't have an explicit close. + */ + @Override + protected void finalize() throws Throwable { + fsImpl.close(); + super.finalize(); + } } diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index d91d488c434..22cd10085dd 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -16,10 +16,10 @@ org.apache.ozone ozone-main - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Project Apache Ozone pom @@ -38,6 +38,8 @@ ozonefs-common ozonefs datanode + recon + recon-codegen s3gateway dist csi @@ -47,13 +49,6 @@ s3-secret-store - - - apache.snapshots.https - https://repository.apache.org/content/repositories/snapshots - - - @@ -396,18 +391,6 @@ ozonefs-hadoop2 - - build-with-recon - - - !skipRecon - - - - recon - recon-codegen - - parallel-tests @@ -455,36 +438,5 @@ - - add-classpath-descriptor - - - src/main/java - - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - add-classpath-descriptor - prepare-package - - build-classpath - - - ${project.build.outputDirectory}/${project.artifactId}.classpath - $HDDS_LIB_JARS_DIR - true - runtime - - - - - - - diff --git a/hadoop-ozone/recon-codegen/pom.xml b/hadoop-ozone/recon-codegen/pom.xml index bb7756a9de3..b8345c7d343 100644 --- a/hadoop-ozone/recon-codegen/pom.xml +++ b/hadoop-ozone/recon-codegen/pom.xml @@ -18,7 +18,7 @@ ozone org.apache.ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT 4.0.0 ozone-reconcodegen diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/ReconSchemaGenerationModule.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/ReconSchemaGenerationModule.java index 8272c2bd6da..d59ab8acd6b 100644 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/ReconSchemaGenerationModule.java +++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/ReconSchemaGenerationModule.java @@ -22,6 +22,7 @@ import org.hadoop.ozone.recon.schema.ReconSchemaDefinition; import org.hadoop.ozone.recon.schema.StatsSchemaDefinition; import org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition; +import org.hadoop.ozone.recon.schema.SchemaVersionTableDefinition; import com.google.inject.AbstractModule; import com.google.inject.multibindings.Multibinder; @@ -40,5 +41,6 @@ protected void configure() { schemaBinder.addBinding().to(ContainerSchemaDefinition.class); schemaBinder.addBinding().to(ReconTaskSchemaDefinition.class); schemaBinder.addBinding().to(StatsSchemaDefinition.class); + schemaBinder.addBinding().to(SchemaVersionTableDefinition.class); } } diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java index 0882de3bf4f..abf1ef7ac9d 100644 --- 
a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java +++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java @@ -126,4 +126,8 @@ private void createUnhealthyContainersTable() { public DSLContext getDSLContext() { return dslContext; } + + public DataSource getDataSource() { + return dataSource; + } } diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/SchemaVersionTableDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/SchemaVersionTableDefinition.java new file mode 100644 index 00000000000..f7e538f31ad --- /dev/null +++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/SchemaVersionTableDefinition.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.hadoop.ozone.recon.schema; + +import com.google.inject.Inject; +import com.google.inject.Singleton; +import org.jooq.DSLContext; +import org.jooq.impl.DSL; +import org.jooq.impl.SQLDataType; + +import javax.sql.DataSource; +import java.sql.Connection; +import java.sql.SQLException; + +import static org.hadoop.ozone.recon.codegen.SqlDbUtils.TABLE_EXISTS_CHECK; + +/** + * Class for managing the schema of the SchemaVersion table. + */ +@Singleton +public class SchemaVersionTableDefinition implements ReconSchemaDefinition { + + public static final String SCHEMA_VERSION_TABLE_NAME = "RECON_SCHEMA_VERSION"; + private final DataSource dataSource; + private DSLContext dslContext; + + @Inject + public SchemaVersionTableDefinition(DataSource dataSource) { + this.dataSource = dataSource; + } + + @Override + public void initializeSchema() throws SQLException { + Connection conn = dataSource.getConnection(); + dslContext = DSL.using(conn); + + if (!TABLE_EXISTS_CHECK.test(conn, SCHEMA_VERSION_TABLE_NAME)) { + createSchemaVersionTable(); + } + } + + /** + * Create the Schema Version table. 
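For reference, the jOOQ builder in createSchemaVersionTable() that follows corresponds roughly to the DDL sketched below. This is an illustration under stated assumptions only: the exact SQL depends on the dialect jOOQ is configured with, and the helper class name is invented.

import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

public final class SchemaVersionDdlSketch {

  // Approximate DDL for the table created by the jOOQ calls below; column names match the patch.
  static final String CREATE_RECON_SCHEMA_VERSION =
      "CREATE TABLE IF NOT EXISTS RECON_SCHEMA_VERSION ("
          + " version_number INTEGER NOT NULL,"
          + " applied_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP"
          + ")";

  static void createTable(Connection conn) throws SQLException {
    try (Statement stmt = conn.createStatement()) {
      stmt.executeUpdate(CREATE_RECON_SCHEMA_VERSION);
    }
  }

  private SchemaVersionDdlSketch() {
  }
}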
+ */ + private void createSchemaVersionTable() throws SQLException { + dslContext.createTableIfNotExists(SCHEMA_VERSION_TABLE_NAME) + .column("version_number", SQLDataType.INTEGER.nullable(false)) + .column("applied_on", SQLDataType.TIMESTAMP.defaultValue(DSL.currentTimestamp())) + .execute(); + } + +} diff --git a/hadoop-ozone/recon/pom.xml b/hadoop-ozone/recon/pom.xml index a24252c1ed6..f203689b669 100644 --- a/hadoop-ozone/recon/pom.xml +++ b/hadoop-ozone/recon/pom.xml @@ -18,12 +18,13 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Recon 4.0.0 ozone-recon + false 8.15.7 @@ -209,6 +210,9 @@ ${basedir}/src/main/resources/webapps/recon/ozone-recon-web/build + + static/** + true @@ -229,6 +233,7 @@ + woff woff2 diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ConfigurationProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ConfigurationProvider.java index 6312365bf4b..9f0a9796e28 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ConfigurationProvider.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ConfigurationProvider.java @@ -53,6 +53,8 @@ private static void addDeprecations() { @VisibleForTesting public static void setConfiguration(OzoneConfiguration conf) { + // Nullity check is used in case the configuration was already set + // in the MiniOzoneCluster if (configuration == null) { ConfigurationProvider.configuration = conf; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java index ed657931e03..5768166c950 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java @@ -43,20 +43,20 @@ private ReconConstants() { public static final int DISK_USAGE_TOP_RECORDS_LIMIT = 30; public static final String DEFAULT_OPEN_KEY_INCLUDE_NON_FSO = "false"; public static final String DEFAULT_OPEN_KEY_INCLUDE_FSO = "false"; - public static final String DEFAULT_START_PREFIX = "/"; public static final String DEFAULT_FETCH_COUNT = "1000"; public static final String DEFAULT_KEY_SIZE = "0"; public static final String DEFAULT_BATCH_NUMBER = "1"; public static final String RECON_QUERY_BATCH_PARAM = "batchNum"; public static final String RECON_QUERY_PREVKEY = "prevKey"; + public static final String RECON_QUERY_START_PREFIX = "startPrefix"; public static final String RECON_OPEN_KEY_INCLUDE_NON_FSO = "includeNonFso"; public static final String RECON_OPEN_KEY_INCLUDE_FSO = "includeFso"; - public static final String RECON_OPEN_KEY_DEFAULT_SEARCH_LIMIT = "1000"; - public static final String RECON_OPEN_KEY_SEARCH_DEFAULT_PREV_KEY = ""; + public static final String RECON_OM_INSIGHTS_DEFAULT_START_PREFIX = "/"; + public static final String RECON_OM_INSIGHTS_DEFAULT_SEARCH_LIMIT = "1000"; + public static final String RECON_OM_INSIGHTS_DEFAULT_SEARCH_PREV_KEY = ""; public static final String RECON_QUERY_FILTER = "missingIn"; public static final String PREV_CONTAINER_ID_DEFAULT_VALUE = "0"; - public static final String PREV_DELETED_BLOCKS_TRANSACTION_ID_DEFAULT_VALUE = - "0"; + public static final String PREV_DELETED_BLOCKS_TRANSACTION_ID_DEFAULT_VALUE = "0"; // Only include containers that are missing in OM by default public static final String DEFAULT_FILTER_FOR_MISSING_CONTAINERS = "SCM"; public static final String RECON_QUERY_LIMIT = 
"limit"; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconContext.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconContext.java index c9875cb826b..a98603a7e9c 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconContext.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconContext.java @@ -128,7 +128,10 @@ public enum ErrorCode { Arrays.asList("Overview (OM Data)", "OM DB Insights")), GET_SCM_DB_SNAPSHOT_FAILED( "SCM DB Snapshot sync failed !!!", - Arrays.asList("Containers", "Pipelines")); + Arrays.asList("Containers", "Pipelines")), + UPGRADE_FAILURE( + "Schema upgrade failed. Recon encountered an issue while finalizing the layout upgrade.", + Arrays.asList("Recon startup", "Metadata Layout Version")); private final String message; private final List impacts; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconResponseUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconResponseUtils.java index 41235ae5428..dc53f195f67 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconResponseUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconResponseUtils.java @@ -44,7 +44,7 @@ public static Response noMatchedKeysResponse(String startPrefix) { String jsonResponse = String.format( "{\"message\": \"No keys matched the search prefix: '%s'.\"}", startPrefix); - return Response.status(Response.Status.NOT_FOUND) + return Response.status(Response.Status.NO_CONTENT) .entity(jsonResponse) .type(MediaType.APPLICATION_JSON) .build(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconSchemaVersionTableManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconSchemaVersionTableManager.java new file mode 100644 index 00000000000..e01d52b89cd --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconSchemaVersionTableManager.java @@ -0,0 +1,105 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.ozone.recon; + +import com.google.inject.Inject; +import org.jooq.DSLContext; +import org.jooq.impl.DSL; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.sql.DataSource; +import java.sql.Connection; +import java.sql.SQLException; + +import static org.jooq.impl.DSL.name; + +/** + * Manager for handling the Recon Schema Version table. + * This class provides methods to get and update the current schema version. 
+ */ +public class ReconSchemaVersionTableManager { + + private static final Logger LOG = LoggerFactory.getLogger(ReconSchemaVersionTableManager.class); + public static final String RECON_SCHEMA_VERSION_TABLE_NAME = "RECON_SCHEMA_VERSION"; + private DSLContext dslContext; + private final DataSource dataSource; + + @Inject + public ReconSchemaVersionTableManager(DataSource dataSource) throws SQLException { + this.dataSource = dataSource; + this.dslContext = DSL.using(dataSource.getConnection()); + } + + /** + * Get the current schema version from the RECON_SCHEMA_VERSION table. + * If the table is empty, or if it does not exist, it will return 0. + * @return The current schema version. + */ + public int getCurrentSchemaVersion() throws SQLException { + try { + return dslContext.select(DSL.field(name("version_number"))) + .from(DSL.table(RECON_SCHEMA_VERSION_TABLE_NAME)) + .fetchOptional() + .map(record -> record.get( + DSL.field(name("version_number"), Integer.class))) + .orElse(-1); // Return -1 if no version is found + } catch (Exception e) { + LOG.error("Failed to fetch the current schema version.", e); + throw new SQLException("Unable to read schema version from the table.", e); + } + } + + /** + * Update the schema version in the RECON_SCHEMA_VERSION table after all tables are upgraded. + * + * @param newVersion The new version to set. + */ + public void updateSchemaVersion(int newVersion, Connection conn) { + dslContext = DSL.using(conn); + boolean recordExists = dslContext.fetchExists(dslContext.selectOne() + .from(DSL.table(RECON_SCHEMA_VERSION_TABLE_NAME))); + + if (recordExists) { + // Update the existing schema version record + dslContext.update(DSL.table(RECON_SCHEMA_VERSION_TABLE_NAME)) + .set(DSL.field(name("version_number")), newVersion) + .set(DSL.field(name("applied_on")), DSL.currentTimestamp()) + .execute(); + LOG.info("Updated schema version to '{}'.", newVersion); + } else { + // Insert a new schema version record + dslContext.insertInto(DSL.table(RECON_SCHEMA_VERSION_TABLE_NAME)) + .columns(DSL.field(name("version_number")), + DSL.field(name("applied_on"))) + .values(newVersion, DSL.currentTimestamp()) + .execute(); + LOG.info("Inserted new schema version '{}'.", newVersion); + } + } + + /** + * Provides the data source used by this manager. + * @return The DataSource instance. 
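The manager above keeps a single row in RECON_SCHEMA_VERSION and either updates or inserts it; getCurrentSchemaVersion() yields -1 (via orElse(-1)) when no row exists yet. A plain-JDBC sketch of that read-then-upsert flow is given below for illustration; the table and column names match the patch, while the class name and how a Connection is obtained are assumptions.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Timestamp;

public final class SchemaVersionUpsertSketch {

  /** Returns the stored version, or -1 when the table has no row yet. */
  static int readVersion(Connection conn) throws SQLException {
    try (Statement s = conn.createStatement();
         ResultSet rs = s.executeQuery("SELECT version_number FROM RECON_SCHEMA_VERSION")) {
      return rs.next() ? rs.getInt(1) : -1;
    }
  }

  /** Updates the single row if present, otherwise inserts it. */
  static void writeVersion(Connection conn, int newVersion) throws SQLException {
    Timestamp now = new Timestamp(System.currentTimeMillis());
    try (PreparedStatement upd = conn.prepareStatement(
        "UPDATE RECON_SCHEMA_VERSION SET version_number = ?, applied_on = ?")) {
      upd.setInt(1, newVersion);
      upd.setTimestamp(2, now);
      if (upd.executeUpdate() > 0) {
        return;
      }
    }
    try (PreparedStatement ins = conn.prepareStatement(
        "INSERT INTO RECON_SCHEMA_VERSION (version_number, applied_on) VALUES (?, ?)")) {
      ins.setInt(1, newVersion);
      ins.setTimestamp(2, now);
      ins.executeUpdate();
    }
  }

  private SchemaVersionUpsertSketch() {
  }
}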
+ */ + public DataSource getDataSource() { + return dataSource; + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java index 3295eb4524c..24b5c10952a 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java @@ -42,6 +42,7 @@ import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; import org.apache.hadoop.ozone.recon.spi.impl.ReconDBProvider; +import org.apache.hadoop.ozone.recon.upgrade.ReconLayoutVersionManager; import org.apache.hadoop.ozone.util.OzoneNetUtils; import org.apache.hadoop.ozone.util.OzoneVersionInfo; import org.apache.hadoop.ozone.util.ShutdownHookManager; @@ -105,6 +106,7 @@ public Void call() throws Exception { ReconServer.class, originalArgs, LOG, configuration); ConfigurationProvider.setConfiguration(configuration); + injector = Guice.createInjector(new ReconControllerModule(), new ReconRestServletModule(configuration), new ReconSchemaGenerationModule()); @@ -136,8 +138,11 @@ public Void call() throws Exception { this.reconNamespaceSummaryManager = injector.getInstance(ReconNamespaceSummaryManager.class); + ReconContext reconContext = injector.getInstance(ReconContext.class); + ReconSchemaManager reconSchemaManager = injector.getInstance(ReconSchemaManager.class); + LOG.info("Creating Recon Schema."); reconSchemaManager.createReconSchema(); LOG.debug("Recon schema creation done."); @@ -153,6 +158,17 @@ public Void call() throws Exception { this.reconTaskStatusMetrics = injector.getInstance(ReconTaskStatusMetrics.class); + // Handle Recon Schema Versioning + ReconSchemaVersionTableManager versionTableManager = + injector.getInstance(ReconSchemaVersionTableManager.class); + + ReconLayoutVersionManager layoutVersionManager = + new ReconLayoutVersionManager(versionTableManager, reconContext); + // Run the upgrade framework to finalize layout features if needed + ReconStorageContainerManagerFacade reconStorageContainerManagerFacade = + (ReconStorageContainerManagerFacade) this.getReconStorageContainerManager(); + layoutVersionManager.finalizeLayoutFeatures(reconStorageContainerManagerFacade); + LOG.info("Initializing support of Recon Features..."); FeatureProvider.initFeatureSupport(configuration); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index 1a2a705fc0f..12139e17723 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -32,11 +32,14 @@ import java.text.ParseException; import java.text.SimpleDateFormat; import java.time.Instant; -import java.util.List; -import java.util.TimeZone; +import java.util.ArrayList; +import java.util.Collections; import java.util.Date; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; import java.util.Set; -import java.util.ArrayList; +import java.util.TimeZone; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -52,8 +55,11 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmUtils; import 
org.apache.hadoop.hdds.scm.ha.SCMNodeDetails; +import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher; import org.apache.hadoop.hdds.utils.HddsServerUtil; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.apache.hadoop.io.IOUtils; @@ -73,8 +79,13 @@ import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; +import org.apache.hadoop.ozone.recon.api.ServiceNotReadyException; import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.api.types.DUResponse; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; @@ -286,11 +297,64 @@ public void untarCheckpointFile(File tarFile, Path destPath) */ public static String constructFullPath(OmKeyInfo omKeyInfo, ReconNamespaceSummaryManager reconNamespaceSummaryManager, - ReconOMMetadataManager omMetadataManager) - throws IOException { + ReconOMMetadataManager omMetadataManager) throws IOException { + return constructFullPath(omKeyInfo.getKeyName(), omKeyInfo.getParentObjectID(), omKeyInfo.getVolumeName(), + omKeyInfo.getBucketName(), reconNamespaceSummaryManager, omMetadataManager); + } - StringBuilder fullPath = new StringBuilder(omKeyInfo.getKeyName()); - long parentId = omKeyInfo.getParentObjectID(); + /** + * Constructs the full path of a key from its key name and parent ID using a bottom-up approach, starting from the + * leaf node. + * + * The method begins with the leaf node (the key itself) and recursively prepends parent directory names, fetched + * via NSSummary objects, until reaching the parent bucket (parentId is -1). It effectively builds the path from + * bottom to top, finally prepending the volume and bucket names to complete the full path. If the directory structure + * is currently being rebuilt (indicated by the rebuildTriggered flag), this method returns an empty string to signify + * that path construction is temporarily unavailable. + * + * @param keyName The name of the key + * @param initialParentId The parent ID of the key + * @param volumeName The name of the volume + * @param bucketName The name of the bucket + * @return The constructed full path of the key as a String, or an empty string if a rebuild is in progress and + * the path cannot be constructed at this time. + * @throws IOException + */ + public static String constructFullPath(String keyName, long initialParentId, String volumeName, String bucketName, + ReconNamespaceSummaryManager reconNamespaceSummaryManager, + ReconOMMetadataManager omMetadataManager) throws IOException { + StringBuilder fullPath = constructFullPathPrefix(initialParentId, volumeName, bucketName, + reconNamespaceSummaryManager, omMetadataManager); + if (fullPath.length() == 0) { + return ""; + } + fullPath.append(keyName); + return fullPath.toString(); + } + + + /** + * Constructs the prefix path to a key from its key name and parent ID using a bottom-up approach, starting from the + * leaf node. 
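A self-contained illustration of the bottom-up walk described here: starting from a key's parent id, prepend each non-empty directory name until the parent id reaches zero, then prepend the volume and bucket. The map and all names below are invented for the sketch; in Recon the lookups go through NSSummary, and a missing entry means the tree is still being rebuilt (the patch throws ServiceNotReadyException in that case).

import java.util.HashMap;
import java.util.Map;

public class BottomUpPathSketch {

  /** Stand-in for an NSSummary entry: directory name plus parent object id. */
  static final class Dir {
    final String name;
    final long parentId;

    Dir(String name, long parentId) {
      this.name = name;
      this.parentId = parentId;
    }
  }

  static String buildPath(Map<Long, Dir> dirs, long parentId,
                          String volume, String bucket, String keyName) {
    StringBuilder path = new StringBuilder(keyName);
    while (parentId != 0) {
      Dir dir = dirs.get(parentId);
      if (dir == null) {
        // analogous to the ServiceNotReadyException thrown while the tree is rebuilt
        throw new IllegalStateException("namespace summary not available yet");
      }
      if (!dir.name.isEmpty()) {
        path.insert(0, dir.name + "/");
      }
      parentId = dir.parentId; // the bucket-level entry has an empty name and parent 0
    }
    return volume + "/" + bucket + "/" + path;
  }

  public static void main(String[] args) {
    Map<Long, Dir> dirs = new HashMap<>();
    dirs.put(10L, new Dir("dir1", 2L));
    dirs.put(2L, new Dir("", 0L)); // bucket-level node ends the walk
    // prints vol1/bucket1/dir1/key1
    System.out.println(buildPath(dirs, 10L, "vol1", "bucket1", "key1"));
  }
}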
+ * + * The method begins with the leaf node (the key itself) and recursively prepends parent directory names, fetched + * via NSSummary objects, until reaching the parent bucket (parentId is -1). It effectively builds the path from + * bottom to top, finally prepending the volume and bucket names to complete the full path. If the directory structure + * is currently being rebuilt (indicated by the rebuildTriggered flag), this method returns an empty string to signify + * that path construction is temporarily unavailable. + * + * @param initialParentId The parent ID of the key + * @param volumeName The name of the volume + * @param bucketName The name of the bucket + * @return A StringBuilder containing the constructed prefix path of the key, or an empty string builder if a rebuild + * is in progress. + * @throws IOException + */ + public static StringBuilder constructFullPathPrefix(long initialParentId, String volumeName, + String bucketName, ReconNamespaceSummaryManager reconNamespaceSummaryManager, + ReconOMMetadataManager omMetadataManager) throws IOException { + StringBuilder fullPath = new StringBuilder(); + long parentId = initialParentId; boolean isDirectoryPresent = false; while (parentId != 0) { @@ -298,16 +362,19 @@ public static String constructFullPath(OmKeyInfo omKeyInfo, if (nsSummary == null) { log.warn("NSSummary tree is currently being rebuilt or the directory could be in the progress of " + "deletion, returning empty string for path construction."); - return ""; + throw new ServiceNotReadyException("Service is initializing. Please try again later."); } if (nsSummary.getParentId() == -1) { if (rebuildTriggered.compareAndSet(false, true)) { triggerRebuild(reconNamespaceSummaryManager, omMetadataManager); } log.warn("NSSummary tree is currently being rebuilt, returning empty string for path construction."); - return ""; + throw new ServiceNotReadyException("Service is initializing. Please try again later."); + } + // On the last pass, dir-name will be empty and parent will be zero, indicating the loop should end. + if (!nsSummary.getDirName().isEmpty()) { + fullPath.insert(0, nsSummary.getDirName() + OM_KEY_PREFIX); } - fullPath.insert(0, nsSummary.getDirName() + OM_KEY_PREFIX); // Move to the parent ID of the current directory parentId = nsSummary.getParentId(); @@ -315,13 +382,113 @@ public static String constructFullPath(OmKeyInfo omKeyInfo, } // Prepend the volume and bucket to the constructed path - String volumeName = omKeyInfo.getVolumeName(); - String bucketName = omKeyInfo.getBucketName(); fullPath.insert(0, volumeName + OM_KEY_PREFIX + bucketName + OM_KEY_PREFIX); + // TODO - why is this needed? It seems lke it should handle double slashes in the path name, + // but its not clear how they get there. This normalize call is quite expensive as it + // creates several objects (URI, PATH, back to string). There was a bug fixed above + // where the last parent dirName was empty, which always caused a double // after the + // bucket name, but with that fixed, it seems like this should not be needed. All tests + // pass without it for key listing. if (isDirectoryPresent) { - return OmUtils.normalizeKey(fullPath.toString(), true); + String path = fullPath.toString(); + fullPath.setLength(0); + fullPath.append(OmUtils.normalizeKey(path, true)); } - return fullPath.toString(); + return fullPath; + } + + /** + * Converts a key prefix into an object path for FSO buckets, using IDs. 
+ * + * This method transforms a user-provided path (e.g., "volume/bucket/dir1") into + * a database-friendly format ("/volumeID/bucketID/ParentId/") by replacing names + * with their corresponding IDs. It simplifies database queries for FSO bucket operations. + *

    +   * {@code
    +   * Examples:
    +   * - Input: "volume/bucket/key" -> Output: "/volumeID/bucketID/parentDirID/key"
    +   * - Input: "volume/bucket/dir1" -> Output: "/volumeID/bucketID/dir1ID/"
    +   * - Input: "volume/bucket/dir1/key1" -> Output: "/volumeID/bucketID/dir1ID/key1"
    +   * - Input: "volume/bucket/dir1/dir2" -> Output: "/volumeID/bucketID/dir2ID/"
    +   * }
    +   * 
    + * @param prevKeyPrefix The path to be converted. + * @return The object path as "/volumeID/bucketID/ParentId/" or an empty string if an error occurs. + * @throws IOException If database access fails. + * @throws IllegalArgumentException If the provided path is invalid or cannot be converted. + */ + public static String convertToObjectPathForOpenKeySearch(String prevKeyPrefix, + ReconOMMetadataManager omMetadataManager, + ReconNamespaceSummaryManager reconNamespaceSummaryManager, + OzoneStorageContainerManager reconSCM) + throws IOException { + try { + String[] names = EntityHandler.parseRequestPath(EntityHandler.normalizePath( + prevKeyPrefix, BucketLayout.FILE_SYSTEM_OPTIMIZED)); + Table openFileTable = omMetadataManager.getOpenKeyTable( + BucketLayout.FILE_SYSTEM_OPTIMIZED); + + // Root-Level: Return the original path + if (names.length == 0 || names[0].isEmpty()) { + return prevKeyPrefix; + } + + // Volume-Level: Fetch the volumeID + String volumeName = names[0]; + validateNames(volumeName); + String volumeKey = omMetadataManager.getVolumeKey(volumeName); + long volumeId = omMetadataManager.getVolumeTable().getSkipCache(volumeKey).getObjectID(); + if (names.length == 1) { + return constructObjectPathWithPrefix(volumeId); + } + + // Bucket-Level: Fetch the bucketID + String bucketName = names[1]; + validateNames(bucketName); + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().getSkipCache(bucketKey); + long bucketId = bucketInfo.getObjectID(); + if (names.length == 2 || bucketInfo.getBucketLayout() != BucketLayout.FILE_SYSTEM_OPTIMIZED) { + return constructObjectPathWithPrefix(volumeId, bucketId); + } + + // Directory or Key-Level: Check both key and directory + BucketHandler handler = + BucketHandler.getBucketHandler(reconNamespaceSummaryManager, omMetadataManager, reconSCM, bucketInfo); + + if (names.length >= 3) { + String lastEntiry = names[names.length - 1]; + + // Check if the directory exists + OmDirectoryInfo dirInfo = handler.getDirInfo(names); + if (dirInfo != null && dirInfo.getName().equals(lastEntiry)) { + return constructObjectPathWithPrefix(volumeId, bucketId, dirInfo.getObjectID()) + OM_KEY_PREFIX; + } + + // Check if the key exists + long dirID = handler.getDirObjectId(names, names.length); + String keyKey = constructObjectPathWithPrefix(volumeId, bucketId, dirID) + + OM_KEY_PREFIX + lastEntiry; + OmKeyInfo keyInfo = openFileTable.getSkipCache(keyKey); + if (keyInfo != null && keyInfo.getFileName().equals(lastEntiry)) { + return constructObjectPathWithPrefix(volumeId, bucketId, + keyInfo.getParentObjectID()) + OM_KEY_PREFIX + lastEntiry; + } + + return prevKeyPrefix; + } + } catch (IllegalArgumentException e) { + log.error( + "IllegalArgumentException encountered while converting key prefix to object path: {}", + prevKeyPrefix, e); + throw e; + } catch (RuntimeException e) { + log.error( + "RuntimeException encountered while converting key prefix to object path: {}", + prevKeyPrefix, e); + return prevKeyPrefix; + } + return prevKeyPrefix; } private static void triggerRebuild(ReconNamespaceSummaryManager reconNamespaceSummaryManager, @@ -596,6 +763,109 @@ public static long convertToEpochMillis(String dateString, String dateFormat, Ti } } + public static boolean validateStartPrefix(String startPrefix) { + + // Ensure startPrefix starts with '/' for non-empty values + startPrefix = startPrefix.startsWith("/") ? 
startPrefix : "/" + startPrefix; + + // Split the path to ensure it's at least at the bucket level (volume/bucket). + String[] pathComponents = startPrefix.split("/"); + if (pathComponents.length < 3 || pathComponents[2].isEmpty()) { + return false; // Invalid if not at bucket level or deeper + } + + return true; + } + + /** + * Retrieves keys from the specified table based on pagination and prefix filtering. + * This method handles different scenarios based on the presence of {@code startPrefix} + * and {@code prevKey}, enabling efficient key retrieval from the table. + * + * The method handles the following cases: + * + * 1. {@code prevKey} provided, {@code startPrefix} empty: + * - Seeks to {@code prevKey}, skips it, and returns subsequent records up to the limit. + * + * 2. {@code prevKey} empty, {@code startPrefix} empty: + * - Iterates from the beginning of the table, retrieving all records up to the limit. + * + * 3. {@code startPrefix} provided, {@code prevKey} empty: + * - Seeks to the first key matching {@code startPrefix} and returns all matching keys up to the limit. + * + * 4. {@code startPrefix} provided, {@code prevKey} provided: + * - Seeks to {@code prevKey}, skips it, and returns subsequent keys that match {@code startPrefix}, + * up to the limit. + * + * This method also handles the following {@code limit} scenarios: + * - If {@code limit == 0} or {@code limit < -1}, no records are returned. + * - If {@code limit == -1}, all records are returned. + * - For positive {@code limit}, it retrieves records up to the specified {@code limit}. + * + * @param table The table to retrieve keys from. + * @param startPrefix The search prefix to match keys against. + * @param limit The maximum number of keys to retrieve. + * @param prevKey The key to start after for the next set of records. + * @return A map of keys and their corresponding {@code OmKeyInfo} or {@code RepeatedOmKeyInfo} objects. + * @throws IOException If there are problems accessing the table. + */ + public static Map extractKeysFromTable( + Table table, String startPrefix, int limit, String prevKey) + throws IOException { + + Map matchedKeys = new LinkedHashMap<>(); + + // Null check for the table to prevent NPE during omMetaManager initialization + if (table == null) { + log.error("Table object is null. omMetaManager might still be initializing."); + return Collections.emptyMap(); + } + + // If limit = 0, return an empty result set + if (limit == 0 || limit < -1) { + return matchedKeys; + } + + // If limit = -1, set it to Integer.MAX_VALUE to return all records + int actualLimit = (limit == -1) ? 
Integer.MAX_VALUE : limit; + + try (TableIterator> keyIter = table.iterator()) { + + // Scenario 1 & 4: prevKey is provided (whether startPrefix is empty or not) + if (!prevKey.isEmpty()) { + keyIter.seek(prevKey); + if (keyIter.hasNext()) { + keyIter.next(); // Skip the previous key record + } + } else if (!startPrefix.isEmpty()) { + // Scenario 3: startPrefix is provided but prevKey is empty, so seek to startPrefix + keyIter.seek(startPrefix); + } + + // Scenario 2: Both startPrefix and prevKey are empty (iterate from the start of the table) + // No seeking needed; just start iterating from the first record in the table + // This is implicit in the following loop, as the iterator will start from the beginning + + // Iterate through the keys while adhering to the limit (if the limit is not zero) + while (keyIter.hasNext() && matchedKeys.size() < actualLimit) { + Table.KeyValue entry = keyIter.next(); + String dbKey = entry.getKey(); + + // Scenario 3 & 4: If startPrefix is provided, ensure the key matches startPrefix + if (!startPrefix.isEmpty() && !dbKey.startsWith(startPrefix)) { + break; // If the key no longer matches the prefix, exit the loop + } + + // Add the valid key-value pair to the results + matchedKeys.put(dbKey, entry.getValue()); + } + } catch (IOException exception) { + log.error("Error retrieving keys from table for path: {}", startPrefix, exception); + throw exception; + } + return matchedKeys; + } + /** * Finds all subdirectories under a parent directory in an FSO bucket. It builds * a list of paths for these subdirectories. These sub-directories are then used diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java index 7f0efe97dd9..543b8e388a9 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java @@ -152,29 +152,28 @@ public Response getDatanodes() { } }); try { - builder.withContainers(nodeManager.getContainerCount(datanode)); - builder.withOpenContainers(openContainers.get()); + builder.setContainers(nodeManager.getContainerCount(datanode)); + builder.setOpenContainers(openContainers.get()); } catch (NodeNotFoundException ex) { LOG.warn("Cannot get containers, datanode {} not found.", datanode.getUuid(), ex); } DatanodeInfo dnInfo = (DatanodeInfo) datanode; - datanodes.add(builder.withHostname(nodeManager.getHostName(datanode)) - .withDatanodeStorageReport(storageReport) - .withLastHeartbeat(nodeManager.getLastHeartbeat(datanode)) - .withState(nodeState) - .withOperationalState(nodeOpState) - .withPipelines(pipelines) - .withLeaderCount(leaderCount.get()) - .withUUid(datanode.getUuidString()) - .withVersion(nodeManager.getVersion(datanode)) - .withSetupTime(nodeManager.getSetupTime(datanode)) - .withRevision(nodeManager.getRevision(datanode)) - .withBuildDate(nodeManager.getBuildDate(datanode)) - .withLayoutVersion( + datanodes.add(builder.setHostname(nodeManager.getHostName(datanode)) + .setDatanodeStorageReport(storageReport) + .setLastHeartbeat(nodeManager.getLastHeartbeat(datanode)) + .setState(nodeState) + .setOperationalState(nodeOpState) + .setPipelines(pipelines) + .setLeaderCount(leaderCount.get()) + .setUuid(datanode.getUuidString()) + .setVersion(nodeManager.getVersion(datanode)) + .setSetupTime(nodeManager.getSetupTime(datanode)) + .setRevision(nodeManager.getRevision(datanode)) + 
.setLayoutVersion( dnInfo.getLastKnownLayoutVersion().getMetadataLayoutVersion()) - .withNetworkLocation(datanode.getNetworkLocation()) + .setNetworkLocation(datanode.getNetworkLocation()) .build()); }); @@ -221,26 +220,26 @@ public Response removeDatanodes(List uuids) { try { if (preChecksSuccess(nodeByUuid, failedNodeErrorResponseMap)) { removedDatanodes.add(DatanodeMetadata.newBuilder() - .withHostname(nodeManager.getHostName(nodeByUuid)) - .withUUid(uuid) - .withState(nodeManager.getNodeStatus(nodeByUuid).getHealth()) + .setHostname(nodeManager.getHostName(nodeByUuid)) + .setUuid(uuid) + .setState(nodeManager.getNodeStatus(nodeByUuid).getHealth()) .build()); nodeManager.removeNode(nodeByUuid); LOG.info("Node {} removed successfully !!!", uuid); } else { failedDatanodes.add(DatanodeMetadata.newBuilder() - .withHostname(nodeManager.getHostName(nodeByUuid)) - .withUUid(uuid) - .withOperationalState(nodeByUuid.getPersistedOpState()) - .withState(nodeManager.getNodeStatus(nodeByUuid).getHealth()) + .setHostname(nodeManager.getHostName(nodeByUuid)) + .setUuid(uuid) + .setOperationalState(nodeByUuid.getPersistedOpState()) + .setState(nodeManager.getNodeStatus(nodeByUuid).getHealth()) .build()); } } catch (NodeNotFoundException nnfe) { LOG.error("Selected node {} not found : {} ", uuid, nnfe); notFoundDatanodes.add(DatanodeMetadata.newBuilder() - .withHostname("") - .withState(NodeState.DEAD) - .withUUid(uuid).build()); + .setHostname("") + .setState(NodeState.DEAD) + .setUuid(uuid).build()); } } } catch (Exception exp) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index 4620b69fbe3..64da15db413 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo; +import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfoProtoWrapper; import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; import org.apache.hadoop.ozone.recon.api.types.ListKeysResponse; import org.apache.hadoop.ozone.recon.api.types.NSSummary; @@ -55,30 +56,31 @@ import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import java.io.IOException; +import java.util.Collections; import java.util.ArrayList; -import java.util.Arrays; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import java.util.TimeZone; -import java.util.function.Predicate; -import java.util.stream.Collectors; -import java.util.stream.Stream; +import static org.apache.commons.lang3.StringUtils.isNotBlank; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_DIR_TABLE; import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT; -import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_KEY_SIZE; -import static 
org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_LIMIT; -import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_PREVKEY; import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_OPEN_KEY_INCLUDE_FSO; import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_OPEN_KEY_INCLUDE_NON_FSO; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OPEN_KEY_INCLUDE_FSO; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OPEN_KEY_INCLUDE_NON_FSO; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_START_PREFIX; +import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_KEY_SIZE; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_LIMIT; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_PREVKEY; +import static org.apache.hadoop.ozone.recon.ReconResponseUtils.createBadRequestResponse; +import static org.apache.hadoop.ozone.recon.ReconResponseUtils.createInternalServerErrorResponse; +import static org.apache.hadoop.ozone.recon.ReconResponseUtils.noMatchedKeysResponse; import static org.apache.hadoop.ozone.recon.api.handlers.BucketHandler.getBucketHandler; import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.normalizePath; import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.parseRequestPath; @@ -176,101 +178,133 @@ public OMDBInsightEndpoint(OzoneStorageContainerManager reconSCM, @Path("/open") public Response getOpenKeyInfo( @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) - int limit, + int limit, @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) - String prevKey, - @DefaultValue(DEFAULT_OPEN_KEY_INCLUDE_FSO) - @QueryParam(RECON_OPEN_KEY_INCLUDE_FSO) - boolean includeFso, - @DefaultValue(DEFAULT_OPEN_KEY_INCLUDE_NON_FSO) - @QueryParam(RECON_OPEN_KEY_INCLUDE_NON_FSO) - boolean includeNonFso) { + String prevKey, + @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_START_PREFIX) + String startPrefix, + @DefaultValue(DEFAULT_OPEN_KEY_INCLUDE_FSO) @QueryParam(RECON_OPEN_KEY_INCLUDE_FSO) + boolean includeFso, + @DefaultValue(DEFAULT_OPEN_KEY_INCLUDE_NON_FSO) @QueryParam(RECON_OPEN_KEY_INCLUDE_NON_FSO) + boolean includeNonFso) { + KeyInsightInfoResponse openKeyInsightInfo = new KeyInsightInfoResponse(); - List nonFSOKeyInfoList = - openKeyInsightInfo.getNonFSOKeyInfoList(); - - boolean skipPrevKeyDone = false; - boolean isLegacyBucketLayout = true; - boolean recordsFetchedLimitReached = false; - - String lastKey = ""; - List fsoKeyInfoList = openKeyInsightInfo.getFsoKeyInfoList(); - for (BucketLayout layout : Arrays.asList( - BucketLayout.LEGACY, BucketLayout.FILE_SYSTEM_OPTIMIZED)) { - isLegacyBucketLayout = (layout == BucketLayout.LEGACY); - // Skip bucket iteration based on parameters includeFso and includeNonFso - if ((!includeFso && !isLegacyBucketLayout) || - (!includeNonFso && isLegacyBucketLayout)) { - continue; + + try { + long replicatedTotal = 0; + long unreplicatedTotal = 0; + boolean skipPrevKeyDone = false; // Tracks if prevKey was used earlier + boolean keysFound = false; // Flag to track if any keys are found + String lastKey = null; + Map obsKeys = Collections.emptyMap(); + Map fsoKeys = Collections.emptyMap(); + + // Validate startPrefix if it's provided + if (isNotBlank(startPrefix) && !validateStartPrefix(startPrefix)) { + return createBadRequestResponse("Invalid startPrefix: Path must be at the bucket level or deeper."); } - Table openKeyTable = - 
omMetadataManager.getOpenKeyTable(layout); - try ( - TableIterator> - keyIter = openKeyTable.iterator()) { - boolean skipPrevKey = false; - String seekKey = prevKey; - if (!skipPrevKeyDone && StringUtils.isNotBlank(prevKey)) { - skipPrevKey = true; - Table.KeyValue seekKeyValue = - keyIter.seek(seekKey); - // check if RocksDB was able to seek correctly to the given key prefix - // if not, then return empty result - // In case of an empty prevKeyPrefix, all the keys are returned - if (seekKeyValue == null || - (StringUtils.isNotBlank(prevKey) && - !seekKeyValue.getKey().equals(prevKey))) { - continue; - } + // Use searchOpenKeys logic with adjustments for FSO and Non-FSO filtering + if (includeNonFso) { + // Search for non-FSO keys in KeyTable + Table openKeyTable = omMetadataManager.getOpenKeyTable(BucketLayout.LEGACY); + obsKeys = ReconUtils.extractKeysFromTable(openKeyTable, startPrefix, limit, prevKey); + for (Map.Entry entry : obsKeys.entrySet()) { + keysFound = true; + skipPrevKeyDone = true; // Don't use the prevKey for the file table + KeyEntityInfo keyEntityInfo = createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue()); + openKeyInsightInfo.getNonFSOKeyInfoList().add(keyEntityInfo); // Add to non-FSO list + replicatedTotal += entry.getValue().getReplicatedSize(); + unreplicatedTotal += entry.getValue().getDataSize(); + lastKey = entry.getKey(); // Update lastKey } - while (keyIter.hasNext()) { - Table.KeyValue kv = keyIter.next(); - String key = kv.getKey(); - lastKey = key; - OmKeyInfo omKeyInfo = kv.getValue(); - // skip the prev key if prev key is present - if (skipPrevKey && key.equals(prevKey)) { - skipPrevKeyDone = true; - continue; - } - KeyEntityInfo keyEntityInfo = new KeyEntityInfo(); - keyEntityInfo.setKey(key); - keyEntityInfo.setPath(omKeyInfo.getKeyName()); - keyEntityInfo.setInStateSince(omKeyInfo.getCreationTime()); - keyEntityInfo.setSize(omKeyInfo.getDataSize()); - keyEntityInfo.setReplicatedSize(omKeyInfo.getReplicatedSize()); - keyEntityInfo.setReplicationConfig(omKeyInfo.getReplicationConfig()); - openKeyInsightInfo.setUnreplicatedDataSize( - openKeyInsightInfo.getUnreplicatedDataSize() + - keyEntityInfo.getSize()); - openKeyInsightInfo.setReplicatedDataSize( - openKeyInsightInfo.getReplicatedDataSize() + - keyEntityInfo.getReplicatedSize()); - boolean added = - isLegacyBucketLayout ? nonFSOKeyInfoList.add(keyEntityInfo) : - fsoKeyInfoList.add(keyEntityInfo); - if ((nonFSOKeyInfoList.size() + fsoKeyInfoList.size()) == limit) { - recordsFetchedLimitReached = true; - break; - } + } + + if (includeFso) { + // Search for FSO keys in FileTable + // If prevKey was used for non-FSO keys, skip it for FSO keys. + String effectivePrevKey = skipPrevKeyDone ? "" : prevKey; + // If limit = -1 then we need to fetch all keys without limit + int effectiveLimit = limit == -1 ? 
limit : limit - obsKeys.size(); + fsoKeys = searchOpenKeysInFSO(startPrefix, effectiveLimit, effectivePrevKey); + for (Map.Entry entry : fsoKeys.entrySet()) { + keysFound = true; + KeyEntityInfo keyEntityInfo = createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue()); + openKeyInsightInfo.getFsoKeyInfoList().add(keyEntityInfo); // Add to FSO list + replicatedTotal += entry.getValue().getReplicatedSize(); + unreplicatedTotal += entry.getValue().getDataSize(); + lastKey = entry.getKey(); // Update lastKey } - } catch (IOException ex) { - throw new WebApplicationException(ex, - Response.Status.INTERNAL_SERVER_ERROR); - } catch (IllegalArgumentException e) { - throw new WebApplicationException(e, Response.Status.BAD_REQUEST); - } catch (Exception ex) { - throw new WebApplicationException(ex, - Response.Status.INTERNAL_SERVER_ERROR); } - if (recordsFetchedLimitReached) { - break; + + // If no keys were found, return a response indicating that no keys matched + if (!keysFound) { + return noMatchedKeysResponse(startPrefix); } + + // Set the aggregated totals in the response + openKeyInsightInfo.setReplicatedDataSize(replicatedTotal); + openKeyInsightInfo.setUnreplicatedDataSize(unreplicatedTotal); + openKeyInsightInfo.setLastKey(lastKey); + + // Return the response with the matched keys and their data sizes + return Response.ok(openKeyInsightInfo).build(); + } catch (IOException e) { + // Handle IO exceptions and return an internal server error response + return createInternalServerErrorResponse("Error searching open keys in OM DB: " + e.getMessage()); + } catch (IllegalArgumentException e) { + // Handle illegal argument exceptions and return a bad request response + return createBadRequestResponse("Invalid argument: " + e.getMessage()); } + } - openKeyInsightInfo.setLastKey(lastKey); - return Response.ok(openKeyInsightInfo).build(); + public Map searchOpenKeysInFSO(String startPrefix, + int limit, String prevKey) + throws IOException, IllegalArgumentException { + Map matchedKeys = new LinkedHashMap<>(); + // Convert the search prefix to an object path for FSO buckets + String startPrefixObjectPath = ReconUtils.convertToObjectPathForOpenKeySearch( + startPrefix, omMetadataManager, reconNamespaceSummaryManager, reconSCM); + String[] names = parseRequestPath(startPrefixObjectPath); + Table openFileTable = + omMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED); + + // If names.length <= 2, then the search prefix is at the volume or bucket level hence + // no need to find parent or extract id's or find subpaths as the openFileTable is + // suitable for volume and bucket level search + if (names.length > 2 && startPrefixObjectPath.endsWith(OM_KEY_PREFIX)) { + // Fetch the parent ID to search for + long parentId = Long.parseLong(names[names.length - 1]); + + // Fetch the nameSpaceSummary for the parent ID + NSSummary parentSummary = reconNamespaceSummaryManager.getNSSummary(parentId); + if (parentSummary == null) { + return matchedKeys; + } + List subPaths = new ArrayList<>(); + // Add the initial search prefix object path because it can have both openFiles + // and subdirectories with openFiles + subPaths.add(startPrefixObjectPath); + + // Recursively gather all subpaths + ReconUtils.gatherSubPaths(parentId, subPaths, Long.parseLong(names[0]), Long.parseLong(names[1]), + reconNamespaceSummaryManager); + + // Iterate over the subpaths and retrieve the open files + for (String subPath : subPaths) { + matchedKeys.putAll( + ReconUtils.extractKeysFromTable(openFileTable, subPath, 
limit - matchedKeys.size(), prevKey)); + if (matchedKeys.size() >= limit) { + break; + } + } + return matchedKeys; + } + + // If the search level is at the volume, bucket or key level, directly search the openFileTable + matchedKeys.putAll( + ReconUtils.extractKeysFromTable(openFileTable, startPrefixObjectPath, limit, prevKey)); + return matchedKeys; } /** @@ -339,62 +373,6 @@ private Long getValueFromId(GlobalStats record) { return record != null ? record.getValue() : 0L; } - private void getPendingForDeletionKeyInfo( - int limit, - String prevKey, - KeyInsightInfoResponse deletedKeyAndDirInsightInfo) { - List repeatedOmKeyInfoList = - deletedKeyAndDirInsightInfo.getRepeatedOmKeyInfoList(); - Table deletedTable = - omMetadataManager.getDeletedTable(); - try ( - TableIterator> - keyIter = deletedTable.iterator()) { - boolean skipPrevKey = false; - String seekKey = prevKey; - String lastKey = ""; - if (StringUtils.isNotBlank(prevKey)) { - skipPrevKey = true; - Table.KeyValue seekKeyValue = - keyIter.seek(seekKey); - // check if RocksDB was able to seek correctly to the given key prefix - // if not, then return empty result - // In case of an empty prevKeyPrefix, all the keys are returned - if (seekKeyValue == null || - (StringUtils.isNotBlank(prevKey) && - !seekKeyValue.getKey().equals(prevKey))) { - return; - } - } - while (keyIter.hasNext()) { - Table.KeyValue kv = keyIter.next(); - String key = kv.getKey(); - lastKey = key; - RepeatedOmKeyInfo repeatedOmKeyInfo = kv.getValue(); - // skip the prev key if prev key is present - if (skipPrevKey && key.equals(prevKey)) { - continue; - } - updateReplicatedAndUnReplicatedTotal(deletedKeyAndDirInsightInfo, - repeatedOmKeyInfo); - repeatedOmKeyInfoList.add(repeatedOmKeyInfo); - if ((repeatedOmKeyInfoList.size()) == limit) { - break; - } - } - deletedKeyAndDirInsightInfo.setLastKey(lastKey); - } catch (IOException ex) { - throw new WebApplicationException(ex, - Response.Status.INTERNAL_SERVER_ERROR); - } catch (IllegalArgumentException e) { - throw new WebApplicationException(e, Response.Status.BAD_REQUEST); - } catch (Exception ex) { - throw new WebApplicationException(ex, - Response.Status.INTERNAL_SERVER_ERROR); - } - } - /** Retrieves the summary of deleted keys. * * This method calculates and returns a summary of deleted keys. @@ -428,6 +406,7 @@ public Response getDeletedKeySummary() { * limit - limits the number of key/files returned. * prevKey - E.g. /vol1/bucket1/key1, this will skip keys till it * seeks correctly to the given prevKey. + * startPrefix - E.g. /vol1/bucket1, this will return keys matching this prefix. 
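The limit/prevKey/startPrefix semantics documented for this endpoint (and implemented by ReconUtils.extractKeysFromTable above) can be shown with a small standalone sketch in which a TreeMap stands in for the sorted RocksDB table; all names are invented. prevKey resumes strictly after the given key, startPrefix bounds the scan, limit == -1 means unbounded, and limit == 0 or limit < -1 returns nothing.

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.TreeMap;

public class PrefixPaginationSketch {

  static Map<String, String> scan(TreeMap<String, String> table,
      String startPrefix, String prevKey, int limit) {
    Map<String, String> out = new LinkedHashMap<>();
    if (limit == 0 || limit < -1) {
      return out; // nothing requested
    }
    int max = (limit == -1) ? Integer.MAX_VALUE : limit;

    // Seek: start strictly after prevKey when given, otherwise at the prefix (inclusive).
    String seek = prevKey.isEmpty() ? startPrefix : prevKey;
    for (Map.Entry<String, String> e : table.tailMap(seek, prevKey.isEmpty()).entrySet()) {
      String key = e.getKey();
      if (!startPrefix.isEmpty() && !key.startsWith(startPrefix)) {
        break; // left the prefix range
      }
      out.put(key, e.getValue());
      if (out.size() >= max) {
        break;
      }
    }
    return out;
  }

  public static void main(String[] args) {
    TreeMap<String, String> table = new TreeMap<>();
    table.put("/vol1/bucket1/a", "1");
    table.put("/vol1/bucket1/b", "2");
    table.put("/vol1/bucket1/c", "3");
    table.put("/vol1/bucket2/a", "4");
    // First page: two keys under /vol1/bucket1/
    System.out.println(scan(table, "/vol1/bucket1/", "", 2).keySet());
    // Next page: resume after the last key returned above
    System.out.println(scan(table, "/vol1/bucket1/", "/vol1/bucket1/b", 2).keySet());
  }
}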
* Sample API Response: * { * "lastKey": "vol1/bucket1/key1", @@ -476,17 +455,90 @@ public Response getDeletedKeySummary() { @GET @Path("/deletePending") public Response getDeletedKeyInfo( - @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) - int limit, - @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) - String prevKey) { - KeyInsightInfoResponse - deletedKeyInsightInfo = new KeyInsightInfoResponse(); - getPendingForDeletionKeyInfo(limit, prevKey, - deletedKeyInsightInfo); + @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) int limit, + @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) String prevKey, + @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_START_PREFIX) String startPrefix) { + + // Initialize the response object to hold the key information + KeyInsightInfoResponse deletedKeyInsightInfo = new KeyInsightInfoResponse(); + + boolean keysFound = false; + + try { + // Validate startPrefix if it's provided + if (isNotBlank(startPrefix) && !validateStartPrefix(startPrefix)) { + return createBadRequestResponse("Invalid startPrefix: Path must be at the bucket level or deeper."); + } + + // Perform the search based on the limit, prevKey, and startPrefix + keysFound = getPendingForDeletionKeyInfo(limit, prevKey, startPrefix, deletedKeyInsightInfo); + + } catch (IllegalArgumentException e) { + LOG.error("Invalid startPrefix provided: {}", startPrefix, e); + return createBadRequestResponse("Invalid startPrefix: " + e.getMessage()); + } catch (IOException e) { + LOG.error("I/O error while searching deleted keys in OM DB", e); + return createInternalServerErrorResponse("Error searching deleted keys in OM DB: " + e.getMessage()); + } catch (Exception e) { + LOG.error("Unexpected error occurred while searching deleted keys", e); + return createInternalServerErrorResponse("Unexpected error: " + e.getMessage()); + } + + if (!keysFound) { + return noMatchedKeysResponse(""); + } + return Response.ok(deletedKeyInsightInfo).build(); } + /** + * Retrieves keys pending deletion based on startPrefix, filtering keys matching the prefix. + * + * @param limit The limit of records to return. + * @param prevKey Pagination key. + * @param startPrefix The search prefix. + * @param deletedKeyInsightInfo The response object to populate. 
+ */ + private boolean getPendingForDeletionKeyInfo( + int limit, String prevKey, String startPrefix, + KeyInsightInfoResponse deletedKeyInsightInfo) throws IOException { + + long replicatedTotal = 0; + long unreplicatedTotal = 0; + boolean keysFound = false; + String lastKey = null; + + // Search for deleted keys in DeletedTable + Table deletedTable = omMetadataManager.getDeletedTable(); + Map deletedKeys = + ReconUtils.extractKeysFromTable(deletedTable, startPrefix, limit, prevKey); + + // Iterate over the retrieved keys and populate the response + for (Map.Entry entry : deletedKeys.entrySet()) { + keysFound = true; + RepeatedOmKeyInfo repeatedOmKeyInfo = entry.getValue(); + + // We know each RepeatedOmKeyInfo has just one OmKeyInfo object + OmKeyInfo keyInfo = repeatedOmKeyInfo.getOmKeyInfoList().get(0); + KeyEntityInfo keyEntityInfo = createKeyEntityInfoFromOmKeyInfo(entry.getKey(), keyInfo); + + // Add the key directly to the list without classification + deletedKeyInsightInfo.getRepeatedOmKeyInfoList().add(repeatedOmKeyInfo); + + replicatedTotal += keyInfo.getReplicatedSize(); + unreplicatedTotal += keyInfo.getDataSize(); + + lastKey = entry.getKey(); // Update lastKey + } + + // Set the aggregated totals in the response + deletedKeyInsightInfo.setReplicatedDataSize(replicatedTotal); + deletedKeyInsightInfo.setUnreplicatedDataSize(unreplicatedTotal); + deletedKeyInsightInfo.setLastKey(lastKey); + + return keysFound; + } + /** * Creates a keys summary for deleted keys and updates the provided * keysSummary map. Calculates the total number of deleted keys, replicated @@ -526,7 +578,7 @@ private void getPendingForDeletionDirInfo( boolean skipPrevKey = false; String seekKey = prevKey; String lastKey = ""; - if (StringUtils.isNotBlank(prevKey)) { + if (isNotBlank(prevKey)) { skipPrevKey = true; Table.KeyValue seekKeyValue = keyIter.seek(seekKey); @@ -534,7 +586,7 @@ private void getPendingForDeletionDirInfo( // if not, then return empty result // In case of an empty prevKeyPrefix, all the keys are returned if (seekKeyValue == null || - (StringUtils.isNotBlank(prevKey) && + (isNotBlank(prevKey) && !seekKeyValue.getKey().equals(prevKey))) { return; } @@ -549,6 +601,7 @@ private void getPendingForDeletionDirInfo( continue; } KeyEntityInfo keyEntityInfo = new KeyEntityInfo(); + keyEntityInfo.setIsKey(omKeyInfo.isFile()); keyEntityInfo.setKey(omKeyInfo.getFileName()); keyEntityInfo.setPath(createPath(omKeyInfo)); keyEntityInfo.setInStateSince(omKeyInfo.getCreationTime()); @@ -953,20 +1006,20 @@ public Response listKeys(@QueryParam("replicationType") String replicationType, ListKeysResponse listKeysResponse = new ListKeysResponse(); if (!ReconUtils.isInitializationComplete(omMetadataManager)) { listKeysResponse.setStatus(ResponseStatus.INITIALIZING); - return Response.ok(listKeysResponse).build(); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).entity(listKeysResponse).build(); } ParamInfo paramInfo = new ParamInfo(replicationType, creationDate, keySize, startPrefix, prevKey, limit, false, ""); Response response = getListKeysResponse(paramInfo); if ((response.getStatus() != Response.Status.OK.getStatusCode()) && - (response.getStatus() != Response.Status.NOT_FOUND.getStatusCode())) { + (response.getStatus() != Response.Status.NO_CONTENT.getStatusCode())) { return response; } if (response.getEntity() instanceof ListKeysResponse) { listKeysResponse = (ListKeysResponse) response.getEntity(); } - List keyInfoList = listKeysResponse.getKeys(); + List keyInfoList = 
listKeysResponse.getKeys(); if (!keyInfoList.isEmpty()) { listKeysResponse.setLastKey(keyInfoList.get(keyInfoList.size() - 1).getKey()); } @@ -974,72 +1027,58 @@ public Response listKeys(@QueryParam("replicationType") String replicationType, } private Response getListKeysResponse(ParamInfo paramInfo) { + ListKeysResponse listKeysResponse = new ListKeysResponse(); try { paramInfo.setLimit(Math.max(0, paramInfo.getLimit())); // Ensure limit is non-negative - ListKeysResponse listKeysResponse = new ListKeysResponse(); listKeysResponse.setPath(paramInfo.getStartPrefix()); long replicatedTotal = 0; long unreplicatedTotal = 0; - boolean keysFound = false; // Flag to track if any keys are found // Search keys from non-FSO layout. - Map obsKeys; - Table keyTable = - omMetadataManager.getKeyTable(BucketLayout.LEGACY); - obsKeys = retrieveKeysFromTable(keyTable, paramInfo); - for (Map.Entry entry : obsKeys.entrySet()) { - keysFound = true; - KeyEntityInfo keyEntityInfo = - createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue()); - - listKeysResponse.getKeys().add(keyEntityInfo); - replicatedTotal += entry.getValue().getReplicatedSize(); - unreplicatedTotal += entry.getValue().getDataSize(); - } + Table keyTable = + omMetadataManager.getKeyTableLite(BucketLayout.LEGACY); + retrieveKeysFromTable(keyTable, paramInfo, listKeysResponse.getKeys()); // Search keys from FSO layout. - Map fsoKeys = searchKeysInFSO(paramInfo); - for (Map.Entry entry : fsoKeys.entrySet()) { - keysFound = true; - KeyEntityInfo keyEntityInfo = - createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue()); - - listKeysResponse.getKeys().add(keyEntityInfo); - replicatedTotal += entry.getValue().getReplicatedSize(); - unreplicatedTotal += entry.getValue().getDataSize(); - } + searchKeysInFSO(paramInfo, listKeysResponse.getKeys()); // If no keys were found, return a response indicating that no keys matched - if (!keysFound) { + if (listKeysResponse.getKeys().isEmpty()) { return ReconResponseUtils.noMatchedKeysResponse(paramInfo.getStartPrefix()); } + for (KeyEntityInfoProtoWrapper keyEntityInfo : listKeysResponse.getKeys()) { + replicatedTotal += keyEntityInfo.getReplicatedSize(); + unreplicatedTotal += keyEntityInfo.getSize(); + } + // Set the aggregated totals in the response listKeysResponse.setReplicatedDataSize(replicatedTotal); listKeysResponse.setUnReplicatedDataSize(unreplicatedTotal); return Response.ok(listKeysResponse).build(); - } catch (IOException e) { - return ReconResponseUtils.createInternalServerErrorResponse( - "Error listing keys from OM DB: " + e.getMessage()); } catch (RuntimeException e) { + if (e instanceof ServiceNotReadyException) { + listKeysResponse.setStatus(ResponseStatus.INITIALIZING); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).entity(listKeysResponse).build(); + } + LOG.error("Error generating listKeys response", e); return ReconResponseUtils.createInternalServerErrorResponse( "Unexpected runtime error while searching keys in OM DB: " + e.getMessage()); } catch (Exception e) { + LOG.error("Error generating listKeys response", e); return ReconResponseUtils.createInternalServerErrorResponse( "Error listing keys from OM DB: " + e.getMessage()); } } - public Map searchKeysInFSO(ParamInfo paramInfo) + public void searchKeysInFSO(ParamInfo paramInfo, List results) throws IOException { - int originalLimit = paramInfo.getLimit(); - Map matchedKeys = new LinkedHashMap<>(); // Convert the search prefix to an object path for FSO buckets String startPrefixObjectPath = 
convertStartPrefixPathToObjectIdPath(paramInfo.getStartPrefix()); String[] names = parseRequestPath(startPrefixObjectPath); - Table fileTable = - omMetadataManager.getKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED); + Table fileTable = + omMetadataManager.getKeyTableLite(BucketLayout.FILE_SYSTEM_OPTIMIZED); // If names.length > 2, then the search prefix is at the level above bucket level hence // no need to find parent or extract id's or find subpaths as the fileTable is @@ -1052,7 +1091,7 @@ public Map searchKeysInFSO(ParamInfo paramInfo) NSSummary parentSummary = reconNamespaceSummaryManager.getNSSummary(parentId); if (parentSummary == null) { - return matchedKeys; + return; } List subPaths = new ArrayList<>(); // Add the initial search prefix object path because it can have both files and subdirectories with files. @@ -1064,21 +1103,17 @@ public Map searchKeysInFSO(ParamInfo paramInfo) // Iterate over the subpaths and retrieve the files for (String subPath : subPaths) { paramInfo.setStartPrefix(subPath); - matchedKeys.putAll( - retrieveKeysFromTable(fileTable, paramInfo)); - paramInfo.setLimit(originalLimit - matchedKeys.size()); - if (matchedKeys.size() >= originalLimit) { + retrieveKeysFromTable(fileTable, paramInfo, results); + if (results.size() >= paramInfo.getLimit()) { break; } } - return matchedKeys; + return; } paramInfo.setStartPrefix(startPrefixObjectPath); // Iterate over for bucket and volume level search - matchedKeys.putAll( - retrieveKeysFromTable(fileTable, paramInfo)); - return matchedKeys; + retrieveKeysFromTable(fileTable, paramInfo, results); } @@ -1151,32 +1186,34 @@ public String convertStartPrefixPathToObjectIdPath(String startPrefixPath) * @return A map of keys and their corresponding OmKeyInfo objects. * @throws IOException If there are problems accessing the table. */ - private Map retrieveKeysFromTable( - Table table, ParamInfo paramInfo) + private void retrieveKeysFromTable( + Table table, ParamInfo paramInfo, List results) throws IOException { boolean skipPrevKey = false; String seekKey = paramInfo.getPrevKey(); - Map matchedKeys = new LinkedHashMap<>(); try ( - TableIterator> keyIter = table.iterator()) { + TableIterator> keyIter = table.iterator()) { - if (!paramInfo.isSkipPrevKeyDone() && StringUtils.isNotBlank(seekKey)) { + if (!paramInfo.isSkipPrevKeyDone() && isNotBlank(seekKey)) { skipPrevKey = true; - Table.KeyValue seekKeyValue = + Table.KeyValue seekKeyValue = keyIter.seek(seekKey); // check if RocksDB was able to seek correctly to the given key prefix // if not, then return empty result // In case of an empty prevKeyPrefix, all the keys are returned if (seekKeyValue == null || (!seekKeyValue.getKey().equals(paramInfo.getPrevKey()))) { - return matchedKeys; + return; } } else { keyIter.seek(paramInfo.getStartPrefix()); } + long prevParentID = -1; + StringBuilder keyPrefix = null; + int keyPrefixLength = 0; while (keyIter.hasNext()) { - Table.KeyValue entry = keyIter.next(); + Table.KeyValue entry = keyIter.next(); String dbKey = entry.getKey(); if (!dbKey.startsWith(paramInfo.getStartPrefix())) { break; // Exit the loop if the key no longer matches the prefix @@ -1186,9 +1223,37 @@ private Map retrieveKeysFromTable( continue; } if (applyFilters(entry, paramInfo)) { - matchedKeys.put(dbKey, entry.getValue()); + KeyEntityInfoProtoWrapper keyEntityInfo = entry.getValue(); + keyEntityInfo.setKey(dbKey); + if (keyEntityInfo.getParentId() == 0) { + // Legacy bucket keys have a parentID of zero. OBS bucket keys have a parentID of the bucketID. 
+ // FSO keys have a parent of the immediate parent directory. + // Legacy buckets are obsolete, so this code path is not optimized. We don't expect to see many Legacy + // buckets in practice. + prevParentID = -1; + keyEntityInfo.setPath(ReconUtils.constructFullPath(keyEntityInfo.getKeyName(), keyEntityInfo.getParentId(), + keyEntityInfo.getVolumeName(), keyEntityInfo.getBucketName(), reconNamespaceSummaryManager, + omMetadataManager)); + } else { + // As we iterate keys in sorted order, its highly likely that keys have the same prefix for many keys in a + // row. Especially for FSO buckets, its expensive to construct the path for each key. So, we construct the + // prefix once and reuse it for each identical parent. Only if the parent changes do we need to construct + // a new prefix path. + if (prevParentID != keyEntityInfo.getParentId()) { + prevParentID = keyEntityInfo.getParentId(); + keyPrefix = ReconUtils.constructFullPathPrefix(keyEntityInfo.getParentId(), + keyEntityInfo.getVolumeName(), keyEntityInfo.getBucketName(), reconNamespaceSummaryManager, + omMetadataManager); + keyPrefixLength = keyPrefix.length(); + } + keyPrefix.setLength(keyPrefixLength); + keyPrefix.append(keyEntityInfo.getKeyName()); + keyEntityInfo.setPath(keyPrefix.toString()); + } + + results.add(keyEntityInfo); paramInfo.setLastKey(dbKey); - if (matchedKeys.size() >= paramInfo.getLimit()) { + if (results.size() >= paramInfo.getLimit()) { break; } } @@ -1197,53 +1262,25 @@ private Map retrieveKeysFromTable( LOG.error("Error retrieving keys from table for path: {}", paramInfo.getStartPrefix(), exception); throw exception; } - return matchedKeys; } - private boolean applyFilters(Table.KeyValue entry, ParamInfo paramInfo) throws IOException { + private boolean applyFilters(Table.KeyValue entry, ParamInfo paramInfo) + throws IOException { LOG.debug("Applying filters on : {}", entry.getKey()); - long epochMillis = - ReconUtils.convertToEpochMillis(paramInfo.getCreationDate(), "MM-dd-yyyy HH:mm:ss", TimeZone.getDefault()); - Predicate> keyAgeFilter = keyData -> { - try { - return keyData.getValue().getCreationTime() >= epochMillis; - } catch (IOException e) { - throw new RuntimeException(e); - } - }; - Predicate> keyReplicationFilter = - keyData -> { - try { - return keyData.getValue().getReplicationConfig().getReplicationType().name() - .equals(paramInfo.getReplicationType()); - } catch (IOException e) { - try { - throw new IOException(e); - } catch (IOException ex) { - throw new RuntimeException(ex); - } - } - }; - Predicate> keySizeFilter = keyData -> { - try { - return keyData.getValue().getDataSize() >= paramInfo.getKeySize(); - } catch (IOException e) { - throw new RuntimeException(e); - } - }; - - List> filteredKeyList = Stream.of(entry) - .filter(keyData -> !StringUtils.isEmpty(paramInfo.getCreationDate()) ? keyAgeFilter.test(keyData) : true) - .filter( - keyData -> !StringUtils.isEmpty(paramInfo.getReplicationType()) ? 
keyReplicationFilter.test(keyData) : true) - .filter(keySizeFilter) - .collect(Collectors.toList()); + if (!StringUtils.isEmpty(paramInfo.getCreationDate()) + && (entry.getValue().getCreationTime() < paramInfo.getCreationDateEpoch())) { + return false; + } - LOG.debug("After applying filter on : {}, filtered list size: {}", entry.getKey(), filteredKeyList.size()); + if (!StringUtils.isEmpty(paramInfo.getReplicationType()) + && !entry.getValue().getReplicationConfig().getReplicationType().name().equals( + paramInfo.getReplicationType())) { + return false; + } - return (filteredKeyList.size() > 0); + return entry.getValue().getSize() >= paramInfo.getKeySize(); } /** @@ -1257,8 +1294,8 @@ private KeyEntityInfo createKeyEntityInfoFromOmKeyInfo(String dbKey, OmKeyInfo keyInfo) throws IOException { KeyEntityInfo keyEntityInfo = new KeyEntityInfo(); keyEntityInfo.setKey(dbKey); // Set the DB key - keyEntityInfo.setPath(ReconUtils.constructFullPath(keyInfo, reconNamespaceSummaryManager, - omMetadataManager)); + keyEntityInfo.setIsKey(keyInfo.isFile()); + keyEntityInfo.setPath(ReconUtils.constructFullPath(keyInfo, reconNamespaceSummaryManager, omMetadataManager)); keyEntityInfo.setSize(keyInfo.getDataSize()); keyEntityInfo.setCreationTime(keyInfo.getCreationTime()); keyEntityInfo.setModificationTime(keyInfo.getModificationTime()); @@ -1276,17 +1313,18 @@ private void createSummaryForDeletedDirectories( dirSummary.put("totalDeletedDirectories", deletedDirCount); } - private void updateReplicatedAndUnReplicatedTotal( - KeyInsightInfoResponse deletedKeyAndDirInsightInfo, - RepeatedOmKeyInfo repeatedOmKeyInfo) { - repeatedOmKeyInfo.getOmKeyInfoList().forEach(omKeyInfo -> { - deletedKeyAndDirInsightInfo.setUnreplicatedDataSize( - deletedKeyAndDirInsightInfo.getUnreplicatedDataSize() + - omKeyInfo.getDataSize()); - deletedKeyAndDirInsightInfo.setReplicatedDataSize( - deletedKeyAndDirInsightInfo.getReplicatedDataSize() + - omKeyInfo.getReplicatedSize()); - }); + private boolean validateStartPrefix(String startPrefix) { + + // Ensure startPrefix starts with '/' for non-empty values + startPrefix = startPrefix.startsWith("/") ? startPrefix : "/" + startPrefix; + + // Split the path to ensure it's at least at the bucket level (volume/bucket). + String[] pathComponents = startPrefix.split("/"); + if (pathComponents.length < 3 || pathComponents[2].isEmpty()) { + return false; // Invalid if not at bucket level or deeper + } + + return true; } private String createPath(OmKeyInfo omKeyInfo) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java deleted file mode 100644 index 58d2cd31076..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ /dev/null @@ -1,391 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.api; - -import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.recon.ReconUtils; -import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; -import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo; -import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; -import org.apache.hadoop.ozone.recon.api.types.NSSummary; -import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; -import org.apache.hadoop.ozone.recon.spi.impl.ReconNamespaceSummaryManagerImpl; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.inject.Inject; -import javax.ws.rs.DefaultValue; -import javax.ws.rs.GET; -import javax.ws.rs.Path; -import javax.ws.rs.QueryParam; -import javax.ws.rs.Produces; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Response; -import java.io.IOException; -import java.util.Map; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.ArrayList; - -import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_START_PREFIX; -import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OPEN_KEY_DEFAULT_SEARCH_LIMIT; -import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OPEN_KEY_SEARCH_DEFAULT_PREV_KEY; -import static org.apache.hadoop.ozone.recon.ReconResponseUtils.noMatchedKeysResponse; -import static org.apache.hadoop.ozone.recon.ReconResponseUtils.createBadRequestResponse; -import static org.apache.hadoop.ozone.recon.ReconResponseUtils.createInternalServerErrorResponse; -import static org.apache.hadoop.ozone.recon.ReconUtils.constructObjectPathWithPrefix; -import static org.apache.hadoop.ozone.recon.ReconUtils.validateNames; -import static org.apache.hadoop.ozone.recon.api.handlers.BucketHandler.getBucketHandler; -import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.normalizePath; -import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.parseRequestPath; - -/** - * REST endpoint for search implementation in OM DB Insight. 
- */ -@Path("/keys") -@Produces(MediaType.APPLICATION_JSON) -@AdminOnly -public class OMDBInsightSearchEndpoint { - - private OzoneStorageContainerManager reconSCM; - private final ReconOMMetadataManager omMetadataManager; - private static final Logger LOG = - LoggerFactory.getLogger(OMDBInsightSearchEndpoint.class); - private ReconNamespaceSummaryManagerImpl reconNamespaceSummaryManager; - - - @Inject - public OMDBInsightSearchEndpoint(OzoneStorageContainerManager reconSCM, - ReconOMMetadataManager omMetadataManager, - ReconNamespaceSummaryManagerImpl reconNamespaceSummaryManager) { - this.reconSCM = reconSCM; - this.omMetadataManager = omMetadataManager; - this.reconNamespaceSummaryManager = reconNamespaceSummaryManager; - } - - - /** - * Performs a search for open keys in the Ozone Manager (OM) database using a specified search prefix. - * This endpoint searches across both File System Optimized (FSO) and Object Store (non-FSO) layouts, - * compiling a list of keys that match the given prefix along with their data sizes. - *

    - * The search prefix must start from the bucket level ('/volumeName/bucketName/') or any specific directory - * or key level (e.g., '/volA/bucketA/dir1' for everything under 'dir1' inside 'bucketA' of 'volA'). - * The search operation matches the prefix against the start of keys' names within the OM DB. - *

    - * Example Usage: - * 1. A startPrefix of "/volA/bucketA/" retrieves every key under bucket 'bucketA' in volume 'volA'. - * 2. Specifying "/volA/bucketA/dir1" focuses the search within 'dir1' inside 'bucketA' of 'volA'. - * - * @param startPrefix The prefix for searching keys, starting from the bucket level or any specific path. - * @param limit Limits the number of returned keys. - * @param prevKey The key to start after for the next set of records. - * @return A KeyInsightInfoResponse, containing matching keys and their data sizes. - * @throws IOException On failure to access the OM database or process the operation. - * @throws IllegalArgumentException If the provided startPrefix or other arguments are invalid. - */ - @GET - @Path("/open/search") - public Response searchOpenKeys( - @DefaultValue(DEFAULT_START_PREFIX) @QueryParam("startPrefix") - String startPrefix, - @DefaultValue(RECON_OPEN_KEY_DEFAULT_SEARCH_LIMIT) @QueryParam("limit") - int limit, - @DefaultValue(RECON_OPEN_KEY_SEARCH_DEFAULT_PREV_KEY) @QueryParam("prevKey") String prevKey) throws IOException { - - try { - // Ensure startPrefix is not null or empty and starts with '/' - if (startPrefix == null || startPrefix.length() == 0) { - return createBadRequestResponse( - "Invalid startPrefix: Path must be at the bucket level or deeper."); - } - startPrefix = startPrefix.startsWith("/") ? startPrefix : "/" + startPrefix; - - // Split the path to ensure it's at least at the bucket level - String[] pathComponents = startPrefix.split("/"); - if (pathComponents.length < 3 || pathComponents[2].isEmpty()) { - return createBadRequestResponse( - "Invalid startPrefix: Path must be at the bucket level or deeper."); - } - - // Ensure the limit is non-negative - limit = Math.max(0, limit); - - // Initialize response object - KeyInsightInfoResponse insightResponse = new KeyInsightInfoResponse(); - long replicatedTotal = 0; - long unreplicatedTotal = 0; - boolean keysFound = false; // Flag to track if any keys are found - String lastKey = null; - - // Search for non-fso keys in KeyTable - Table openKeyTable = - omMetadataManager.getOpenKeyTable(BucketLayout.LEGACY); - Map obsKeys = - retrieveKeysFromTable(openKeyTable, startPrefix, limit, prevKey); - for (Map.Entry entry : obsKeys.entrySet()) { - keysFound = true; - KeyEntityInfo keyEntityInfo = - createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue()); - insightResponse.getNonFSOKeyInfoList() - .add(keyEntityInfo); // Add to non-FSO list - replicatedTotal += entry.getValue().getReplicatedSize(); - unreplicatedTotal += entry.getValue().getDataSize(); - lastKey = entry.getKey(); // Update lastKey - } - - // Search for fso keys in FileTable - Map fsoKeys = searchOpenKeysInFSO(startPrefix, limit, prevKey); - for (Map.Entry entry : fsoKeys.entrySet()) { - keysFound = true; - KeyEntityInfo keyEntityInfo = - createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue()); - insightResponse.getFsoKeyInfoList() - .add(keyEntityInfo); // Add to FSO list - replicatedTotal += entry.getValue().getReplicatedSize(); - unreplicatedTotal += entry.getValue().getDataSize(); - lastKey = entry.getKey(); // Update lastKey - } - - // If no keys were found, return a response indicating that no keys matched - if (!keysFound) { - return noMatchedKeysResponse(startPrefix); - } - - // Set the aggregated totals in the response - insightResponse.setReplicatedDataSize(replicatedTotal); - insightResponse.setUnreplicatedDataSize(unreplicatedTotal); - insightResponse.setLastKey(lastKey); - - // Return the 
response with the matched keys and their data sizes - return Response.ok(insightResponse).build(); - } catch (IOException e) { - // Handle IO exceptions and return an internal server error response - return createInternalServerErrorResponse( - "Error searching open keys in OM DB: " + e.getMessage()); - } catch (IllegalArgumentException e) { - // Handle illegal argument exceptions and return a bad request response - return createBadRequestResponse( - "Invalid startPrefix: " + e.getMessage()); - } - } - - public Map searchOpenKeysInFSO(String startPrefix, - int limit, String prevKey) - throws IOException, IllegalArgumentException { - Map matchedKeys = new LinkedHashMap<>(); - // Convert the search prefix to an object path for FSO buckets - String startPrefixObjectPath = convertToObjectPath(startPrefix); - String[] names = parseRequestPath(startPrefixObjectPath); - Table openFileTable = - omMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED); - - // If names.length <= 2, then the search prefix is at the volume or bucket level hence - // no need to find parent or extract id's or find subpaths as the openFileTable is - // suitable for volume and bucket level search - if (names.length > 2 && startPrefixObjectPath.endsWith(OM_KEY_PREFIX)) { - // Fetch the parent ID to search for - long parentId = Long.parseLong(names[names.length - 1]); - - // Fetch the nameSpaceSummary for the parent ID - NSSummary parentSummary = reconNamespaceSummaryManager.getNSSummary(parentId); - if (parentSummary == null) { - return matchedKeys; - } - List subPaths = new ArrayList<>(); - // Add the initial search prefix object path because it can have both openFiles - // and subdirectories with openFiles - subPaths.add(startPrefixObjectPath); - - // Recursively gather all subpaths - ReconUtils.gatherSubPaths(parentId, subPaths, Long.parseLong(names[0]), Long.parseLong(names[1]), - reconNamespaceSummaryManager); - - // Iterate over the subpaths and retrieve the open files - for (String subPath : subPaths) { - matchedKeys.putAll(retrieveKeysFromTable(openFileTable, subPath, limit - matchedKeys.size(), prevKey)); - if (matchedKeys.size() >= limit) { - break; - } - } - return matchedKeys; - } - - // If the search level is at the volume, bucket or key level, directly search the openFileTable - matchedKeys.putAll(retrieveKeysFromTable(openFileTable, startPrefixObjectPath, limit, prevKey)); - return matchedKeys; - } - - /** - * Converts a key prefix into an object path for FSO buckets, using IDs. - * - * This method transforms a user-provided path (e.g., "volume/bucket/dir1") into - * a database-friendly format ("/volumeID/bucketID/ParentId/") by replacing names - * with their corresponding IDs. It simplifies database queries for FSO bucket operations. - *

    -   * {@code
    -   * Examples:
    -   * - Input: "volume/bucket/key" -> Output: "/volumeID/bucketID/parentDirID/key"
    -   * - Input: "volume/bucket/dir1" -> Output: "/volumeID/bucketID/dir1ID/"
    -   * - Input: "volume/bucket/dir1/key1" -> Output: "/volumeID/bucketID/dir1ID/key1"
    -   * - Input: "volume/bucket/dir1/dir2" -> Output: "/volumeID/bucketID/dir2ID/"
    -   * }
    -   * 
    - * @param prevKeyPrefix The path to be converted. - * @return The object path as "/volumeID/bucketID/ParentId/" or an empty string if an error occurs. - * @throws IOException If database access fails. - * @throws IllegalArgumentException If the provided path is invalid or cannot be converted. - */ - public String convertToObjectPath(String prevKeyPrefix) throws IOException { - try { - String[] names = parseRequestPath(normalizePath(prevKeyPrefix, BucketLayout.FILE_SYSTEM_OPTIMIZED)); - Table openFileTable = omMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED); - - // Root-Level: Return the original path - if (names.length == 0) { - return prevKeyPrefix; - } - - // Volume-Level: Fetch the volumeID - String volumeName = names[0]; - validateNames(volumeName); - String volumeKey = omMetadataManager.getVolumeKey(volumeName); - long volumeId = omMetadataManager.getVolumeTable().getSkipCache(volumeKey).getObjectID(); - if (names.length == 1) { - return constructObjectPathWithPrefix(volumeId); - } - - // Bucket-Level: Fetch the bucketID - String bucketName = names[1]; - validateNames(bucketName); - String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); - OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().getSkipCache(bucketKey); - long bucketId = bucketInfo.getObjectID(); - if (names.length == 2 || bucketInfo.getBucketLayout() != BucketLayout.FILE_SYSTEM_OPTIMIZED) { - return constructObjectPathWithPrefix(volumeId, bucketId); - } - - // Directory or Key-Level: Check both key and directory - BucketHandler handler = - getBucketHandler(reconNamespaceSummaryManager, omMetadataManager, reconSCM, bucketInfo); - - if (names.length >= 3) { - String lastEntiry = names[names.length - 1]; - - // Check if the directory exists - OmDirectoryInfo dirInfo = handler.getDirInfo(names); - if (dirInfo != null && dirInfo.getName().equals(lastEntiry)) { - return constructObjectPathWithPrefix(volumeId, bucketId, dirInfo.getObjectID()) + OM_KEY_PREFIX; - } - - // Check if the key exists - long dirID = handler.getDirObjectId(names, names.length); - String keyKey = constructObjectPathWithPrefix(volumeId, bucketId, dirID) + - OM_KEY_PREFIX + lastEntiry; - OmKeyInfo keyInfo = openFileTable.getSkipCache(keyKey); - if (keyInfo != null && keyInfo.getFileName().equals(lastEntiry)) { - return constructObjectPathWithPrefix(volumeId, bucketId, - keyInfo.getParentObjectID()) + OM_KEY_PREFIX + lastEntiry; - } - - return prevKeyPrefix; - } - } catch (IllegalArgumentException e) { - LOG.error( - "IllegalArgumentException encountered while converting key prefix to object path: {}", - prevKeyPrefix, e); - throw e; - } catch (RuntimeException e) { - LOG.error( - "RuntimeException encountered while converting key prefix to object path: {}", - prevKeyPrefix, e); - return prevKeyPrefix; - } - return prevKeyPrefix; - } - - - /** - * Common method to retrieve keys from a table based on a search prefix and a limit. - * - * @param table The table to retrieve keys from. - * @param startPrefix The search prefix to match keys against. - * @param limit The maximum number of keys to retrieve. - * @param prevKey The key to start after for the next set of records. - * @return A map of keys and their corresponding OmKeyInfo objects. - * @throws IOException If there are problems accessing the table. 
- */ - private Map retrieveKeysFromTable( - Table table, String startPrefix, int limit, String prevKey) - throws IOException { - Map matchedKeys = new LinkedHashMap<>(); - try (TableIterator> keyIter = table.iterator()) { - // If a previous key is provided, seek to the previous key and skip it. - if (!prevKey.isEmpty()) { - keyIter.seek(prevKey); - if (keyIter.hasNext()) { - // Skip the previous key - keyIter.next(); - } - } else { - // If no previous key is provided, start from the search prefix. - keyIter.seek(startPrefix); - } - while (keyIter.hasNext() && matchedKeys.size() < limit) { - Table.KeyValue entry = keyIter.next(); - String dbKey = entry.getKey(); - if (!dbKey.startsWith(startPrefix)) { - break; // Exit the loop if the key no longer matches the prefix - } - matchedKeys.put(dbKey, entry.getValue()); - } - } catch (IOException exception) { - LOG.error("Error retrieving keys from table for path: {}", startPrefix, exception); - throw exception; - } - return matchedKeys; - } - - /** - * Creates a KeyEntityInfo object from an OmKeyInfo object and the corresponding key. - * - * @param dbKey The key in the database corresponding to the OmKeyInfo object. - * @param keyInfo The OmKeyInfo object to create the KeyEntityInfo from. - * @return The KeyEntityInfo object created from the OmKeyInfo object and the key. - */ - private KeyEntityInfo createKeyEntityInfoFromOmKeyInfo(String dbKey, - OmKeyInfo keyInfo) { - KeyEntityInfo keyEntityInfo = new KeyEntityInfo(); - keyEntityInfo.setKey(dbKey); // Set the DB key - keyEntityInfo.setPath(keyInfo.getKeyName()); // Assuming path is the same as key name - keyEntityInfo.setInStateSince(keyInfo.getCreationTime()); - keyEntityInfo.setSize(keyInfo.getDataSize()); - keyEntityInfo.setReplicatedSize(keyInfo.getReplicatedSize()); - keyEntityInfo.setReplicationConfig(keyInfo.getReplicationConfig()); - return keyEntityInfo; - } - -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationServiceWithZeroCopy.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ServiceNotReadyException.java similarity index 73% rename from hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationServiceWithZeroCopy.java rename to hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ServiceNotReadyException.java index 00891cf3e24..4190cc279ce 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationServiceWithZeroCopy.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ServiceNotReadyException.java @@ -1,4 +1,4 @@ -/* +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,17 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.container.replication; -import org.junit.jupiter.api.BeforeEach; +package org.apache.hadoop.ozone.recon.api; /** - * Tests {@link GrpcReplicationService}. + * This exception is thrown when the REST API service is still initializing and not yet ready.
*/ -class TestGrpcReplicationServiceWithZeroCopy - extends TestGrpcReplicationService { - @BeforeEach - public void setUp() throws Exception { - init(true); +public class ServiceNotReadyException extends RuntimeException { + public ServiceNotReadyException(String message) { + super(message); } } + diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/AclMetadata.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/AclMetadata.java index fae47b3b368..51994abfbad 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/AclMetadata.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/AclMetadata.java @@ -73,7 +73,6 @@ public static Builder newBuilder() { /** * Builder for AclMetadata. */ - @SuppressWarnings("checkstyle:hiddenfield") public static final class Builder { private String type; private String name; @@ -84,23 +83,23 @@ public Builder() { } - public Builder withType(String type) { + public Builder setType(String type) { this.type = type; return this; } - public Builder withName(String name) { + public Builder setName(String name) { this.name = name; return this; } - public Builder withScope(String scope) { + public Builder setScope(String scope) { this.scope = scope; return this; } - public Builder withAclList(List aclList) { + public Builder setAclList(List aclList) { this.aclList = aclList; return this; } @@ -127,10 +126,10 @@ public static AclMetadata fromOzoneAcl(OzoneAcl ozoneAcl) { AclMetadata.Builder builder = AclMetadata.newBuilder(); - return builder.withType(ozoneAcl.getType().toString().toUpperCase()) - .withName(ozoneAcl.getName()) - .withScope(ozoneAcl.getAclScope().toString().toUpperCase()) - .withAclList(ozoneAcl.getAclStringList()) + return builder.setType(ozoneAcl.getType().toString().toUpperCase()) + .setName(ozoneAcl.getName()) + .setScope(ozoneAcl.getAclScope().toString().toUpperCase()) + .setAclList(ozoneAcl.getAclStringList()) .build(); } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ClusterStateResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ClusterStateResponse.java index 6e595891c4d..424584a2be4 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ClusterStateResponse.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ClusterStateResponse.java @@ -138,7 +138,6 @@ private ClusterStateResponse(Builder b) { /** * Builder for ClusterStateResponse. 
*/ - @SuppressWarnings("checkstyle:hiddenfield") public static final class Builder { private int pipelines; private int totalDatanodes; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java index 06c20a963a2..ef64e921a31 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java @@ -84,10 +84,6 @@ public final class DatanodeMetadata { @JsonInclude(JsonInclude.Include.NON_NULL) private String revision; - @XmlElement(name = "buildDate") - @JsonInclude(JsonInclude.Include.NON_NULL) - private String buildDate; - @XmlElement(name = "layoutVersion") @JsonInclude(JsonInclude.Include.NON_DEFAULT) private int layoutVersion; @@ -110,7 +106,6 @@ private DatanodeMetadata(Builder builder) { this.version = builder.version; this.setupTime = builder.setupTime; this.revision = builder.revision; - this.buildDate = builder.buildDate; this.layoutVersion = builder.layoutVersion; this.networkLocation = builder.networkLocation; } @@ -167,10 +162,6 @@ public String getRevision() { return revision; } - public String getBuildDate() { - return buildDate; - } - public int getLayoutVersion() { return layoutVersion; } @@ -191,7 +182,6 @@ public static Builder newBuilder() { /** * Builder for DatanodeMetadata. */ - @SuppressWarnings("checkstyle:hiddenfield") public static final class Builder { private String hostname; private String uuid; @@ -206,7 +196,6 @@ public static final class Builder { private String version; private long setupTime; private String revision; - private String buildDate; private int layoutVersion; private String networkLocation; @@ -216,83 +205,78 @@ public Builder() { this.leaderCount = 0; } - public Builder withHostname(String hostname) { + public Builder setHostname(String hostname) { this.hostname = hostname; return this; } - public Builder withState(NodeState state) { + public Builder setState(NodeState state) { this.state = state; return this; } - public Builder withOperationalState(NodeOperationalState opState) { - this.opState = opState; + public Builder setOperationalState(NodeOperationalState operationalState) { + this.opState = operationalState; return this; } - public Builder withLastHeartbeat(long lastHeartbeat) { + public Builder setLastHeartbeat(long lastHeartbeat) { this.lastHeartbeat = lastHeartbeat; return this; } - public Builder withDatanodeStorageReport(DatanodeStorageReport + public Builder setDatanodeStorageReport(DatanodeStorageReport datanodeStorageReport) { this.datanodeStorageReport = datanodeStorageReport; return this; } - public Builder withPipelines(List pipelines) { + public Builder setPipelines(List pipelines) { this.pipelines = pipelines; return this; } - public Builder withContainers(int containers) { + public Builder setContainers(int containers) { this.containers = containers; return this; } - public Builder withOpenContainers(int openContainers) { + public Builder setOpenContainers(int openContainers) { this.openContainers = openContainers; return this; } - public Builder withLeaderCount(int leaderCount) { + public Builder setLeaderCount(int leaderCount) { this.leaderCount = leaderCount; return this; } - public Builder withUUid(String uuid) { + public Builder setUuid(String uuid) { this.uuid = uuid; return this; } - public Builder withVersion(String version) { + 
public Builder setVersion(String version) { this.version = version; return this; } - public Builder withSetupTime(long setupTime) { + public Builder setSetupTime(long setupTime) { this.setupTime = setupTime; return this; } - public Builder withRevision(String revision) { + public Builder setRevision(String revision) { this.revision = revision; return this; } - public Builder withBuildDate(String buildDate) { - this.buildDate = buildDate; - return this; - } - - public Builder withLayoutVersion(int layoutVersion) { + public Builder setLayoutVersion(int layoutVersion) { this.layoutVersion = layoutVersion; return this; } - public Builder withNetworkLocation(String networkLocation) { + public Builder setNetworkLocation(String networkLocation) { this.networkLocation = networkLocation; return this; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfo.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfo.java index d7cd3599190..8a56cbbd33f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfo.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfo.java @@ -143,7 +143,7 @@ public boolean isKey() { return isKey; } - public void setKey(boolean key) { - isKey = key; + public void setIsKey(boolean isKey) { + this.isKey = isKey; } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfoProtoWrapper.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfoProtoWrapper.java new file mode 100644 index 00000000000..890f8fbd3aa --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfoProtoWrapper.java @@ -0,0 +1,145 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.api.types; + +import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.utils.db.Codec; +import org.apache.hadoop.hdds.utils.db.DelegatedCodec; +import org.apache.hadoop.hdds.utils.db.Proto2Codec; +import org.apache.hadoop.ozone.om.helpers.QuotaUtil; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; + +/** + * POJO object wrapper for metadata of a given key/file. This class wraps a KeyInfo protobuf + * object and delegates most accessors to it. + */ +public final class KeyEntityInfoProtoWrapper { + + public static Codec getCodec() { + return new DelegatedCodec<>( + Proto2Codec.get(OzoneManagerProtocolProtos.KeyInfo.getDefaultInstance()), + KeyEntityInfoProtoWrapper::getFromProtobuf, + KeyEntityInfoProtoWrapper::toProtobuf, + KeyEntityInfoProtoWrapper.class); + } + + private final OzoneManagerProtocolProtos.KeyInfo keyInfoProto; + + /** This is key table key of rocksDB and will help UI to implement pagination + * where UI will use the last record key to send in API as preKeyPrefix. */ + @JsonProperty("key") + private String key; + + /** Path of a key/file. */ + @JsonProperty("path") + private String path; + + @JsonProperty("replicatedSize") + private final long replicatedSize; + + @JsonProperty("replicationInfo") + private final ReplicationConfig replicationConfig; + + private KeyEntityInfoProtoWrapper(OzoneManagerProtocolProtos.KeyInfo proto) { + keyInfoProto = proto; + replicationConfig = ReplicationConfig.fromProto(proto.getType(), proto.getFactor(), + proto.getEcReplicationConfig()); + this.replicatedSize = QuotaUtil.getReplicatedSize(getSize(), getReplicationConfig()); + } + + public static KeyEntityInfoProtoWrapper getFromProtobuf(OzoneManagerProtocolProtos.KeyInfo keyInfo) { + return new KeyEntityInfoProtoWrapper(keyInfo); + } + + public OzoneManagerProtocolProtos.KeyInfo toProtobuf() { + throw new UnsupportedOperationException("This method is not supported."); + } + + @JsonProperty("key") + public String getKey() { + if (key == null) { + throw new IllegalStateException("Key must be set to correctly serialize this object."); + } + return key; + } + + public void setKey(String key) { + this.key = key; + } + + @JsonProperty("path") + public String getPath() { + if (path == null) { + throw new IllegalStateException("Path must be set to correctly serialize this object."); + } + return path; + } + + public void setPath(String path) { + this.path = path; + } + + @JsonProperty("size") + public long getSize() { + return keyInfoProto.getDataSize(); + } + + @JsonProperty("replicatedSize") + public long getReplicatedSize() { + return replicatedSize; + } + + @JsonProperty("replicationInfo") + public ReplicationConfig getReplicationConfig() { + return replicationConfig; + } + + @JsonProperty("creationTime") + public long getCreationTime() { + return keyInfoProto.getCreationTime(); + } + + @JsonProperty("modificationTime") + public long getModificationTime() { + return keyInfoProto.getModificationTime(); + } + + @JsonProperty("isKey") + public boolean getIsKey() { + return keyInfoProto.getIsFile(); + } + + public long 
getParentId() { + return keyInfoProto.getParentID(); + } + + public String getVolumeName() { + return keyInfoProto.getVolumeName(); + } + + public String getBucketName() { + return keyInfoProto.getBucketName(); + } + + /** Returns the key name of the key stored in the OM Key Info object. */ + public String getKeyName() { + return keyInfoProto.getKeyName(); + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ListKeysResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ListKeysResponse.java index 7220060aeb0..2770e7f7f6f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ListKeysResponse.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ListKeysResponse.java @@ -51,7 +51,7 @@ public class ListKeysResponse { /** list of keys. */ @JsonProperty("keys") - private List keys; + private List keys; public ListKeysResponse() { @@ -95,11 +95,11 @@ public void setPath(String path) { this.path = path; } - public List getKeys() { + public List getKeys() { return keys; } - public void setKeys(List keys) { + public void setKeys(List keys) { this.keys = keys; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NamespaceSummaryResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NamespaceSummaryResponse.java index 5ccfd988731..ccbaa35788c 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NamespaceSummaryResponse.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NamespaceSummaryResponse.java @@ -49,8 +49,8 @@ public class NamespaceSummaryResponse { * * @return Builder */ - public static NamespaceSummaryResponse.Builder newBuilder() { - return new NamespaceSummaryResponse.Builder(); + public static Builder newBuilder() { + return new Builder(); } public NamespaceSummaryResponse(Builder b) { @@ -104,7 +104,6 @@ public void setObjectDBInfo(ObjectDBInfo objectDBInfo) { /** * Builder for NamespaceSummaryResponse. 
*/ - @SuppressWarnings("checkstyle:hiddenfield") public static final class Builder { private String path; private EntityType entityType; @@ -119,30 +118,30 @@ public Builder() { this.entityType = EntityType.ROOT; } - public NamespaceSummaryResponse.Builder setPath(String path) { + public Builder setPath(String path) { this.path = path; return this; } - public NamespaceSummaryResponse.Builder setEntityType( + public Builder setEntityType( EntityType entityType) { this.entityType = entityType; return this; } - public NamespaceSummaryResponse.Builder setCountStats( + public Builder setCountStats( CountStats countStats) { this.countStats = countStats; return this; } - public NamespaceSummaryResponse.Builder setObjectDBInfo( + public Builder setObjectDBInfo( ObjectDBInfo objectDBInfo) { this.objectDBInfo = objectDBInfo; return this; } - public NamespaceSummaryResponse.Builder setStatus( + public Builder setStatus( ResponseStatus status) { this.status = status; return this; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ParamInfo.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ParamInfo.java index 345b0429076..e4bcea47b4d 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ParamInfo.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ParamInfo.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.ozone.recon.api.types; +import org.apache.hadoop.ozone.recon.ReconUtils; + +import java.util.TimeZone; + /** * Wrapper object for statistics of records of a page in API response. */ @@ -37,6 +41,8 @@ public class ParamInfo { */ private String creationDate; + private long creationDateEpoch = -1; + /** * */ @@ -87,6 +93,14 @@ public String getCreationDate() { return creationDate; } + public long getCreationDateEpoch() { + if (creationDateEpoch == -1) { + creationDateEpoch = ReconUtils.convertToEpochMillis( + getCreationDate(), "MM-dd-yyyy HH:mm:ss", TimeZone.getDefault()); + } + return creationDateEpoch; + } + public String getReplicationType() { return replicationType; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/PipelineMetadata.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/PipelineMetadata.java index d26f87f6f78..2a2f223bb95 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/PipelineMetadata.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/PipelineMetadata.java @@ -133,7 +133,6 @@ private PipelineMetadata(Builder b) { /** * Builder for PipelineMetadata. 
*/ - @SuppressWarnings("checkstyle:hiddenfield") public static class Builder { private UUID pipelineId; private PipelineState status; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java index f3b273451a2..aa6c5a765d1 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java @@ -58,6 +58,11 @@ private NSSummaryCodec() { // singleton } + @Override + public Class getTypeClass() { + return NSSummary.class; + } + @Override public byte[] toPersistedFormat(NSSummary object) throws IOException { Set childDirs = object.getChildDir(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java index 14ae997073c..82913f453d0 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java @@ -23,9 +23,12 @@ import java.util.List; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfoProtoWrapper; /** * Interface for the OM Metadata Manager + DB store maintained by @@ -113,4 +116,13 @@ List listBucketsUnderVolume( */ OzoneConfiguration getOzoneConfiguration(); + /** + * A lighter weight version of the getKeyTable method that only returns the KeyEntityInfo wrapper object. This + * avoids creating a full OMKeyInfo object for each key if it is not needed. + * @param bucketLayout The Bucket layout to use for the key table. + * @return A table of keys and their metadata. 
+ * @throws IOException + */ + Table getKeyTableLite(BucketLayout bucketLayout) throws IOException; + } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java index 91cb61369fc..f750a0abb6a 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java @@ -41,9 +41,11 @@ import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.recon.ReconUtils; +import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfoProtoWrapper; import org.eclipse.jetty.util.StringUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -97,6 +99,7 @@ private void initializeNewRdbStore(File dbFile) throws IOException { .setName(dbFile.getName()) .setPath(dbFile.toPath().getParent()); addOMTablesAndCodecs(dbStoreBuilder); + dbStoreBuilder.addCodec(KeyEntityInfoProtoWrapper.class, KeyEntityInfoProtoWrapper.getCodec()); setStore(dbStoreBuilder.build()); LOG.info("Created OM DB handle from snapshot at {}.", dbFile.getAbsolutePath()); @@ -109,6 +112,12 @@ private void initializeNewRdbStore(File dbFile) throws IOException { } } + @Override + public Table getKeyTableLite(BucketLayout bucketLayout) throws IOException { + String tableName = bucketLayout.isFileSystemOptimized() ? FILE_TABLE : KEY_TABLE; + return getStore().getTable(tableName, String.class, KeyEntityInfoProtoWrapper.class); + } + @Override public void updateOmDB(File newDbLocation) throws IOException { if (getStore() != null) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ContainerReplicaHistoryList.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ContainerReplicaHistoryList.java index 5895d3e133c..7afed9c1ce9 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ContainerReplicaHistoryList.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ContainerReplicaHistoryList.java @@ -34,11 +34,11 @@ * For Recon DB table definition. 
*/ public class ContainerReplicaHistoryList { - private static final Codec CODEC - = new DelegatedCodec<>(Proto2Codec.get( - ContainerReplicaHistoryListProto.getDefaultInstance()), + private static final Codec CODEC = new DelegatedCodec<>( + Proto2Codec.get(ContainerReplicaHistoryListProto.getDefaultInstance()), ContainerReplicaHistoryList::fromProto, - ContainerReplicaHistoryList::toProto); + ContainerReplicaHistoryList::toProto, + ContainerReplicaHistoryList.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java index 65a9530c5ca..2ebeafcccb9 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java @@ -205,17 +205,6 @@ public String getRevision(DatanodeDetails datanodeDetails) { EMPTY_DATANODE_DETAILS).getRevision(); } - /** - * Returns the build date of the given node. - * - * @param datanodeDetails DatanodeDetails - * @return buildDate - */ - public String getBuildDate(DatanodeDetails datanodeDetails) { - return inMemDatanodeDetails.getOrDefault(datanodeDetails.getUuid(), - EMPTY_DATANODE_DETAILS).getBuildDate(); - } - @Override public void onMessage(CommandForDatanode commandForDatanode, EventPublisher ignored) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconSCMDBDefinition.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconSCMDBDefinition.java index 1ea2f7b1312..4970d5da915 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconSCMDBDefinition.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconSCMDBDefinition.java @@ -32,27 +32,27 @@ /** * Recon SCM db file for ozone. 
*/ -public class ReconSCMDBDefinition extends SCMDBDefinition { +public final class ReconSCMDBDefinition extends SCMDBDefinition { private static final Codec UUID_CODEC = new DelegatedCodec<>( StringCodec.get(), UUID::fromString, UUID::toString, - DelegatedCodec.CopyType.SHALLOW); + UUID.class, DelegatedCodec.CopyType.SHALLOW); public static final String RECON_SCM_DB_NAME = "recon-scm.db"; - public static final DBColumnFamilyDefinition - NODES = - new DBColumnFamilyDefinition( - "nodes", - UUID.class, - UUID_CODEC, - DatanodeDetails.class, - DatanodeDetails.getCodec()); + public static final DBColumnFamilyDefinition NODES + = new DBColumnFamilyDefinition<>("nodes", UUID_CODEC, DatanodeDetails.getCodec()); private static final Map> COLUMN_FAMILIES = DBColumnFamilyDefinition.newUnmodifiableMap( - new SCMDBDefinition().getMap(), NODES); + SCMDBDefinition.get().getMap(), NODES); - public ReconSCMDBDefinition() { + private static final ReconSCMDBDefinition INSTANCE = new ReconSCMDBDefinition(); + + public static ReconSCMDBDefinition get() { + return INSTANCE; + } + + private ReconSCMDBDefinition() { super(COLUMN_FAMILIES); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java index c773187c4b1..eff68848a2f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java @@ -131,6 +131,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.sql.DataSource; + /** * Recon's 'lite' version of SCM. */ @@ -156,6 +158,7 @@ public class ReconStorageContainerManagerFacade private final SCMHAManager scmhaManager; private final SequenceIdGenerator sequenceIdGen; private final ContainerHealthTask containerHealthTask; + private final DataSource dataSource; private DBStore dbStore; private ReconNodeManager nodeManager; @@ -188,7 +191,8 @@ public ReconStorageContainerManagerFacade(OzoneConfiguration conf, ReconContainerMetadataManager reconContainerMetadataManager, ReconUtils reconUtils, ReconSafeModeManager safeModeManager, - ReconContext reconContext) throws IOException { + ReconContext reconContext, + DataSource dataSource) throws IOException { reconNodeDetails = reconUtils.getReconNodeDetails(conf); this.threadNamePrefix = reconNodeDetails.threadNamePrefix(); this.eventQueue = new EventQueue(threadNamePrefix); @@ -218,8 +222,7 @@ public ReconStorageContainerManagerFacade(OzoneConfiguration conf, this.scmStorageConfig = new ReconStorageConfig(conf, reconUtils); this.clusterMap = new NetworkTopologyImpl(conf); - this.dbStore = DBStoreBuilder - .createDBStore(ozoneConfiguration, new ReconSCMDBDefinition()); + this.dbStore = DBStoreBuilder.createDBStore(ozoneConfiguration, ReconSCMDBDefinition.get()); this.scmLayoutVersionManager = new HDDSLayoutVersionManager(scmStorageConfig.getLayoutVersion()); @@ -286,6 +289,8 @@ public ReconStorageContainerManagerFacade(OzoneConfiguration conf, containerCountBySizeDao, utilizationSchemaDefinition); + this.dataSource = dataSource; + StaleNodeHandler staleNodeHandler = new ReconStaleNodeHandler(nodeManager, pipelineManager, conf, pipelineSyncTask); @@ -627,8 +632,7 @@ private void deleteOldSCMDB() throws IOException { private void initializeNewRdbStore(File dbFile) throws IOException { try { - DBStore newStore = 
createDBAndAddSCMTablesAndCodecs( - dbFile, new ReconSCMDBDefinition()); + final DBStore newStore = createDBAndAddSCMTablesAndCodecs(dbFile, ReconSCMDBDefinition.get()); Table nodeTable = ReconSCMDBDefinition.NODES.getTable(dbStore); Table newNodeTable = @@ -756,4 +760,8 @@ public ContainerCountBySizeDao getContainerCountBySizeDao() { public ReconContext getReconContext() { return reconContext; } + + public DataSource getDataSource() { + return dataSource; + } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java index 01a630a5235..500c01bfde2 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java @@ -18,9 +18,6 @@ package org.apache.hadoop.ozone.recon.spi.impl; -import static org.apache.commons.compress.utils.CharsetNames.UTF_8; - -import java.io.IOException; import java.nio.ByteBuffer; import org.apache.commons.lang3.ArrayUtils; @@ -31,6 +28,8 @@ import com.google.common.base.Preconditions; import com.google.common.primitives.Longs; +import static java.nio.charset.StandardCharsets.UTF_8; + /** * Codec to serialize/deserialize {@link ContainerKeyPrefix}. */ @@ -51,8 +50,12 @@ private ContainerKeyPrefixCodec() { } @Override - public byte[] toPersistedFormat(ContainerKeyPrefix containerKeyPrefix) - throws IOException { + public Class getTypeClass() { + return ContainerKeyPrefix.class; + } + + @Override + public byte[] toPersistedFormat(ContainerKeyPrefix containerKeyPrefix) { Preconditions.checkNotNull(containerKeyPrefix, "Null object can't be converted to byte array."); byte[] containerIdBytes = Longs.toByteArray(containerKeyPrefix @@ -76,9 +79,7 @@ public byte[] toPersistedFormat(ContainerKeyPrefix containerKeyPrefix) } @Override - public ContainerKeyPrefix fromPersistedFormat(byte[] rawData) - throws IOException { - + public ContainerKeyPrefix fromPersistedFormat(byte[] rawData) { // First 8 bytes is the containerId. long containerIdFromDB = ByteBuffer.wrap(ArrayUtils.subarray( rawData, 0, Long.BYTES)).getLong(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/KeyPrefixContainerCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/KeyPrefixContainerCodec.java index 7baca152b28..70b1d65837c 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/KeyPrefixContainerCodec.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/KeyPrefixContainerCodec.java @@ -24,10 +24,9 @@ import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.ozone.recon.api.types.KeyPrefixContainer; -import java.io.IOException; import java.nio.ByteBuffer; -import static org.apache.commons.compress.utils.CharsetNames.UTF_8; +import static java.nio.charset.StandardCharsets.UTF_8; /** * Codec to serialize/deserialize {@link KeyPrefixContainer}. 
@@ -49,8 +48,12 @@ private KeyPrefixContainerCodec() { private static final String KEY_DELIMITER = "_"; @Override - public byte[] toPersistedFormat(KeyPrefixContainer keyPrefixContainer) - throws IOException { + public Class getTypeClass() { + return KeyPrefixContainer.class; + } + + @Override + public byte[] toPersistedFormat(KeyPrefixContainer keyPrefixContainer) { Preconditions.checkNotNull(keyPrefixContainer, "Null object can't be converted to byte array."); byte[] keyPrefixBytes = keyPrefixContainer.getKeyPrefix().getBytes(UTF_8); @@ -75,9 +78,7 @@ public byte[] toPersistedFormat(KeyPrefixContainer keyPrefixContainer) } @Override - public KeyPrefixContainer fromPersistedFormat(byte[] rawData) - throws IOException { - + public KeyPrefixContainer fromPersistedFormat(byte[] rawData) { // When reading from byte[], we can always expect to have the key, version // and version parts in the byte array. byte[] keyBytes = ArrayUtils.subarray(rawData, diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java index 8cb3b4188ed..cde24d7570b 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java @@ -46,44 +46,34 @@ public ReconDBDefinition(String dbName) { CONTAINER_KEY = new DBColumnFamilyDefinition<>( "containerKeyTable", - ContainerKeyPrefix.class, ContainerKeyPrefixCodec.get(), - Integer.class, IntegerCodec.get()); public static final DBColumnFamilyDefinition KEY_CONTAINER = new DBColumnFamilyDefinition<>( "keyContainerTable", - KeyPrefixContainer.class, KeyPrefixContainerCodec.get(), - Integer.class, IntegerCodec.get()); public static final DBColumnFamilyDefinition CONTAINER_KEY_COUNT = new DBColumnFamilyDefinition<>( "containerKeyCountTable", - Long.class, LongCodec.get(), - Long.class, LongCodec.get()); public static final DBColumnFamilyDefinition REPLICA_HISTORY = new DBColumnFamilyDefinition( "replica_history", - Long.class, LongCodec.get(), - ContainerReplicaHistoryList.class, ContainerReplicaHistoryList.getCodec()); public static final DBColumnFamilyDefinition NAMESPACE_SUMMARY = new DBColumnFamilyDefinition( "namespaceSummaryTable", - Long.class, LongCodec.get(), - NSSummary.class, NSSummaryCodec.get()); // Container Replica History with bcsId tracking. 
@@ -91,9 +81,7 @@ public ReconDBDefinition(String dbName) { REPLICA_HISTORY_V2 = new DBColumnFamilyDefinition( "replica_history_v2", - Long.class, LongCodec.get(), - ContainerReplicaHistoryList.class, ContainerReplicaHistoryList.getCodec()); private static final Map> diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java index 41e6bf962a7..d1f98c49bdc 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java @@ -49,14 +49,12 @@ public class OMDBUpdatesHandler extends ManagedWriteBatch.Handler { private OMMetadataManager omMetadataManager; private List omdbUpdateEvents = new ArrayList<>(); private Map> omdbLatestUpdateEvents = new HashMap<>(); - private OMDBDefinition omdbDefinition; - private OmUpdateEventValidator omUpdateEventValidator; + private final OMDBDefinition omdbDefinition = OMDBDefinition.get(); + private final OmUpdateEventValidator omUpdateEventValidator = new OmUpdateEventValidator(omdbDefinition); public OMDBUpdatesHandler(OMMetadataManager metadataManager) { omMetadataManager = metadataManager; tablesNames = metadataManager.getStore().getTableNames(); - omdbDefinition = new OMDBDefinition(); - omUpdateEventValidator = new OmUpdateEventValidator(omdbDefinition); } @Override diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/InitialConstraintUpgradeAction.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/InitialConstraintUpgradeAction.java new file mode 100644 index 00000000000..e75efd2116a --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/InitialConstraintUpgradeAction.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hadoop.ozone.recon.upgrade; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; +import org.hadoop.ozone.recon.schema.ContainerSchemaDefinition; +import org.jooq.DSLContext; +import org.jooq.impl.DSL; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.sql.DataSource; +import java.sql.Connection; +import java.sql.SQLException; +import java.util.Arrays; + +import static org.apache.hadoop.ozone.recon.upgrade.ReconLayoutFeature.INITIAL_VERSION; +import static org.apache.hadoop.ozone.recon.upgrade.ReconUpgradeAction.UpgradeActionType.FINALIZE; +import static org.hadoop.ozone.recon.codegen.SqlDbUtils.TABLE_EXISTS_CHECK; +import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UNHEALTHY_CONTAINERS_TABLE_NAME; +import static org.jooq.impl.DSL.field; +import static org.jooq.impl.DSL.name; + +/** + * Upgrade action for the INITIAL schema version, which manages constraints + * for the UNHEALTHY_CONTAINERS table. + */ +@UpgradeActionRecon(feature = INITIAL_VERSION, type = FINALIZE) +public class InitialConstraintUpgradeAction implements ReconUpgradeAction { + + private static final Logger LOG = LoggerFactory.getLogger(InitialConstraintUpgradeAction.class); + private DataSource dataSource; + private DSLContext dslContext; + + @Override + public void execute(ReconStorageContainerManagerFacade scmFacade) throws SQLException { + this.dataSource = scmFacade.getDataSource(); + try (Connection conn = dataSource.getConnection()) { + if (!TABLE_EXISTS_CHECK.test(conn, UNHEALTHY_CONTAINERS_TABLE_NAME)) { + return; + } + dslContext = DSL.using(conn); + // Drop the existing constraint + dropConstraint(); + // Add the updated constraint with all enum states + addUpdatedConstraint(); + } catch (SQLException e) { + throw new SQLException("Failed to execute InitialConstraintUpgradeAction", e); + } + } + + /** + * Drops the existing constraint from the UNHEALTHY_CONTAINERS table. + */ + private void dropConstraint() { + String constraintName = UNHEALTHY_CONTAINERS_TABLE_NAME + "ck1"; + dslContext.alterTable(UNHEALTHY_CONTAINERS_TABLE_NAME) + .dropConstraint(constraintName) + .execute(); + LOG.debug("Dropped the existing constraint: {}", constraintName); + } + + /** + * Adds the updated constraint directly within this class. 
+ */ + private void addUpdatedConstraint() { + String[] enumStates = Arrays + .stream(ContainerSchemaDefinition.UnHealthyContainerStates.values()) + .map(Enum::name) + .toArray(String[]::new); + + dslContext.alterTable(ContainerSchemaDefinition.UNHEALTHY_CONTAINERS_TABLE_NAME) + .add(DSL.constraint(ContainerSchemaDefinition.UNHEALTHY_CONTAINERS_TABLE_NAME + "ck1") + .check(field(name("container_state")) + .in(enumStates))) + .execute(); + + LOG.info("Added the updated constraint to the UNHEALTHY_CONTAINERS table for enum state values: {}", + Arrays.toString(enumStates)); + } + + @Override + public UpgradeActionType getType() { + return FINALIZE; + } + + @VisibleForTesting + public void setDataSource(DataSource dataSource) { + this.dataSource = dataSource; + } + + @VisibleForTesting + public void setDslContext(DSLContext dslContext) { + this.dslContext = dslContext; + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java new file mode 100644 index 00000000000..96969c9f3d2 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.ozone.recon.upgrade; + +import org.reflections.Reflections; + +import java.util.EnumMap; +import java.util.Optional; +import java.util.Set; + +/** + * Enum representing Recon layout features with their version, description, + * and associated upgrade action to be executed during an upgrade. + */ +public enum ReconLayoutFeature { + // Represents the starting point for Recon's layout versioning system. + INITIAL_VERSION(0, "Recon Layout Versioning Introduction"); + + private final int version; + private final String description; + private final EnumMap actions = + new EnumMap<>(ReconUpgradeAction.UpgradeActionType.class); + + ReconLayoutFeature(final int version, String description) { + this.version = version; + this.description = description; + } + + public int getVersion() { + return version; + } + + public String getDescription() { + return description; + } + + /** + * Retrieves the upgrade action for the specified {@link ReconUpgradeAction.UpgradeActionType}. + * + * @param type The type of the upgrade action (e.g., FINALIZE). + * @return An {@link Optional} containing the upgrade action if present. + */ + public Optional getAction(ReconUpgradeAction.UpgradeActionType type) { + return Optional.ofNullable(actions.get(type)); + } + + /** + * Associates a given upgrade action with a specific upgrade phase for this feature. + * + * @param type The phase/type of the upgrade action. 
+ * @param action The upgrade action to associate with this feature. + */ + public void addAction(ReconUpgradeAction.UpgradeActionType type, ReconUpgradeAction action) { + actions.put(type, action); + } + + /** + * Scans the classpath for all classes annotated with {@link UpgradeActionRecon} + * and registers their upgrade actions for the corresponding feature and phase. + * This method dynamically loads and registers all upgrade actions based on their + * annotations. + */ + public static void registerUpgradeActions() { + Reflections reflections = new Reflections("org.apache.hadoop.ozone.recon.upgrade"); + Set> actionClasses = reflections.getTypesAnnotatedWith(UpgradeActionRecon.class); + + for (Class actionClass : actionClasses) { + try { + ReconUpgradeAction action = (ReconUpgradeAction) actionClass.getDeclaredConstructor().newInstance(); + UpgradeActionRecon annotation = actionClass.getAnnotation(UpgradeActionRecon.class); + annotation.feature().addAction(annotation.type(), action); + } catch (Exception e) { + throw new RuntimeException("Failed to register upgrade action: " + actionClass.getSimpleName(), e); + } + } + } + + /** + * Returns the list of all layout feature values. + * + * @return An array of all {@link ReconLayoutFeature} values. + */ + public static ReconLayoutFeature[] getValues() { + return ReconLayoutFeature.values(); + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutVersionManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutVersionManager.java new file mode 100644 index 00000000000..a595b6a0c10 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutVersionManager.java @@ -0,0 +1,161 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.ozone.recon.upgrade; + +import org.apache.hadoop.ozone.recon.ReconContext; +import org.apache.hadoop.ozone.recon.ReconSchemaVersionTableManager; +import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.Arrays; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; + +/** + * ReconLayoutVersionManager is responsible for managing the layout version of the Recon service. + * It determines the current Metadata Layout Version (MLV) and Software Layout Version (SLV) of the + * Recon service, and finalizes the layout features that need to be upgraded. 
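+ *
+ * <p>Illustrative usage at startup (a sketch; {@code scmFacade} stands for the
+ * already-initialized {@code ReconStorageContainerManagerFacade}):
+ * <pre>
+ *   ReconLayoutVersionManager manager =
+ *       new ReconLayoutVersionManager(schemaVersionTableManager, reconContext);
+ *   // Runs the FINALIZE action of every feature whose version is above the current MLV
+ *   // and persists the new schema version in the same transaction.
+ *   manager.finalizeLayoutFeatures(scmFacade);
+ * </pre>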
+ */ +public class ReconLayoutVersionManager { + + private static final Logger LOG = LoggerFactory.getLogger(ReconLayoutVersionManager.class); + + private final ReconSchemaVersionTableManager schemaVersionTableManager; + private final ReconContext reconContext; + + // Metadata Layout Version (MLV) of the Recon Metadata on disk + private int currentMLV; + + public ReconLayoutVersionManager(ReconSchemaVersionTableManager schemaVersionTableManager, + ReconContext reconContext) + throws SQLException { + this.schemaVersionTableManager = schemaVersionTableManager; + this.currentMLV = determineMLV(); + this.reconContext = reconContext; + ReconLayoutFeature.registerUpgradeActions(); // Register actions via annotation + } + + /** + * Determines the current Metadata Layout Version (MLV) from the version table. + * @return The current Metadata Layout Version (MLV). + */ + private int determineMLV() throws SQLException { + return schemaVersionTableManager.getCurrentSchemaVersion(); + } + + /** + * Determines the Software Layout Version (SLV) based on the latest feature version. + * @return The Software Layout Version (SLV). + */ + private int determineSLV() { + return Arrays.stream(ReconLayoutFeature.values()) + .mapToInt(ReconLayoutFeature::getVersion) + .max() + .orElse(0); // Default to 0 if no features are defined + } + + /** + * Finalizes the layout features that need to be upgraded, by executing the upgrade action for each + * feature that is registered for finalization. + */ + public void finalizeLayoutFeatures(ReconStorageContainerManagerFacade scmFacade) { + // Get features that need finalization, sorted by version + List featuresToFinalize = getRegisteredFeatures(); + + try (Connection connection = scmFacade.getDataSource().getConnection()) { + connection.setAutoCommit(false); // Turn off auto-commit for transactional control + + for (ReconLayoutFeature feature : featuresToFinalize) { + try { + // Fetch only the FINALIZE action for the feature + Optional action = feature.getAction(ReconUpgradeAction.UpgradeActionType.FINALIZE); + if (action.isPresent()) { + // Update the schema version in the database + updateSchemaVersion(feature.getVersion(), connection); + + // Execute the upgrade action + action.get().execute(scmFacade); + + // Commit the transaction only if both operations succeed + connection.commit(); + LOG.info("Feature versioned {} finalized successfully.", feature.getVersion()); + } + } catch (Exception e) { + // Rollback any pending changes for the current feature due to failure + connection.rollback(); + currentMLV = determineMLV(); // Rollback the MLV to the original value + LOG.error("Failed to finalize feature {}. Rolling back changes.", feature.getVersion(), e); + throw e; + } + } + } catch (Exception e) { + // Log the error to both logs and ReconContext + LOG.error("Failed to finalize layout features: {}", e.getMessage()); + reconContext.updateErrors(ReconContext.ErrorCode.UPGRADE_FAILURE); + reconContext.updateHealthStatus(new AtomicBoolean(false)); + throw new RuntimeException("Recon failed to finalize layout features. Startup halted.", e); + } + } + + + /** + * Returns a list of ReconLayoutFeature objects that are registered for finalization. + */ + protected List getRegisteredFeatures() { + List allFeatures = + Arrays.asList(ReconLayoutFeature.values()); + + LOG.info("Current MLV: {}. SLV: {}. 
Checking features for registration...", currentMLV, determineSLV()); + + List registeredFeatures = allFeatures.stream() + .filter(feature -> feature.getVersion() > currentMLV) + .sorted((a, b) -> Integer.compare(a.getVersion(), b.getVersion())) // Sort by version in ascending order + .collect(Collectors.toList()); + + return registeredFeatures; + } + + /** + * Updates the Metadata Layout Version (MLV) in the database after finalizing a feature. + * This method uses the provided connection to ensure transactional consistency. + * + * @param newVersion The new Metadata Layout Version (MLV) to set. + * @param connection The database connection to use for the update operation. + */ + private void updateSchemaVersion(int newVersion, Connection connection) { + schemaVersionTableManager.updateSchemaVersion(newVersion, connection); + this.currentMLV = newVersion; + LOG.info("MLV updated to: " + newVersion); + } + + public int getCurrentMLV() { + return currentMLV; + } + + public int getCurrentSLV() { + return determineSLV(); + } + +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconUpgradeAction.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconUpgradeAction.java new file mode 100644 index 00000000000..d5fdbdacb7c --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconUpgradeAction.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.ozone.recon.upgrade; + +import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; + +/** + * ReconUpgradeAction is an interface for executing upgrade actions in Recon. + */ +public interface ReconUpgradeAction { + + /** + * Defines the different phases during which upgrade actions can be executed. + * Each action type corresponds to a specific point in the upgrade process: + * + * - FINALIZE: This action is executed automatically during the startup + * of Recon when it finalizes the layout upgrade. It ensures that all necessary + * upgrades or schema changes are applied to bring the system in sync with + * the latest version. + */ + enum UpgradeActionType { + FINALIZE + } + + /** + * Execute the upgrade action. + */ + void execute(ReconStorageContainerManagerFacade scmFacade) throws Exception; + + /** + * Provides the type of upgrade phase (e.g., FINALIZE). 
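+ * A typical implementation returns the single phase it handles, mirroring
+ * {@code InitialConstraintUpgradeAction} in this patch:
+ * <pre>
+ *   public UpgradeActionType getType() {
+ *     return UpgradeActionType.FINALIZE;
+ *   }
+ * </pre>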
+ */ + UpgradeActionType getType(); +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/UpgradeActionRecon.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/UpgradeActionRecon.java new file mode 100644 index 00000000000..11a6c16e198 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/UpgradeActionRecon.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.ozone.recon.upgrade; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * The {@code UpgradeActionRecon} annotation is used to specify + * upgrade actions that should be executed during particular phases + * of the Recon service layout upgrade process. + * + *

    This annotation can be used to associate an upgrade action + * class with a specific layout feature and upgrade phase. The + * framework will dynamically discover these annotated upgrade + * actions and execute them based on the feature's version and + * the defined action type (e.g., {@link ReconUpgradeAction.UpgradeActionType#FINALIZE}). + * + *

    The annotation is retained at runtime, allowing the reflection-based + * mechanism to scan for annotated classes, register the associated actions, + * and execute them as necessary during the layout upgrade process. + * + * Example usage: + * + *

    + * @UpgradeActionRecon(feature = FEATURE_NAME, type = FINALIZE)
    + * public class FeatureNameUpgradeAction implements ReconUpgradeAction {
    + *   @Override
+ *   public void execute(ReconStorageContainerManagerFacade scmFacade) throws Exception {
+ *     // Custom upgrade logic for FEATURE_NAME
    + *   }
    + * }
    + * 
    + */ +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.TYPE) +public @interface UpgradeActionRecon { + + /** + * Defines the layout feature this upgrade action is associated with. + */ + ReconLayoutFeature feature(); + + /** + * Defines the type of upgrade phase during which the action should be executed. + */ + ReconUpgradeAction.UpgradeActionType type(); +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/package-info.java new file mode 100644 index 00000000000..56a94b1f84a --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/package-info.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/** + * This package contains classes and interfaces for handling + * upgrade actions in Apache Ozone Recon. + * + * The main interface {@link org.apache.hadoop.ozone.recon.upgrade.ReconUpgradeAction} + * defines the structure for actions that need to be executed during an upgrade + * process in Recon. The actions can be triggered automatically + * during startup to ensure the correct version of the schema or + * layout is applied. 
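+ *
+ * To add a new upgrade step (an illustrative sketch; the feature name is
+ * hypothetical), declare a constant such as {@code SOME_FEATURE(1, "description")}
+ * in {@link org.apache.hadoop.ozone.recon.upgrade.ReconLayoutFeature} and pair it
+ * with an action class annotated
+ * {@code @UpgradeActionRecon(feature = SOME_FEATURE, type = FINALIZE)}; the action
+ * is discovered reflectively at startup and executed by
+ * {@link org.apache.hadoop.ozone.recon.upgrade.ReconLayoutVersionManager} once the
+ * feature is finalized.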
+ */ +package org.apache.hadoop.ozone.recon.upgrade; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json index f1d5dc36703..784ee8302e3 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json @@ -1923,7 +1923,7 @@ "actualReplicaCount": 2, "replicaDeltaCount": 1, "reason": null, - "keys": 1, + "keys": 4, "pipelineID": "a10ffab6-8ed5-414a-aaf5-79890ff3e8a1", "replicas": [ { @@ -1997,7 +1997,7 @@ "actualReplicaCount": 2, "replicaDeltaCount": 2, "reason": null, - "keys": 1, + "keys": 3, "pipelineID": "a10ffab6-8ed5-414a-aaf5-79890ff3e8a1", "replicas": [ { @@ -2071,7 +2071,7 @@ "actualReplicaCount": 2, "replicaDeltaCount": 2, "reason": null, - "keys": 1, + "keys": 2, "pipelineID": "a10ffab6-8ed5-414a-aaf5-79890ff3e8a1", "replicas": [ { @@ -2108,7 +2108,7 @@ "actualReplicaCount": 2, "replicaDeltaCount": 2, "reason": null, - "keys": 1, + "keys": 5, "pipelineID": "a10ffab6-8ed5-414a-aaf5-79890ff3e8a1", "replicas": [ { @@ -2145,7 +2145,7 @@ "actualReplicaCount": 2, "replicaDeltaCount": 2, "reason": null, - "keys": 1, + "keys": 3, "pipelineID": "a10ffab6-8ed5-414a-aaf5-79890ff3e8a1", "replicas": [ { @@ -2182,7 +2182,7 @@ "actualReplicaCount": 2, "replicaDeltaCount": 2, "reason": null, - "keys": 1, + "keys": 6, "pipelineID": "a10ffab6-8ed5-414a-aaf5-79890ff3e8a2", "replicas": [ { @@ -2219,7 +2219,7 @@ "actualReplicaCount": 2, "replicaDeltaCount": 2, "reason": null, - "keys": 1, + "keys": 2, "pipelineID": "a10ffab6-8ed5-414a-aaf5-79890ff3e8a3", "replicas": [ { @@ -5169,438 +5169,317 @@ ] }, "nonFSO": { - "keysSummary": { - "totalUnreplicatedDataSize": 10485760, - "totalReplicatedDataSize": 31457280, - "totalOpenKeys": 10 - }, + "lastKey": "/s3v/legacy/dir1/dir2/dir3/an9uf2eeox/19/113328137261088807", + "replicatedDataSize": 31457280, + "unreplicatedDataSize": 10485760, "nonFSO": [ { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/01/110569623850191713", - "path": "nonfso 1", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2439/110569623850191714", - "path": "nonfso 2", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2440/110569623850191715", - "path": "nonfso 11", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2441/110569623850191716", - "path": "nonfso 12", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2440/110569623850191717", - "path": "nonfso 21", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, - 
"replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2441/110569623850191718", - "path": "nonfso 22", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - } - ], - "status": "OK" - }, - "fso": { - "fso": [ - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2401/110569623850191713", - "path": "1", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2402/110569623850191714", - "path": "2", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2403/110569623850191715", - "path": "3", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2404/110569623850191716", - "path": "4", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2405/110569623850191717", - "path": "5", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2406/110569623850191718", - "path": "6", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2407/110569623850191719", - "path": "7", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2408/110569623850191720", - "path": "8", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2409/110569623850191721", - "path": "9", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, - "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "fso 10", - "path": "10", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, + "key": "/s3v/legacy/dir1/dir2/dir3/an9uf2eeox/10/113328137245098014", + "path": 
"dir1/dir2/dir3/an9uf2eeox/10", + "inStateSince": 1729250141069, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, + "replicationFactor": "THREE", + "requiredNodes": 3, "replicationType": "RATIS" - } + }, + "creationTime": 1729250161542, + "modificationTime": 1729250161542, + "isKey": true }, { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2411/110569623850191722", - "path": "11", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, + "key": "/s3v/legacy/dir1/dir2/dir3/an9uf2eeox/11/113328137245818911", + "path": "dir1/dir2/dir3/an9uf2eeox/11", + "inStateSince": 1729250141080, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, + "replicationFactor": "THREE", + "requiredNodes": 3, "replicationType": "RATIS" - } + }, + "creationTime": 1729250161543, + "modificationTime": 1729250161543, + "isKey": true }, { - "key": "fso 12", - "path": "12", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, + "key": "/s3v/legacy/dir1/dir2/dir3/an9uf2eeox/12/113328137247195168", + "path": "dir1/dir2/dir3/an9uf2eeox/12", + "inStateSince": 1729250141091, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, + "replicationFactor": "THREE", + "requiredNodes": 3, "replicationType": "RATIS" - } + }, + "creationTime": 1729250161543, + "modificationTime": 1729250161543, + "isKey": true }, { - "key": "fso 13", - "path": "13", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, + "key": "/s3v/legacy/dir1/dir2/dir3/an9uf2eeox/13/113328137248178209", + "path": "dir1/dir2/dir3/an9uf2eeox/13", + "inStateSince": 1729250141116, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, + "replicationFactor": "THREE", + "requiredNodes": 3, "replicationType": "RATIS" - } + }, + "creationTime": 1729250161543, + "modificationTime": 1729250161543, + "isKey": true }, { - "key": "fso 14", - "path": "14", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, + "key": "/s3v/legacy/dir1/dir2/dir3/an9uf2eeox/14/113328137249685538", + "path": "dir1/dir2/dir3/an9uf2eeox/14", + "inStateSince": 1729250141139, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, + "replicationFactor": "THREE", + "requiredNodes": 3, "replicationType": "RATIS" - } + }, + "creationTime": 1729250161543, + "modificationTime": 1729250161543, + "isKey": true }, { - "key": "fso 15", - "path": "15", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, + "key": "/s3v/legacy/dir1/dir2/dir3/an9uf2eeox/15/113328137250930723", + "path": "dir1/dir2/dir3/an9uf2eeox/15", + "inStateSince": 1729250141158, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, + "replicationFactor": "THREE", + "requiredNodes": 3, "replicationType": "RATIS" - } + }, + "creationTime": 1729250161543, + "modificationTime": 1729250161543, + "isKey": true }, { - "key": "fso 16 key", - "path": "16", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, + "key": "/s3v/legacy/dir1/dir2/dir3/an9uf2eeox/16/113328137252569124", + "path": "dir1/dir2/dir3/an9uf2eeox/16", + "inStateSince": 
1729250141183, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, + "replicationFactor": "THREE", + "requiredNodes": 3, "replicationType": "RATIS" - } + }, + "creationTime": 1729250161543, + "modificationTime": 1729250161543, + "isKey": true }, { - "key": "fso 17 key", - "path": "17", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, + "key": "/s3v/legacy/dir1/dir2/dir3/an9uf2eeox/17/113328137259778085", + "path": "dir1/dir2/dir3/an9uf2eeox/17", + "inStateSince": 1729250141293, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, + "replicationFactor": "THREE", + "requiredNodes": 3, "replicationType": "RATIS" - } + }, + "creationTime": 1729250161543, + "modificationTime": 1729250161543, + "isKey": true }, { - "key": "fso 18 key", - "path": "18", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, + "key": "/s3v/legacy/dir1/dir2/dir3/an9uf2eeox/18/113328137261023270", + "path": "dir1/dir2/dir3/an9uf2eeox/18", + "inStateSince": 1729250141312, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, + "replicationFactor": "THREE", + "requiredNodes": 3, "replicationType": "RATIS" - } + }, + "creationTime": 1729250161543, + "modificationTime": 1729250161543, + "isKey": true }, { - "key": "fso 19 key", - "path": "19", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, + "key": "/s3v/legacy/dir1/dir2/dir3/an9uf2eeox/19/113328137261088807", + "path": "dir1/dir2/dir3/an9uf2eeox/19", + "inStateSince": 1729250141313, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, + "replicationFactor": "THREE", + "requiredNodes": 3, "replicationType": "RATIS" - } - }, - { - "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2411/110569623850191723", - "path": "21", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, + }, + "creationTime": 1729250161543, + "modificationTime": 1729250161543, + "isKey": true + } + ], + "status": "OK" + }, + "fso": { + "lastKey": "/-9223372036854775552/-9223372036854775040/-9223372036854774524/9/113328113600626690", + "replicatedDataSize": 31457280, + "unreplicatedDataSize": 10485760, + "fso": [{ + "key": "/-9223372036854775552/-9223372036854775040/-9223372036854774524/0/113328113600561153", + "path": "dir1/dir2/dir3/pnrnqh5gux/0", + "inStateSince": 1729249780277, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "fso 22", - "path": "22", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + }, + "creationTime": 1729249820562, + "modificationTime": 1729249820562, + "isKey": true + }, + { + "key": "/-9223372036854775552/-9223372036854775040/-9223372036854774524/1/113328113600626694", + "path": "dir1/dir2/dir3/pnrnqh5gux/1", + "inStateSince": 1729249780277, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "fso 23", - "path": "23", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 
268435456, + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + }, + "creationTime": 1729249820562, + "modificationTime": 1729249820562, + "isKey": true + }, + { + "key": "/-9223372036854775552/-9223372036854775040/-9223372036854774524/2/113328113600626691", + "path": "dir1/dir2/dir3/pnrnqh5gux/2", + "inStateSince": 1729249780277, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "fso 24", - "path": "24", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + }, + "creationTime": 1729249820563, + "modificationTime": 1729249820563, + "isKey": true + }, + { + "key": "/-9223372036854775552/-9223372036854775040/-9223372036854774524/3/113328113600692233", + "path": "dir1/dir2/dir3/pnrnqh5gux/3", + "inStateSince": 1729249780277, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "fso 25", - "path": "25", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + }, + "creationTime": 1729249820563, + "modificationTime": 1729249820563, + "isKey": true + }, + { + "key": "/-9223372036854775552/-9223372036854775040/-9223372036854774524/4/113328113600626695", + "path": "dir1/dir2/dir3/pnrnqh5gux/4", + "inStateSince": 1729249780277, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "fso 26 key", - "path": "26", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + }, + "creationTime": 1729249820563, + "modificationTime": 1729249820563, + "isKey": true + }, + { + "key": "/-9223372036854775552/-9223372036854775040/-9223372036854774524/5/113328113600561152", + "path": "dir1/dir2/dir3/pnrnqh5gux/5", + "inStateSince": 1729249780277, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "fso 17 key", - "path": "27", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + }, + "creationTime": 1729249820564, + "modificationTime": 1729249820564, + "isKey": true + }, + { + "key": "/-9223372036854775552/-9223372036854775040/-9223372036854774524/6/113328113600626692", + "path": "dir1/dir2/dir3/pnrnqh5gux/6", + "inStateSince": 1729249780277, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "fso 18 key", - "path": "28", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + }, + "creationTime": 1729249820564, + "modificationTime": 1729249820564, + "isKey": true + }, + { + "key": "/-9223372036854775552/-9223372036854775040/-9223372036854774524/7/113328113600626696", + "path": "dir1/dir2/dir3/pnrnqh5gux/7", + "inStateSince": 
1729249780277, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "fso 19 key", - "path": "29", - "inStateSince": 1686156886632, - "size": 268435456, - "replicatedSize": 268435456, + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + }, + "creationTime": 1729249820564, + "modificationTime": 1729249820564, + "isKey": true + }, + { + "key": "/-9223372036854775552/-9223372036854775040/-9223372036854774524/8/113328113600626693", + "path": "dir1/dir2/dir3/pnrnqh5gux/8", + "inStateSince": 1729249780277, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - }, - { - "key": "fso 20 key", - "path": "20", - "inStateSince": 1686156887186, - "size": 268435456, - "replicatedSize": 268435456, + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + }, + "creationTime": 1729249820564, + "modificationTime": 1729249820564, + "isKey": true + }, + { + "key": "/-9223372036854775552/-9223372036854775040/-9223372036854774524/9/113328113600626690", + "path": "dir1/dir2/dir3/pnrnqh5gux/9", + "inStateSince": 1729249780277, + "size": 1048576, + "replicatedSize": 3145728, "replicationInfo": { - "replicationFactor": "ONE", - "requiredNodes": 1, - "replicationType": "RATIS" - } - } - ], + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + }, + "creationTime": 1729249820565, + "modificationTime": 1729249820565, + "isKey": true + }], "status": "OK" }, "keydeletePending": { diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml index 361705adc44..83ed39f34e0 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml @@ -1896,8 +1896,8 @@ packages: readable-stream: 3.6.2 dev: true - /body-parser@1.20.2: - resolution: {integrity: sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==} + /body-parser@1.20.3: + resolution: {integrity: sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==} engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} dependencies: bytes: 3.1.2 @@ -1908,7 +1908,7 @@ packages: http-errors: 2.0.0 iconv-lite: 0.4.24 on-finished: 2.4.1 - qs: 6.11.0 + qs: 6.13.0 raw-body: 2.5.2 type-is: 1.6.18 unpipe: 1.0.0 @@ -4104,7 +4104,7 @@ packages: engines: {node: '>=8'} hasBin: true dependencies: - body-parser: 1.20.2 + body-parser: 1.20.3 chalk: 2.4.2 compression: 1.7.4 connect-pause: 0.1.1 @@ -5072,8 +5072,8 @@ packages: engines: {node: '>=6'} dev: true - /qs@6.11.0: - resolution: {integrity: sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==} + /qs@6.13.0: + resolution: {integrity: sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==} engines: {node: '>=0.6'} dependencies: side-channel: 1.0.6 diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.less index 1895cabc184..44f53fa9d47 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.less 
+++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.less @@ -164,4 +164,9 @@ body { .pointer { cursor: pointer; +} + +.data-container { + padding: 24px; + height: 80vh; } \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.tsx index 78954ebb5a5..0b7607f2978 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.tsx @@ -22,6 +22,7 @@ import { Switch as AntDSwitch, Layout } from 'antd'; import NavBar from './components/navBar/navBar'; import NavBarV2 from '@/v2/components/navBar/navBar'; import Breadcrumbs from './components/breadcrumbs/breadcrumbs'; +import BreadcrumbsV2 from '@/v2/components/breadcrumbs/breadcrumbs'; import { HashRouter as Router, Switch, Route, Redirect } from 'react-router-dom'; import { routes } from '@/routes'; import { routesV2 } from '@/v2/routes-v2'; @@ -31,6 +32,7 @@ import classNames from 'classnames'; import Loader from '@/v2/components/loader/loader'; import './app.less'; +import NotFound from '@/v2/pages/notFound/notFound'; const { Header, Content, Footer @@ -38,7 +40,7 @@ const { interface IAppState { collapsed: boolean; - enableNewUI: boolean; + enableOldUI: boolean; } class App extends React.Component, IAppState> { @@ -46,7 +48,7 @@ class App extends React.Component, IAppState> { super(props); this.state = { collapsed: false, - enableNewUI: false + enableOldUI: false }; } @@ -55,7 +57,7 @@ class App extends React.Component, IAppState> { }; render() { - const { collapsed, enableNewUI } = this.state; + const { collapsed, enableOldUI } = this.state; const layoutClass = classNames('content-layout', { 'sidebar-collapsed': collapsed }); @@ -63,40 +65,46 @@ class App extends React.Component, IAppState> { { - (enableNewUI) - ? - : + (enableOldUI) + ? + : }
    -
    - - New UI
    } - onChange={(checked: boolean) => { - this.setState({ - enableNewUI: checked - }); - }} /> +
    + {(enableOldUI) ? : } + + Switch to + Old UI
    } + checkedChildren={
    New UI
    } + onChange={(checked: boolean) => { + this.setState({ + enableOldUI: checked + }); + }} /> +
    - - - - - - {(enableNewUI) - ? }> - {routesV2.map( + + }> + + + + + {(enableOldUI) + ? routes.map( (route, index) => - )} - - : routes.map( - (route, index) => - ) - } - + ) + : routesV2.map( + (route, index) => { + return + } + ) + } + + +
    diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/constants/breadcrumbs.constants.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/constants/breadcrumbs.constants.tsx index 88953a5ed7d..a9f7a53eda4 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/constants/breadcrumbs.constants.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/constants/breadcrumbs.constants.tsx @@ -27,8 +27,9 @@ export const breadcrumbNameMap: IBreadcrumbNameMap = { '/Datanodes': 'Datanodes', '/Pipelines': 'Pipelines', '/MissingContainers': 'Missing Containers', + '/Containers': 'Containers', '/Insights': 'Insights', '/DiskUsage': 'Disk Usage', '/Heatmap': 'Heatmap', - '/Om': 'Om', + '/Om': 'Om' }; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/breadcrumbs/breadcrumbs.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/breadcrumbs/breadcrumbs.tsx new file mode 100644 index 00000000000..8e1c34decb7 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/breadcrumbs/breadcrumbs.tsx @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import React from 'react'; +import { Breadcrumb } from 'antd'; +import { HomeOutlined } from '@ant-design/icons'; +import { Link, useLocation } from 'react-router-dom'; + +import { breadcrumbNameMap } from '@/v2/constants/breadcrumbs.constants'; + +const Breadcrumbs: React.FC<{}> = () => { + const location = useLocation(); + //Split and filter to remove empty strings + const pathSnippets = location.pathname.split('/').filter(i => i); + + const extraBreadcrumbItems = pathSnippets.map((_: string, index: number) => { + const url = `/${pathSnippets.slice(0, index + 1).join('/')}`; + return ( + + + {breadcrumbNameMap[url]} + + + ) + }); + + const breadcrumbItems = [( + + + + )].concat(extraBreadcrumbItems); + + return ( + + {breadcrumbItems} + + ); +} + +export default Breadcrumbs; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.tsx index 3da4104634c..1dd1ede48db 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.tsx @@ -144,7 +144,7 @@ const NavBar: React.FC = ({ Heatmap diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/overviewCard/overviewSummaryCard.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/overviewCard/overviewSummaryCard.tsx index e383512f20e..8736b3e0d29 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/overviewCard/overviewSummaryCard.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/overviewCard/overviewSummaryCard.tsx @@ -39,6 +39,7 @@ type OverviewTableCardProps = { data?: string | React.ReactElement; linkToUrl?: string; showHeader?: boolean; + state?: Record; } // ------------- Styles -------------- // @@ -63,15 +64,18 @@ const OverviewSummaryCard: React.FC = ({ columns = [], tableData = [], linkToUrl = '', - showHeader = false + showHeader = false, + state }) => { - const titleElement = (linkToUrl) ? (
    {title} View Insights diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/duPieChart.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/duPieChart.tsx new file mode 100644 index 00000000000..2601905a142 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/duPieChart.tsx @@ -0,0 +1,211 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; + +import EChart from '@/v2/components/eChart/eChart'; +import { byteToSize } from '@/utils/common'; +import { DUSubpath } from '@/v2/types/diskUsage.types'; + +//-------Types--------// +type PieChartProps = { + path: string; + limit: number; + size: number; + subPaths: DUSubpath[]; + subPathCount: number; + sizeWithReplica: number; + loading: boolean; +} + +//-------Constants---------// +const OTHER_PATH_NAME = 'Other Objects'; +const MIN_BLOCK_SIZE = 0.05; + + +//----------Component---------// +const DUPieChart: React.FC = ({ + path, + limit, + size, + subPaths, + subPathCount, + sizeWithReplica, + loading +}) => { + + const [subpathSize, setSubpathSize] = React.useState(0); + + function getSubpathSize(subpaths: DUSubpath[]): number { + const subpathSize = subpaths + .map((subpath) => subpath.size) + .reduce((acc, curr) => acc + curr, 0); + // If there is no subpaths, then the size will be total size of path + return (subPaths.length === 0) ? size : subpathSize; + } + + function updatePieData() { + /** + * We need to calculate the size of "Other objects" in two cases: + * + * 1) If we have more subpaths listed, than the limit. + * 2) If the limit is set to the maximum limit (30) and we have any number of subpaths. + * In this case we won't necessarily have "Other objects", but we check if the + * other objects's size is more than zero (we will have other objects if there are more than 30 subpaths, + * but we can't check on that, as the response will always have + * 30 subpaths, but from the total size and the subpaths size we can calculate it). + */ + let subpaths: DUSubpath[] = subPaths; + + let pathLabels: string[] = []; + let percentage: string[] = []; + let sizeStr: string[]; + let valuesWithMinBlockSize: number[] = []; + + if (subPathCount > limit) { + // If the subpath count is greater than the provided limit + // Slice the subpath to the limit + subpaths = subpaths.slice(0, limit); + // Add the size of the subpath + const limitedSize = getSubpathSize(subpaths); + const remainingSize = size - limitedSize; + subpaths.push({ + path: OTHER_PATH_NAME, + size: remainingSize, + sizeWithReplica: (sizeWithReplica === -1) + ? 
-1 + : sizeWithReplica - remainingSize, + isKey: false + }) + } + + if (subPathCount === 0 || subpaths.length === 0) { + // No more subpaths available + pathLabels = [path.split('/').pop() ?? '']; + valuesWithMinBlockSize = [0.1]; + percentage = ['100.00']; + sizeStr = [byteToSize(size, 1)]; + } else { + pathLabels = subpaths.map(subpath => { + const subpathName = subpath.path.split('/').pop() ?? ''; + // Diferentiate keys by removing trailing slash + return (subpath.isKey || subpathName === OTHER_PATH_NAME) + ? subpathName + : subpathName + '/'; + }); + + let values: number[] = [0]; + if (size > 0) { + values = subpaths.map( + subpath => (subpath.size / size) + ); + } + const valueClone = structuredClone(values); + valuesWithMinBlockSize = valueClone?.map( + (val: number) => (val > 0) + ? val + MIN_BLOCK_SIZE + : val + ); + + percentage = values.map(value => (value * 100).toFixed(2)); + sizeStr = subpaths.map((subpath) => byteToSize(subpath.size, 1)); + } + + return valuesWithMinBlockSize.map((key, idx) => { + return { + value: key, + name: pathLabels[idx], + size: sizeStr[idx], + percentage: percentage[idx] + } + }); + } + + React.useEffect(() => { + setSubpathSize(getSubpathSize(subPaths)); + }, [subPaths, limit]); + + const pieData = React.useMemo(() => updatePieData(), [path, subPaths, limit]); + + const eChartsOptions = { + title: { + text: `${byteToSize(subpathSize, 1)} / ${byteToSize(size, 1)}`, + left: 'center', + top: '95%' + }, + tooltip: { + trigger: 'item', + formatter: ({ dataIndex, name, color }) => { + const nameEl = `${name}
    `; + const dataEl = `Total Data Size: ${pieData[dataIndex]['size']}
    ` + const percentageEl = `Percentage: ${pieData[dataIndex]['percentage']} %` + return `${nameEl}${dataEl}${percentageEl}` + } + }, + legend: { + top: '10%', + orient: 'vertical', + left: '0%', + width: '80%' + }, + grid: { + + }, + series: [ + { + type: 'pie', + radius: '70%', + data: pieData.map((value) => { + return { + value: value.value, + name: value.name + } + }), + emphasis: { + itemStyle: { + shadowBlur: 10, + shadowOffsetX: 0, + shadowColor: 'rgba(0, 0, 0, 0.5)' + } + } + } + ] + }; + + const handleLegendChange = ({selected}: {selected: Record}) => { + const filteredPath = subPaths.filter((value) => { + // In case of any leading '/' remove them and add a / at end + // to make it similar to legend + const splitPath = value.path?.split('/'); + const pathName = splitPath[splitPath.length - 1] ?? '' + ((value.isKey) ? '' : '/'); + return selected[pathName]; + }) + const newSize = getSubpathSize(filteredPath); + setSubpathSize(newSize); + } + + return ( + + ); +} + +export default DUPieChart; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/heatmapPlot.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/heatmapPlot.tsx new file mode 100644 index 00000000000..a58a7704dac --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/heatmapPlot.tsx @@ -0,0 +1,151 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; +import { AgChartsReact } from 'ag-charts-react'; +import { byteToSize } from '@/utils/common'; +import { HeatmapResponse } from '@/v2/types/heatmap.types'; + +type HeatmapPlotProps = { + data: HeatmapResponse; + onClick: (arg0: string) => void; + colorScheme: string[]; + entityType: string; +}; + +const capitalize = (str: T) => { + return str.charAt(0).toUpperCase() + str.slice(1) as Capitalize; +} + +const HeatmapPlot: React.FC = ({ + data, + onClick, + colorScheme, + entityType = '' +}) => { + + const tooltipContent = (params: any) => { + let tooltipContent = ` + Size: + ${byteToSize(params.datum.size, 1)} + `; + if (params.datum.accessCount !== undefined) { + tooltipContent += `
+ Access Count: + ${params.datum.accessCount} + `; + } + else { + tooltipContent += `
+ Max Access Count: + ${params.datum.maxAccessCount} + `; + } + if (params.datum.label !== '') { + tooltipContent += `
    + Entity Name: + ${params.datum.label ? params.datum.label.split('/').slice(-1) : ''} + `; + } + tooltipContent += '
    '; + return tooltipContent; + }; + + const heatmapConfig = { + type: 'treemap', + labelKey: 'label',// the name of the key to fetch the label value from + sizeKey: 'normalizedSize',// the name of the key to fetch the value that will determine tile size + colorKey: 'color', + title: { color: '#424242', fontSize: 14, fontFamily: 'Roboto', fontWeight: '600' }, + subtitle: { color: '#424242', fontSize: 12, fontFamily: 'Roboto', fontWeight: '400' }, + tooltip: { + renderer: (params) => { + return { + content: tooltipContent(params) + }; + } + }, + formatter: ({ highlighted }: { highlighted: boolean }) => { + const stroke = highlighted ? '#CED4D9' : '#FFFFFF'; + return { stroke }; + }, + labels: { + color: '#FFFFFF', + fontWeight: 'bold', + fontSize: 12 + }, + tileStroke: '#FFFFFF', + tileStrokeWidth: 1.4, + colorDomain: [ + 0.000, + 0.050, + 0.100, + 0.150, + 0.200, + 0.250, + 0.300, + 0.350, + 0.400, + 0.450, + 0.500, + 0.550, + 0.600, + 0.650, + 0.700, + 0.750, + 0.800, + 0.850, + 0.900, + 0.950, + 1.000 + ], + colorRange: [...colorScheme], + groupFill: '#E6E6E6', + groupStroke: '#E1E2E6', + nodePadding: 3, + labelShadow: { enabled: false }, //labels shadow + gradient: false, + highlightStyle: { + text: { + color: '#424242', + }, + item: { + fill: 'rgba(0, 0 ,0, 0.0)', + }, + }, + listeners: { + nodeClick: (event) => { + var data = event.datum; + // Leaf level box should not call API + if (!data.color) + if (data.path) { + onClick(data.path); + } + }, + }, + } + + const options = { + data, + series: [heatmapConfig], + title: { text: `${capitalize(entityType)} Heatmap`} + }; + + return +} + +export default HeatmapPlot; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/insightsContainerPlot.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/insightsContainerPlot.tsx new file mode 100644 index 00000000000..851c355e765 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/insightsContainerPlot.tsx @@ -0,0 +1,149 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import React from 'react'; +import filesize from 'filesize'; +import { EChartsOption } from 'echarts'; + +import EChart from '@/v2/components/eChart/eChart'; +import { ContainerCountResponse, ContainerPlotData } from '@/v2/types/insights.types'; + +type ContainerSizeDistributionProps = { + containerCountResponse: ContainerCountResponse[]; + containerSizeError: string | undefined; +} + +const size = filesize.partial({ standard: 'iec', round: 0 }); + +const ContainerSizeDistribution: React.FC = ({ + containerCountResponse, + containerSizeError +}) => { + + const [containerPlotData, setContainerPlotData] = React.useState({ + containerCountValues: [], + containerCountMap: new Map() + }); + + function updatePlotData() { + const containerCountMap: Map = containerCountResponse.reduce( + (map: Map, current) => { + const containerSize = current.containerSize; + const oldCount = map.get(containerSize) ?? 0; + map.set(containerSize, oldCount + current.count); + return map; + }, + new Map() + ); + + const containerCountValues = Array.from(containerCountMap.keys()).map(value => { + const upperbound = size(value); + const upperboundPwr = Math.log2(value); + + const lowerbound = upperboundPwr > 10 ? size(2 ** (upperboundPwr - 1)) : size(0); + return `${lowerbound} - ${upperbound}`; + }); + + setContainerPlotData({ + containerCountValues: containerCountValues, + containerCountMap: containerCountMap + }); + } + + React.useEffect(() => { + updatePlotData(); + }, []); + + const { containerCountMap, containerCountValues } = containerPlotData; + + const containerPlotOptions: EChartsOption = { + tooltip: { + trigger: 'item', + formatter: ({ data }) => { + return `Size Range: ${data.name}
    Count: ${data.value}` + } + }, + legend: { + orient: 'vertical', + left: 'right' + }, + series: { + type: 'pie', + radius: '50%', + data: Array.from(containerCountMap?.values() ?? []).map((value, idx) => { + return { + value: value, + name: containerCountValues[idx] ?? '' + } + }), + }, + graphic: (containerSizeError) ? { + type: 'group', + left: 'center', + top: 'middle', + z: 100, + children: [ + { + type: 'rect', + left: 'center', + top: 'middle', + z: 100, + shape: { + width: 500, + height: 500 + }, + style: { + fill: 'rgba(256, 256, 256, 0.5)' + } + }, + { + type: 'rect', + left: 'center', + top: 'middle', + z: 100, + shape: { + width: 500, + height: 40 + }, + style: { + fill: '#FC909B' + } + }, + { + type: 'text', + left: 'center', + top: 'middle', + z: 100, + style: { + text: `No data available. ${containerSizeError}`, + font: '20px sans-serif' + } + } + ] + } : undefined + } + + return (<> + + ) +} + +export default ContainerSizeDistribution; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/insightsFilePlot.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/insightsFilePlot.tsx new file mode 100644 index 00000000000..bb6453ed7c1 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/insightsFilePlot.tsx @@ -0,0 +1,251 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import React from 'react'; +import filesize from 'filesize'; +import { EChartsOption } from 'echarts'; +import { ValueType } from 'react-select'; + +import EChart from '@/v2/components/eChart/eChart'; +import MultiSelect, { Option } from '@/v2/components/select/multiSelect'; +import { FileCountResponse, FilePlotData } from '@/v2/types/insights.types'; + + +//-----Types------ +type FileSizeDistributionProps = { + volumeOptions: Option[]; + volumeBucketMap: Map>; + fileCountResponse: FileCountResponse[]; + fileCountError: string | undefined; +} + +const size = filesize.partial({ standard: 'iec', round: 0 }); + +const dropdownStyles: React.CSSProperties = { + display: 'flex', + justifyContent: 'space-between' +} + +const FileSizeDistribution: React.FC = ({ + volumeOptions = [], + volumeBucketMap, + fileCountResponse, + fileCountError +}) => { + + const [bucketOptions, setBucketOptions] = React.useState([]); + const [selectedBuckets, setSelectedBuckets] = React.useState([]); + const [selectedVolumes, setSelectedVolumes] = React.useState([]); + const [isBucketSelectionEnabled, setBucketSelectionEnabled] = React.useState(false); + + const [filePlotData, setFilePlotData] = React.useState({ + fileCountValues: [], + fileCountMap: new Map() + }); + + function handleVolumeChange(selectedVolumes: ValueType) { + + // Disable bucket selection options if more than one volume is selected or no volumes present + // If there is only one volume then the bucket selection is enabled + const bucketSelectionDisabled = ((selectedVolumes as Option[])?.length > 1 + && volumeBucketMap.size !== 1); + + let bucketOptions: Option[] = []; + + // Update buckets if only one volume is selected + if (selectedVolumes?.length === 1) { + const selectedVolume = selectedVolumes[0].value; + if (volumeBucketMap.has(selectedVolume)) { + bucketOptions = Array.from( + volumeBucketMap.get(selectedVolume)! + ).map(bucket => ({ + label: bucket, + value: bucket + })); + } + } + setBucketOptions([...bucketOptions]); + setSelectedVolumes(selectedVolumes as Option[]); + setSelectedBuckets([...bucketOptions]); + setBucketSelectionEnabled(!bucketSelectionDisabled); + } + + function handleBucketChange(selectedBuckets: ValueType) { + setSelectedBuckets(selectedBuckets as Option[]); + } + + function updatePlotData() { + // Aggregate count across volumes and buckets for use in plot + let filteredData = fileCountResponse; + const selectedVolumeValues = new Set(selectedVolumes.map(option => option.value)); + const selectedBucketValues = new Set(selectedBuckets.map(option => option.value)); + if (selectedVolumes.length >= 0) { + // Not all volumes are selected, need to filter based on the selected values + filteredData = filteredData.filter(data => selectedVolumeValues.has(data.volume)); + + // We have selected a volume but all the buckets are deselected + if (selectedVolumes.length === 1 && selectedBuckets.length === 0) { + // Since no buckets are selected there is no data + filteredData = []; + } + } + if (selectedBuckets.length > 0) { + // Not all buckcets are selected, filter based on the selected values + filteredData = filteredData.filter(data => selectedBucketValues.has(data.bucket)); + } + + // This is a map of 'size : count of the size' + const fileCountMap: Map = filteredData.reduce( + (map: Map, current) => { + const fileSize = current.fileSize; + const oldCount = map.get(fileSize) ?? 
0; + map.set(fileSize, oldCount + current.count); + return map; + }, + new Map + ); + + // Calculate the previous power of 2 to find the lower bound of the range + // Ex: for 2048, the lower bound is 1024 + const fileCountValues = Array.from(fileCountMap.keys()).map(value => { + const upperbound = size(value); + const upperboundPwr = Math.log2(value); + // For 1024 i.e 2^10, the lower bound is 0, so we start binning after 2^10 + const lowerbound = upperboundPwr > 10 ? size(2 ** (upperboundPwr - 1)) : size(0); + return `${lowerbound} - ${upperbound}`; + }); + + setFilePlotData({ + fileCountValues: fileCountValues, + // set the sorted value by size for the map + fileCountMap: new Map([...fileCountMap.entries()].sort((a, b) => a[0] - b[0])) + }); + } + + // If the response is updated or the volume-bucket data is updated, update plot + React.useEffect(() => { + updatePlotData(); + handleVolumeChange(volumeOptions); + }, [ + fileCountResponse, volumeBucketMap + ]); + + // If the selected volumes and buckets change, update plot + React.useEffect(() => { + updatePlotData(); + }, [selectedVolumes, selectedBuckets]) + + const { fileCountValues, fileCountMap } = filePlotData; + + const filePlotOptions: EChartsOption = { + xAxis: { + type: 'category', + data: [...fileCountValues] ?? [] + }, + yAxis: { + type: 'value' + }, + tooltip: { + trigger: 'item', + formatter: ({ name, value }) => { + return `Size Range: ${name}
    Count: ${value}` + } + }, + series: { + itemStyle: { + color: '#04AD78' + }, + data: Array.from(fileCountMap?.values() ?? []), + type: 'bar' + }, + graphic: (fileCountError) ? { + type: 'group', + left: 'center', + top: 'middle', + z: 100, + children: [ + { + type: 'rect', + left: 'center', + top: 'middle', + z: 100, + shape: { + width: 500, + height: 40 + }, + style: { + fill: '#FC909B' + } + }, + { + type: 'text', + left: 'center', + top: 'middle', + z: 100, + style: { + text: `No data available. ${fileCountError}`, + font: '20px sans-serif' + } + } + ] + } : undefined + } + + return (<> +
    + { }} + fixedColumn='' + columnLength={volumeOptions.length} + style={{ + control: (baseStyles, state) => ({ + ...baseStyles, + minWidth: 345 + }) + }} /> + { }} + fixedColumn='' + columnLength={bucketOptions.length} + isDisabled={!isBucketSelectionEnabled} + style={{ + control: (baseStyles, state) => ({ + ...baseStyles, + minWidth: 345 + }) + }} /> +
    + + ) +} + +export default FileSizeDistribution; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/select/multiSelect.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/select/multiSelect.tsx index 07b3f9eafa1..3dfe19f9b45 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/select/multiSelect.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/select/multiSelect.tsx @@ -23,7 +23,8 @@ import { components, OptionProps, ValueType, - ValueContainerProps + ValueContainerProps, + StylesConfig } from 'react-select'; import { selectStyles } from "@/v2/constants/select.constants"; @@ -41,6 +42,7 @@ interface MultiSelectProps extends ReactSelectProps { placeholder: string; fixedColumn: string; columnLength: number; + style?: StylesConfig; onChange: (arg0: ValueType) => void; onTagClose: (arg0: string) => void; } @@ -72,9 +74,11 @@ const MultiSelect: React.FC = ({ selected = [], maxSelected = 5, placeholder = 'Columns', + isDisabled = false, fixedColumn, columnLength, tagRef, + style, onTagClose = () => { }, // Assign default value as a void function onChange = () => { }, // Assign default value as a void function ...props @@ -90,34 +94,40 @@ const MultiSelect: React.FC = ({ ? child : null )} - {placeholder}: {selected.length} selected + {isDisabled + ? placeholder + : `${placeholder}: ${selected.length} selected` +} ); }; + const finalStyles = {...selectStyles, ...style ?? {}} + return ( option.value === fixedColumn} - onChange={(selected: ValueType) => { - if (selected?.length === options.length) return onChange!(options); - return onChange!(selected); - }} - styles={selectStyles} /> + {...props} + isMulti={true} + closeMenuOnSelect={false} + hideSelectedOptions={false} + isClearable={false} + isSearchable={false} + controlShouldRenderValue={false} + classNamePrefix='multi-select' + options={options} + components={{ + ValueContainer, + Option + }} + placeholder={placeholder} + value={selected} + isOptionDisabled={(option) => option.value === fixedColumn} + isDisabled={isDisabled} + onChange={(selected: ValueType) => { + if (selected?.length === options.length) return onChange!(options); + return onChange!(selected); + }} + styles={finalStyles} /> ) } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/bucketsTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/bucketsTable.tsx index b26ae251f95..0060177795b 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/bucketsTable.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/bucketsTable.tsx @@ -255,7 +255,7 @@ const BucketsTable: React.FC = ({ dataSource={getFilteredData(data)} columns={filterSelectedColumns()} loading={loading} - rowKey='volume' + rowKey={(record: Bucket) => `${record.volumeName}/${record.name}`} pagination={paginationConfig} scroll={{ x: 'max-content', scrollToFirstRowOnChange: true }} locale={{ filterTitle: '' }} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/containersTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/containersTable.tsx new file mode 100644 index 00000000000..1bb1b5456b5 --- /dev/null 
+++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/containersTable.tsx @@ -0,0 +1,259 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React, { useRef } from 'react'; +import filesize from 'filesize'; +import { AxiosError } from 'axios'; +import { Popover, Table } from 'antd'; +import { + ColumnsType, + TablePaginationConfig +} from 'antd/es/table'; +import { NodeIndexOutlined } from '@ant-design/icons'; + +import { getFormattedTime } from '@/v2/utils/momentUtils'; +import { showDataFetchError } from '@/utils/common'; +import { AxiosGetHelper } from '@/utils/axiosRequestHelper'; +import { + Container, ContainerKeysResponse, ContainerReplica, + ContainerTableProps, + ExpandedRowState, KeyResponse +} from '@/v2/types/container.types'; + +const size = filesize.partial({ standard: 'iec' }); + +export const COLUMNS: ColumnsType = [ + { + title: 'Container ID', + dataIndex: 'containerID', + key: 'containerID', + sorter: (a: Container, b: Container) => a.containerID - b.containerID + }, + { + title: 'No. of Keys', + dataIndex: 'keys', + key: 'keys', + sorter: (a: Container, b: Container) => a.keys - b.keys + }, + { + title: 'Actual/Expected Replica(s)', + dataIndex: 'expectedReplicaCount', + key: 'expectedReplicaCount', + render: (expectedReplicaCount: number, record: Container) => { + const actualReplicaCount = record.actualReplicaCount; + return ( + + {actualReplicaCount} / {expectedReplicaCount} + + ); + } + }, + { + title: 'Datanodes', + dataIndex: 'replicas', + key: 'replicas', + render: (replicas: ContainerReplica[]) => { + const renderDatanodes = (replicas: ContainerReplica[]) => { + return replicas?.map((replica: any, idx: number) => ( +
    + {replica.datanodeHost} +
    + )) + } + + return ( + + {replicas.length} datanodes + + ) + } + }, + { + title: 'Pipeline ID', + dataIndex: 'pipelineID', + key: 'pipelineID' + }, + { + title: 'Unhealthy Since', + dataIndex: 'unhealthySince', + key: 'unhealthySince', + render: (unhealthySince: number) => getFormattedTime(unhealthySince, 'lll'), + sorter: (a: Container, b: Container) => a.unhealthySince - b.unhealthySince + } +]; + +const KEY_TABLE_COLUMNS: ColumnsType = [ + { + title: 'Volume', + dataIndex: 'Volume', + key: 'Volume' + }, + { + title: 'Bucket', + dataIndex: 'Bucket', + key: 'Bucket' + }, + { + title: 'Key', + dataIndex: 'Key', + key: 'Key' + }, + { + title: 'Size', + dataIndex: 'DataSize', + key: 'DataSize', + render: (dataSize: number) =>
    {size(dataSize)}
    + }, + { + title: 'Date Created', + dataIndex: 'CreationTime', + key: 'CreationTime', + render: (date: string) => getFormattedTime(date, 'lll') + }, + { + title: 'Date Modified', + dataIndex: 'ModificationTime', + key: 'ModificationTime', + render: (date: string) => getFormattedTime(date, 'lll') + }, + { + title: 'Path', + dataIndex: 'CompletePath', + key: 'path' + } +]; + +const ContainerTable: React.FC = ({ + data, + loading, + selectedColumns, + expandedRow, + expandedRowSetter, + searchColumn = 'containerID', + searchTerm = '' +}) => { + + const cancelSignal = useRef(); + + function filterSelectedColumns() { + const columnKeys = selectedColumns.map((column) => column.value); + return COLUMNS.filter( + (column) => columnKeys.indexOf(column.key as string) >= 0 + ); + } + + function loadRowData(containerID: number) { + const { request, controller } = AxiosGetHelper( + `/api/v1/containers/${containerID}/keys`, + cancelSignal.current + ); + cancelSignal.current = controller; + + request.then(response => { + const containerKeysResponse: ContainerKeysResponse = response.data; + expandedRowSetter({ + ...expandedRow, + [containerID]: { + ...expandedRow[containerID], + loading: false, + dataSource: containerKeysResponse.keys, + totalCount: containerKeysResponse.totalCount + } + }); + }).catch(error => { + expandedRowSetter({ + ...expandedRow, + [containerID]: { + ...expandedRow[containerID], + loading: false + } + }); + showDataFetchError((error as AxiosError).toString()); + }); + } + + function getFilteredData(data: Container[]) { + + return data?.filter( + (container: Container) => { + return (searchColumn === 'containerID') + ? container[searchColumn].toString().includes(searchTerm) + : container[searchColumn].includes(searchTerm) + } + ) ?? []; + } + + function onRowExpandClick(expanded: boolean, record: Container) { + if (expanded) { + loadRowData(record.containerID); + } + else { + cancelSignal.current && cancelSignal.current.abort(); + } + } + + function expandedRowRender(record: Container) { + const containerId = record.containerID + const containerKeys: ExpandedRowState = expandedRow[containerId]; + const dataSource = containerKeys?.dataSource ?? []; + const paginationConfig: TablePaginationConfig = { + showTotal: (total: number, range) => `${range[0]}-${range[1]} of ${total} Keys` + } + + return ( +
    `${record.Volume}/${record.Bucket}/${record.Key}`} + locale={{ filterTitle: '' }} /> + ) + }; + + const paginationConfig: TablePaginationConfig = { + showTotal: (total: number, range) => ( + `${range[0]}-${range[1]} of ${total} Containers` + ), + showSizeChanger: true + }; + + return ( +
    +
    + + ); +} + +export default ContainerTable; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/containerMismatchTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/containerMismatchTable.tsx new file mode 100644 index 00000000000..818eca37f8e --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/containerMismatchTable.tsx @@ -0,0 +1,215 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; +import { AxiosError } from 'axios'; +import { + Dropdown, + Menu, + Popover, + Table, + Tooltip +} from 'antd'; +import { + ColumnsType, + TablePaginationConfig +} from 'antd/es/table'; +import { + MenuProps as FilterMenuProps +} from 'antd/es/menu'; +import { FilterFilled, InfoCircleOutlined } from '@ant-design/icons'; +import { ValueType } from 'react-select'; + +import Search from '@/v2/components/search/search'; +import SingleSelect, { Option } from '@/v2/components/select/singleSelect'; +import { showDataFetchError } from '@/utils/common'; +import { AxiosGetHelper } from '@/utils/axiosRequestHelper'; +import { useDebounce } from '@/v2/hooks/debounce.hook'; +import { LIMIT_OPTIONS } from '@/v2/constants/limit.constants'; + +import { + Container, + MismatchContainersResponse, + Pipelines +} from '@/v2/types/insights.types'; + + +//-----Types----- +type ContainerMismatchTableProps = { + paginationConfig: TablePaginationConfig; + limit: Option; + handleLimitChange: (arg0: ValueType) => void; + expandedRowRender: (arg0: any) => JSX.Element; + onRowExpand: (arg0: boolean, arg1: any) => void; +} + +//-----Components------ +const ContainerMismatchTable: React.FC = ({ + paginationConfig, + limit, + onRowExpand, + expandedRowRender, + handleLimitChange +}) => { + + const [loading, setLoading] = React.useState(false); + const [data, setData] = React.useState(); + const [searchTerm, setSearchTerm] = React.useState(''); + + const cancelSignal = React.useRef(); + const debouncedSearch = useDebounce(searchTerm, 300); + + const handleExistAtChange: FilterMenuProps['onClick'] = ({ key }) => { + if (key === 'OM') { + fetchMismatchContainers('SCM'); + } else { + fetchMismatchContainers('OM'); + } + } + + function filterData(data: Container[] | undefined) { + return data?.filter( + (data: Container) => data.containerId.toString().includes(debouncedSearch) + ); + } + + const COLUMNS: ColumnsType = [ + { + title: 'Container ID', + dataIndex: 'containerId', + key: 'containerId', + width: '20%' + + }, + { + title: 'Count Of Keys', + dataIndex: 'numberOfKeys', + key: 'numberOfKeys', + sorter: (a: Container, b: Container) 
=> a.numberOfKeys - b.numberOfKeys + }, + { + title: 'Pipelines', + dataIndex: 'pipelines', + key: 'pipelines', + render: (pipelines: Pipelines[]) => { + const renderPipelineIds = (pipelineIds: Pipelines[]) => { + return pipelineIds?.map(pipeline => ( +
    + {pipeline.id.id} +
    + )); + } + return ( + + {pipelines.length} pipelines + + ) + } + }, + { + title: <> + + OM + SCM + + }> + + + + SCM: Container exists at SCM but missing at OM.
+ OM: Container exists at OM but missing at SCM. + }> + +
    + , + dataIndex: 'existsAt' + } + ]; + + function fetchMismatchContainers(missingIn: string) { + setLoading(true); + const { request, controller } = AxiosGetHelper( + `/api/v1/containers/mismatch?limit=${limit.value}&missingIn=${missingIn}`, + cancelSignal.current + ); + + cancelSignal.current = controller; + request.then(response => { + const mismatchedContainers: MismatchContainersResponse = response?.data; + setData(mismatchedContainers?.containerDiscrepancyInfo ?? []); + setLoading(false); + }).catch(error => { + setLoading(false); + showDataFetchError((error as AxiosError).toString()); + }) + } + + React.useEffect(() => { + //Fetch containers missing in OM by default + fetchMismatchContainers('OM'); + + return (() => { + cancelSignal.current && cancelSignal.current.abort(); + }) + }, [limit.value]); + + return ( + <> +
    +
    + +
    + ) => setSearchTerm(e.target.value) + } + onChange={() => { }} /> +
    +
    + + ) +} + +export default ContainerMismatchTable; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/deletePendingDirsTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/deletePendingDirsTable.tsx new file mode 100644 index 00000000000..f0c6fc8161e --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/deletePendingDirsTable.tsx @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; +import { AxiosError } from 'axios'; +import Table, { + ColumnsType, + TablePaginationConfig +} from 'antd/es/table'; +import { ValueType } from 'react-select'; + +import Search from '@/v2/components/search/search'; +import SingleSelect, { Option } from '@/v2/components/select/singleSelect'; +import { AxiosGetHelper } from '@/utils/axiosRequestHelper'; +import { byteToSize, showDataFetchError } from '@/utils/common'; +import { getFormattedTime } from '@/v2/utils/momentUtils'; +import { useDebounce } from '@/v2/hooks/debounce.hook'; +import { LIMIT_OPTIONS } from '@/v2/constants/limit.constants'; + +import { DeletedDirInfo } from '@/v2/types/insights.types'; + +//-----Types------ +type DeletePendingDirTableProps = { + paginationConfig: TablePaginationConfig + limit: Option; + handleLimitChange: (arg0: ValueType) => void; +} + +//-----Constants------ +const COLUMNS: ColumnsType = [{ + title: 'Directory Name', + dataIndex: 'key', + key: 'key' +}, +{ + title: 'In state since', + dataIndex: 'inStateSince', + key: 'inStateSince', + render: (inStateSince: number) => { + return getFormattedTime(inStateSince, 'll LTS'); + } +}, +{ + title: 'Path', + dataIndex: 'path', + key: 'path' +}, +{ + title: 'Size', + dataIndex: 'size', + key: 'size', + render: (dataSize: number) => byteToSize(dataSize, 1) +}]; + +//-----Components------ +const DeletePendingDirTable: React.FC = ({ + limit, + paginationConfig, + handleLimitChange +}) => { + + const [loading, setLoading] = React.useState(false); + const [data, setData] = React.useState(); + const [searchTerm, setSearchTerm] = React.useState(''); + + const cancelSignal = React.useRef(); + const debouncedSearch = useDebounce(searchTerm, 300); + + function filterData(data: DeletedDirInfo[] | undefined) { + return data?.filter( + (data: DeletedDirInfo) => data.key.includes(debouncedSearch) + ); + } + + function loadData() { + setLoading(true); + + const { request, controller } = AxiosGetHelper( + `/api/v1/keys/deletePending/dirs?limit=${limit.value}`, + cancelSignal.current + ); + cancelSignal.current = controller; + + request.then(response => { + 
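+ // The /api/v1/keys/deletePending/dirs response carries a deletedDirInfo array; fall back to an empty list when it is absent.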
setData(response?.data?.deletedDirInfo ?? []); + setLoading(false); + }).catch(error => { + setLoading(false); + showDataFetchError((error as AxiosError).toString()); + }); + } + + React.useEffect(() => { + loadData(); + + return (() => cancelSignal.current && cancelSignal.current.abort()); + }, [limit.value]); + + return (<> +
    +
    + +
    + ) => setSearchTerm(e.target.value) + } + onChange={() => { }} /> +
    +
    + ) +} + +export default DeletePendingDirTable; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/deletePendingKeysTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/deletePendingKeysTable.tsx new file mode 100644 index 00000000000..65ada495641 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/deletePendingKeysTable.tsx @@ -0,0 +1,194 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +import React from 'react'; +import { AxiosError } from 'axios'; +import Table, { + ColumnsType, + TablePaginationConfig +} from 'antd/es/table'; +import { ValueType } from 'react-select'; + +import Search from '@/v2/components/search/search'; +import SingleSelect, { Option } from '@/v2/components/select/singleSelect'; +import ExpandedPendingKeysTable from '@/v2/components/tables/insights/expandedPendingKeysTable'; +import { AxiosGetHelper } from '@/utils/axiosRequestHelper'; +import { byteToSize, showDataFetchError } from '@/utils/common'; +import { useDebounce } from '@/v2/hooks/debounce.hook'; +import { LIMIT_OPTIONS } from '@/v2/constants/limit.constants'; + +import { + DeletePendingKey, + DeletePendingKeysResponse +} from '@/v2/types/insights.types'; + +//-----Types------ +type DeletePendingKeysTableProps = { + paginationConfig: TablePaginationConfig + limit: Option; + handleLimitChange: (arg0: ValueType) => void; +} + +type DeletePendingKeysColumns = { + fileName: string; + keyName: string; + dataSize: number; + keyCount: number; +} + +type ExpandedDeletePendingKeys = { + omKeyInfoList: DeletePendingKey[] +} + +//------Constants------ +const COLUMNS: ColumnsType = [ + { + title: 'Key Name', + dataIndex: 'fileName', + key: 'fileName' + }, + { + title: 'Path', + dataIndex: 'keyName', + key: 'keyName', + }, + { + title: 'Total Data Size', + dataIndex: 'dataSize', + key: 'dataSize', + render: (dataSize: number) => byteToSize(dataSize, 1) + }, + { + title: 'Total Key Count', + dataIndex: 'keyCount', + key: 'keyCount', + } +]; + +let expandedDeletePendingKeys: ExpandedDeletePendingKeys[] = []; + +//-----Components------ +const DeletePendingKeysTable: React.FC = ({ + paginationConfig, + limit, + handleLimitChange +}) => { + const [loading, setLoading] = React.useState(false); + const [data, setData] = React.useState(); + const [searchTerm, setSearchTerm] = React.useState(''); + + const cancelSignal = React.useRef(); + const debouncedSearch = useDebounce(searchTerm, 300); + + function filterData(data: DeletePendingKeysColumns[] | undefined) { + return data?.filter( + (data: DeletePendingKeysColumns) => data.keyName.includes(debouncedSearch) + 
); + } + + function expandedRowRender(record: DeletePendingKeysColumns) { + const filteredData = expandedDeletePendingKeys?.flatMap((info) => ( + info.omKeyInfoList?.filter((key) => key.keyName === record.keyName) + )); + return ( + + ) + } + + function fetchDeletePendingKeys() { + setLoading(true); + const { request, controller } = AxiosGetHelper( + `/api/v1/keys/deletePending?limit=${limit.value}`, + cancelSignal.current + ); + cancelSignal.current = controller; + + request.then(response => { + const deletePendingKeys: DeletePendingKeysResponse = response?.data; + let deletedKeyData = []; + // Sum up the data size and organize related key information + deletedKeyData = deletePendingKeys?.deletedKeyInfo?.flatMap((keyInfo) => { + expandedDeletePendingKeys.push(keyInfo); + let count = 0; + let item: DeletePendingKey = keyInfo.omKeyInfoList?.reduce((obj, curr) => { + count += 1; + return { ...curr, dataSize: obj.dataSize + curr.dataSize }; + }, { ...keyInfo.omKeyInfoList[0], dataSize: 0 }); + + return { + dataSize: item.dataSize, + fileName: item.fileName, + keyName: item.keyName, + path: item.path, + keyCount: count + } + }); + setData(deletedKeyData); + setLoading(false); + }).catch(error => { + setLoading(false); + showDataFetchError((error as AxiosError).toString()); + }) + } + + React.useEffect(() => { + fetchDeletePendingKeys(); + expandedDeletePendingKeys = []; + + return (() => { + cancelSignal.current && cancelSignal.current.abort(); + }) + }, [limit.value]); + + return ( + <> +
    +
    + +
    + ) => setSearchTerm(e.target.value) + } + onChange={() => { }} /> +
    +
    + + ) +} + +export default DeletePendingKeysTable; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/deletedContainerKeysTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/deletedContainerKeysTable.tsx new file mode 100644 index 00000000000..9aaf62a63d6 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/deletedContainerKeysTable.tsx @@ -0,0 +1,163 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; +import { AxiosError } from 'axios'; +import Table, { + ColumnsType, + TablePaginationConfig +} from 'antd/es/table'; +import { ValueType } from 'react-select'; + +import Search from '@/v2/components/search/search'; +import SingleSelect, { Option } from '@/v2/components/select/singleSelect'; +import { AxiosGetHelper } from '@/utils/axiosRequestHelper'; +import { showDataFetchError } from '@/utils/common'; +import { useDebounce } from '@/v2/hooks/debounce.hook'; +import { LIMIT_OPTIONS } from '@/v2/constants/limit.constants'; + +import { + Container, + DeletedContainerKeysResponse, + Pipelines +} from '@/v2/types/insights.types'; + +//------Types------- +type DeletedContainerKeysTableProps = { + paginationConfig: TablePaginationConfig; + limit: Option; + handleLimitChange: (arg0: ValueType) => void; + onRowExpand: (arg0: boolean, arg1: any) => void; + expandedRowRender: (arg0: any) => JSX.Element; +} + +//------Constants------ +const COLUMNS: ColumnsType = [ + { + title: 'Container ID', + dataIndex: 'containerId', + key: 'containerId', + width: '20%' + }, + { + title: 'Count Of Keys', + dataIndex: 'numberOfKeys', + key: 'numberOfKeys', + sorter: (a: Container, b: Container) => a.numberOfKeys - b.numberOfKeys + }, + { + title: 'Pipelines', + dataIndex: 'pipelines', + key: 'pipelines', + render: (pipelines: Pipelines[]) => ( +
    + {pipelines && pipelines.map((pipeline: any) => ( +
    + {pipeline.id.id} +
    + ))} +
    + ) + } +]; + +//-----Components------ +const DeletedContainerKeysTable: React.FC = ({ + limit, + paginationConfig, + handleLimitChange, + onRowExpand, + expandedRowRender +}) => { + + const [loading, setLoading] = React.useState(false); + const [data, setData] = React.useState(); + const [searchTerm, setSearchTerm] = React.useState(''); + + const cancelSignal = React.useRef(); + const debouncedSearch = useDebounce(searchTerm, 300); + + function filterData(data: Container[] | undefined) { + return data?.filter( + (data: Container) => data.containerId.toString().includes(debouncedSearch) + ); + } + + function fetchDeletedKeys() { + const { request, controller } = AxiosGetHelper( + `/api/v1/containers/mismatch/deleted?limit=${limit.value}`, + cancelSignal.current + ) + cancelSignal.current = controller; + + request.then(response => { + setLoading(true); + const deletedContainerKeys: DeletedContainerKeysResponse = response?.data; + setData(deletedContainerKeys?.containers ?? []); + setLoading(false); + }).catch(error => { + setLoading(false); + showDataFetchError((error as AxiosError).toString()); + }); + } + + React.useEffect(() => { + fetchDeletedKeys(); + + return (() => { + cancelSignal.current && cancelSignal.current.abort(); + }) + }, [limit.value]); + + + return ( + <> +
    +
    + +
    + ) => setSearchTerm(e.target.value) + } + onChange={() => { }} /> +
    +
    + + ) +} + +export default DeletedContainerKeysTable; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/expandedKeyTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/expandedKeyTable.tsx new file mode 100644 index 00000000000..8b54937e473 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/expandedKeyTable.tsx @@ -0,0 +1,93 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +import React from 'react'; +import moment from 'moment'; +import filesize from 'filesize'; +import Table, { + ColumnsType, + TablePaginationConfig +} from 'antd/es/table'; + +import { MismatchKeys } from '@/v2/types/insights.types'; + + +const size = filesize.partial({ standard: 'iec' }); + +//-----Types------ +type ExpandedKeyTableProps = { + loading: boolean; + data: MismatchKeys[]; + paginationConfig: TablePaginationConfig; +} + +//-----Constants----- +const COLUMNS: ColumnsType = [ + { + title: 'Volume', + dataIndex: 'Volume', + key: 'Volume' + }, + { + title: 'Bucket', + dataIndex: 'Bucket', + key: 'Bucket' + }, + { + title: 'Key', + dataIndex: 'Key', + key: 'Key' + }, + { + title: 'Size', + dataIndex: 'DataSize', + key: 'DataSize', + render: (dataSize: number) =>
    {size(dataSize)}
    + }, + { + title: 'Date Created', + dataIndex: 'CreationTime', + key: 'CreationTime', + render: (date: string) => moment(date).format('lll') + }, + { + title: 'Date Modified', + dataIndex: 'ModificationTime', + key: 'ModificationTime', + render: (date: string) => moment(date).format('lll') + } +]; + +//-----Components------ +const ExpandedKeyTable: React.FC = ({ + loading, + data, + paginationConfig +}) => { + return ( +
    + ) +} + +export default ExpandedKeyTable; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/expandedPendingKeysTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/expandedPendingKeysTable.tsx new file mode 100644 index 00000000000..accb390303b --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/expandedPendingKeysTable.tsx @@ -0,0 +1,81 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +import React from 'react'; +import Table, { + ColumnsType, + TablePaginationConfig +} from 'antd/es/table'; + +import { byteToSize } from '@/utils/common'; +import { getFormattedTime } from '@/v2/utils/momentUtils'; + +import { DeletePendingKey } from '@/v2/types/insights.types'; + +//--------Types-------- +type ExpandedPendingKeysTableProps = { + data: DeletePendingKey[]; + paginationConfig: TablePaginationConfig; +} + +//--------Constants-------- +const COLUMNS: ColumnsType = [{ + title: 'Data Size', + dataIndex: 'dataSize', + key: 'dataSize', + render: (dataSize: any) => dataSize = dataSize > 0 ? byteToSize(dataSize, 1) : dataSize +}, +{ + title: 'Replicated Data Size', + dataIndex: 'replicatedSize', + key: 'replicatedSize', + render: (replicatedSize: any) => replicatedSize = replicatedSize > 0 ? byteToSize(replicatedSize, 1) : replicatedSize +}, +{ + title: 'Creation Time', + dataIndex: 'creationTime', + key: 'creationTime', + render: (creationTime: number) => { + return getFormattedTime(creationTime, 'll LTS'); + } +}, +{ + title: 'Modification Time', + dataIndex: 'modificationTime', + key: 'modificationTime', + render: (modificationTime: number) => { + return getFormattedTime(modificationTime, 'll LTS'); + } +}] + +//--------Component-------- +const ExpandedPendingKeysTable: React.FC = ({ + data, + paginationConfig +}) => { + return ( +
    + ) +} + +export default ExpandedPendingKeysTable; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/openKeysTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/openKeysTable.tsx new file mode 100644 index 00000000000..02c73c77528 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/insights/openKeysTable.tsx @@ -0,0 +1,213 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +import React from 'react'; +import { AxiosError } from 'axios'; +import { + Dropdown, + Menu, + Table +} from 'antd'; +import { + ColumnsType, + TablePaginationConfig +} from 'antd/es/table'; +import { MenuProps } from 'antd/es/menu'; +import { FilterFilled } from '@ant-design/icons'; +import { ValueType } from 'react-select'; + +import Search from '@/v2/components/search/search'; +import SingleSelect, { Option } from '@/v2/components/select/singleSelect'; +import { AxiosGetHelper } from '@/utils/axiosRequestHelper'; +import { byteToSize, showDataFetchError } from '@/utils/common'; +import { getFormattedTime } from '@/v2/utils/momentUtils'; +import { useDebounce } from '@/v2/hooks/debounce.hook'; +import { LIMIT_OPTIONS } from '@/v2/constants/limit.constants'; + +import { OpenKeys, OpenKeysResponse } from '@/v2/types/insights.types'; + + +//--------Types-------- +type OpenKeysTableProps = { + limit: Option; + paginationConfig: TablePaginationConfig; + handleLimitChange: (arg0: ValueType) => void; +} + +//-----Components------ +const OpenKeysTable: React.FC = ({ + limit, + paginationConfig, + handleLimitChange +}) => { + const [loading, setLoading] = React.useState(false); + const [data, setData] = React.useState(); + const [searchTerm, setSearchTerm] = React.useState(''); + + const cancelSignal = React.useRef(); + const debouncedSearch = useDebounce(searchTerm, 300); + + function filterData(data: OpenKeys[] | undefined) { + return data?.filter( + (data: OpenKeys) => data.path.includes(debouncedSearch) + ); + } + + function fetchOpenKeys(isFso: boolean) { + setLoading(true); + + const { request, controller } = AxiosGetHelper( + `/api/v1/keys/open?includeFso=${isFso}&includeNonFso=${!isFso}&limit=${limit.value}`, + cancelSignal.current + ); + cancelSignal.current = controller; + + request.then(response => { + const openKeys: OpenKeysResponse = response?.data ?? { 'fso': [] }; + let allOpenKeys: OpenKeys[]; + if (isFso) { + allOpenKeys = openKeys['fso']?.map((key: OpenKeys) => ({ + ...key, + type: 'FSO' + })) ?? []; + } else { + allOpenKeys = openKeys['nonFSO']?.map((key: OpenKeys) => ({ + ...key, + type: 'Non FSO' + })) ?? 
[]; + } + + setData(allOpenKeys); + setLoading(false); + }).catch(error => { + setLoading(false); + showDataFetchError((error as AxiosError).toString()); + }); + } + + const handleKeyTypeChange: MenuProps['onClick'] = (e) => { + if (e.key === 'fso') { + fetchOpenKeys(true); + } else { + fetchOpenKeys(false); + } + } + + const COLUMNS: ColumnsType = [{ + title: 'Key Name', + dataIndex: 'path', + key: 'path' + }, + { + title: 'Size', + dataIndex: 'size', + key: 'size', + render: (size: any) => size = byteToSize(size, 1) + }, + { + title: 'Path', + dataIndex: 'key', + key: 'key', + width: '270px' + }, + { + title: 'In state since', + dataIndex: 'inStateSince', + key: 'inStateSince', + render: (inStateSince: number) => { + return getFormattedTime(inStateSince, 'll LTS'); + } + }, + { + title: 'Replication Factor', + dataIndex: 'replicationInfo', + key: 'replicationfactor', + render: (replicationInfo: any) => ( +
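The cell markup for the two replication columns is not visible in this hunk; what survives shows the cells reading replicationInfo positionally (Object.values(...)[0] and [2]), which depends on the field order of the JSON payload rather than on the OpenKeys type declared later in this patch. For reference, that declaration is reproduced below together with a name-based alternative (a suggestion, not what the patch does; the field choices are assumptions):

```ts
// Shape of OpenKeys.replicationInfo as declared in insights.types.ts (this patch):
type OpenKeyReplicationInfo = {
  data: number;
  parity: number;
  ecChunkSize: number;
  codec: string;
  replicationType: string;
  requiredNodes: number;
};

// Name-based reads would not depend on key order; the chosen fields are assumptions:
const replicationTypeCell = (info: OpenKeyReplicationInfo) => info.replicationType;
const replicationFactorCell = (info: OpenKeyReplicationInfo) => info.requiredNodes;
```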
    + {Object.values(replicationInfo)[0]} +
    + ) + }, + { + title: 'Replication Type', + dataIndex: 'replicationInfo', + key: 'replicationtype', + render: (replicationInfo: any) => ( +
    + { +
    + {Object.values(replicationInfo)[2]} +
    + } +
    + ) + }, { + title: <> + + FSO + Non-FSO + + }> + + + , + dataIndex: 'type', + key: 'type', + render: (type: string) =>
    {type}
    + }]; + + React.useEffect(() => { + // Fetch FSO open keys by default + fetchOpenKeys(true); + + return (() => cancelSignal.current && cancelSignal.current.abort()); + }, [limit.value]); + + return ( + <> +
    +
    + +
    + ) => setSearchTerm(e.target.value) + } + onChange={() => { }} /> +
    +
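The table element that closes this component is not preserved in the hunk. Based on the surrounding code (filterData() applying the debounced search, the COLUMNS above and the paginationConfig prop), it is presumably along these lines; rowKey and size are assumptions:

```tsx
{/* Sketch of the open-keys table element; names come from this file. */}
<Table
  loading={loading}
  dataSource={filterData(data)}   // client-side filter over the debounced search term
  columns={COLUMNS}
  pagination={paginationConfig}
  rowKey='key'                    // assumed row key
  size='small'
/>
```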
    + + ); +} + +export default OpenKeysTable; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/volumesTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/volumesTable.tsx index 4de0d713fce..ecfbf730a2a 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/volumesTable.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/volumesTable.tsx @@ -96,7 +96,6 @@ const VolumesTable: React.FC = ({ React.useEffect(() => { // On table mount add the actions column - console.log("Adding new column"); const actionsColumn: ColumnType = { title: 'Actions', key: 'actions', diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/constants/breadcrumbs.constants.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/constants/breadcrumbs.constants.tsx new file mode 100644 index 00000000000..807a68cc8d2 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/constants/breadcrumbs.constants.tsx @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +type BreadcrumbNameMap = { + [path: string]: string; +} + +export const breadcrumbNameMap: BreadcrumbNameMap = { + '/Overview': 'Overview', + '/Volumes': 'Volumes', + '/Buckets': 'Buckets', + '/Datanodes': 'Datanodes', + '/Pipelines': 'Pipelines', + '/Containers': 'Containers', + '/Insights': 'Insights', + '/DiskUsage': 'Disk Usage', + '/Heatmap': 'Heatmap', + '/Om': 'OM DB Insights' +}; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/constants/heatmap.constants.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/constants/heatmap.constants.tsx new file mode 100644 index 00000000000..63a8476648f --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/constants/heatmap.constants.tsx @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +export const colourScheme = { + amberAlert: [ + '#FFCF88', + '#FFCA87', + '#FFC586', + '#FFC085', + '#FFBB83', + '#FFB682', + '#FFB181', + '#FFA676', + '#FF9F6F', + '#FF9869', + '#FF9262', + '#FF8B5B', + '#FF8455', + '#FF7D4E', + '#FF8282', + '#FF7776', + '#FF6D6A', + '#FF625F', + '#FF5753', + '#FF4D47', + '#FF423B' + ] +}; + +export const TIME_PERIODS: string[] = ['24H', '7D', '90D'] +export const ENTITY_TYPES: string[] = ['key', 'bucket', 'volume'] +export const ROOT_PATH = '/' diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/constants/limit.constants.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/constants/limit.constants.tsx new file mode 100644 index 00000000000..b76c51c8960 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/constants/limit.constants.tsx @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +import { Option } from '@/v2/components/select/singleSelect'; + +export const LIMIT_OPTIONS: Option[] = [ + { + label: '1000', + value: '1000' + }, + { + label: '5000', + value: '5000' + }, + { + label: '10000', + value: '10000' + }, + { + label: '20000', + value: '20000' + } +]; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.tsx index 1e2de307b17..1c039f42709 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.tsx @@ -26,10 +26,12 @@ import AclPanel from '@/v2/components/aclDrawer/aclDrawer'; import Search from '@/v2/components/search/search'; import MultiSelect from '@/v2/components/select/multiSelect'; import SingleSelect, { Option } from '@/v2/components/select/singleSelect'; +import BucketsTable, { COLUMNS } from '@/v2/components/tables/bucketsTable'; import { AutoReloadHelper } from '@/utils/autoReloadHelper'; import { AxiosGetHelper, cancelRequests } from "@/utils/axiosRequestHelper"; import { showDataFetchError } from '@/utils/common'; +import { LIMIT_OPTIONS } from '@/v2/constants/limit.constants'; import { useDebounce } from '@/v2/hooks/debounce.hook'; import { @@ -39,27 +41,6 @@ import { } from '@/v2/types/bucket.types'; import './buckets.less'; -import BucketsTable, { COLUMNS } from '@/v2/components/tables/bucketsTable'; - - -const LIMIT_OPTIONS: Option[] = [ - { - label: '1000', - value: '1000' - }, - { - label: '5000', - value: '5000' - }, - { - label: '10000', - value: '10000' - }, - { - label: '20000', - value: '20000' - } -] const SearchableColumnOpts = [{ label: 'Bucket', @@ -282,7 +263,7 @@ const Buckets: React.FC<{}> = () => { onReload={loadData} /> -
    +
    diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/containers/containers.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/containers/containers.less new file mode 100644 index 00000000000..f6328ccee64 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/containers/containers.less @@ -0,0 +1,50 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +.content-div { + min-height: unset; + + .table-header-section { + display: flex; + justify-content: space-between; + align-items: center; + + .table-filter-section { + font-size: 14px; + font-weight: normal; + display: flex; + column-gap: 8px; + padding: 16px 8px; + align-items: center; + } + } +} + +.highlight-content { + color: #989898; + + .highlight-content-value { + color: #000000; + font-weight: 400; + font-size: 30px; + } +} + +.datanode-container-v2 { + padding: 6px 0px; +} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/containers/containers.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/containers/containers.tsx new file mode 100644 index 00000000000..78f6424c6e7 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/containers/containers.tsx @@ -0,0 +1,283 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import React, { useRef, useState } from "react"; +import moment from "moment"; +import { AxiosError } from "axios"; +import { Card, Row, Tabs } from "antd"; +import { ValueType } from "react-select/src/types"; + +import Search from "@/v2/components/search/search"; +import MultiSelect, { Option } from "@/v2/components/select/multiSelect"; +import ContainerTable, { COLUMNS } from "@/v2/components/tables/containersTable"; +import AutoReloadPanel from "@/components/autoReloadPanel/autoReloadPanel"; +import { showDataFetchError } from "@/utils/common"; +import { AutoReloadHelper } from "@/utils/autoReloadHelper"; +import { AxiosGetHelper, cancelRequests } from "@/utils/axiosRequestHelper"; +import { useDebounce } from "@/v2/hooks/debounce.hook"; + +import { + Container, + ContainerState, + ExpandedRow +} from "@/v2/types/container.types"; + +import './containers.less'; + + +const SearchableColumnOpts = [{ + label: 'Container ID', + value: 'containerID' +}, { + label: 'Pipeline ID', + value: 'pipelineID' +}] + +const defaultColumns = COLUMNS.map(column => ({ + label: column.title as string, + value: column.key as string +})); + +const Containers: React.FC<{}> = () => { + + const cancelSignal = useRef(); + + const [state, setState] = useState({ + lastUpdated: 0, + columnOptions: defaultColumns, + missingContainerData: [], + underReplicatedContainerData: [], + overReplicatedContainerData: [], + misReplicatedContainerData: [], + }); + const [expandedRow, setExpandedRow] = useState({}); + + const [loading, setLoading] = useState(false); + const [selectedColumns, setSelectedColumns] = useState(defaultColumns); + const [searchTerm, setSearchTerm] = useState(''); + const [selectedTab, setSelectedTab] = useState('1'); + const [searchColumn, setSearchColumn] = useState<'containerID' | 'pipelineID'>('containerID'); + + const debouncedSearch = useDebounce(searchTerm, 300); + + function loadData() { + setLoading(true); + + const { request, controller } = AxiosGetHelper( + '/api/v1/containers/unhealthy', + cancelSignal.current + ); + + cancelSignal.current = controller; + + request.then(response => { + const containers: Container[] = response.data.containers; + + const missingContainerData: Container[] = containers?.filter( + container => container.containerState === 'MISSING' + ) ?? []; + const underReplicatedContainerData: Container[] = containers?.filter( + container => container.containerState === 'UNDER_REPLICATED' + ) ?? []; + const overReplicatedContainerData: Container[] = containers?.filter( + container => container.containerState === 'OVER_REPLICATED' + ) ?? []; + const misReplicatedContainerData: Container[] = containers?.filter( + container => container.containerState === 'MIS_REPLICATED' + ) ?? 
[]; + + setState({ + ...state, + missingContainerData: missingContainerData, + underReplicatedContainerData: underReplicatedContainerData, + overReplicatedContainerData: overReplicatedContainerData, + misReplicatedContainerData: misReplicatedContainerData, + lastUpdated: Number(moment()) + }); + setLoading(false) + }).catch(error => { + setLoading(false); + showDataFetchError((error as AxiosError).toString()); + }); + } + + function handleColumnChange(selected: ValueType) { + setSelectedColumns(selected as Option[]); + } + + const autoReloadHelper: AutoReloadHelper = new AutoReloadHelper(loadData); + + React.useEffect(() => { + autoReloadHelper.startPolling(); + loadData(); + + return (() => { + autoReloadHelper.stopPolling(); + cancelRequests([cancelSignal.current!]) + }) + }, []); + + const { + lastUpdated, columnOptions, + missingContainerData, underReplicatedContainerData, + overReplicatedContainerData, misReplicatedContainerData + } = state; + + // Mapping the data to the Tab keys for enabling/disabling search + const dataToTabKeyMap: Record = { + 1: missingContainerData, + 2: underReplicatedContainerData, + 3: overReplicatedContainerData, + 4: misReplicatedContainerData + } + + const highlightData = ( +
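The markup of highlightData is not preserved in this hunk. Given the four counts computed above and the .highlight-content / .highlight-content-value classes added in containers.less earlier in this patch, each summary block is presumably shaped like the sketch below (layout and class placement are inferred from the stylesheet, not copied from the patch):

```tsx
{/* One of the four summary blocks inside highlightData; the others follow the same pattern. */}
<div className='highlight-content'>
  Missing
  <div className='highlight-content-value'>
    {missingContainerData?.length ?? 'N/A'}
  </div>
</div>
```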
    +
    + Missing
    + {missingContainerData?.length ?? 'N/A'} +
    +
    + Under-Replicated
    + {underReplicatedContainerData?.length ?? 'N/A'} +
    +
    + Over-Replicated
    + {overReplicatedContainerData?.length ?? 'N/A'} +
    +
    + Mis-Replicated
    + {misReplicatedContainerData?.length ?? 'N/A'} +
    +
    + ) + + return ( + <> +
    + Containers + +
    +
    +
    + + + {highlightData} + + +
    +
    +
    +
    + { }} + columnLength={columnOptions.length} /> +
    + ) => setSearchTerm(e.target.value) + } + onChange={(value) => { + setSearchTerm(''); + setSearchColumn(value as 'containerID' | 'pipelineID'); + }} /> +
    + setSelectedTab(activeKey)}> + + + + + + + + + + + + + +
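The individual tab panes are stripped from this hunk. Each unhealthy-container state presumably gets a Tabs.TabPane wrapping a ContainerTable wired to the props named in ContainerTableProps (container.types.ts in this patch); the pane titles and keys below are assumptions. The dataToTabKeyMap defined above is presumably what lets the search box act on the active tab's dataset.

```tsx
{/* Sketch of one pane; the other three states follow the same pattern with their arrays. */}
<Tabs.TabPane key='1' tab='Missing'>
  <ContainerTable
    loading={loading}
    data={missingContainerData}
    searchColumn={searchColumn}
    searchTerm={debouncedSearch}
    selectedColumns={selectedColumns}
    expandedRow={expandedRow}
    expandedRowSetter={setExpandedRow}
  />
</Tabs.TabPane>
```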
    +
    + + ); +} + +export default Containers; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/datanodes/datanodes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/datanodes/datanodes.tsx index 13022dc05e0..fe22d08dafd 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/datanodes/datanodes.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/datanodes/datanodes.tsx @@ -234,7 +234,7 @@ const Datanodes: React.FC<{}> = () => { togglePolling={autoReloadHelper.handleAutoReloadToggle} onReload={loadData} />
    -
    +
    diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/diskUsage/diskUsage.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/diskUsage/diskUsage.tsx index 1e92780619b..57d7a612c34 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/diskUsage/diskUsage.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/diskUsage/diskUsage.tsx @@ -27,7 +27,7 @@ import { import { ValueType } from 'react-select'; import DUMetadata from '@/v2/components/duMetadata/duMetadata'; -import DUPieChart from '@/v2/components/duPieChart/duPieChart'; +import DUPieChart from '@/v2/components/plots/duPieChart'; import SingleSelect, { Option } from '@/v2/components/select/singleSelect'; import DUBreadcrumbNav from '@/v2/components/duBreadcrumbNav/duBreadcrumbNav'; import { showDataFetchError } from '@/utils/common'; @@ -103,7 +103,7 @@ const DiskUsage: React.FC<{}> = () => {
    Disk Usage
    -
    +
    = () => { + + const [state, setState] = useState({ + heatmapResponse: { + label: '', + path: '', + children: [], + size: 0, + maxAccessCount: 0, + minAccessCount: 0 + }, + entityType: CONSTANTS.ENTITY_TYPES[0], + date: CONSTANTS.TIME_PERIODS[0] + }); + + const [inputPathState, setInputPathState] = useState({ + inputPath: CONSTANTS.ROOT_PATH, + isInputPathValid: undefined, + helpMessage: '' + }); + + const [isLoading, setLoading] = useState(false); + const [treeEndpointFailed, setTreeEndpointFailed] = useState(false); + + const location = useLocation(); + const cancelSignal = useRef(); + const cancelDisabledFeatureSignal = useRef(); + + const [isHeatmapEnabled, setIsHeatmapEnabled] = useState(location?.state?.isHeatmapEnabled); + + + function handleChange(e: ChangeEvent) { + const value = e.target.value; + // Only allow letters, numbers,underscores and forward slashes and hyphen + const regex = /^[a-zA-Z0-9_/-]*$/; + + let inputValid = undefined; + let helpMessage = ''; + if (!regex.test(value)) { + helpMessage = 'Please enter valid path'; + inputValid = 'error'; + } + setInputPathState({ + inputPath: value, + isInputPathValid: inputValid as InputPathValidTypes, + helpMessage: helpMessage + }); + } + + function handleSubmit() { + updateHeatmap(inputPathState.inputPath, state.entityType, state.date); + } + + const normalize = (min: number, max: number, size: number) => { + // Since there can be a huge difference between the largest entity size + // and the smallest entity size, it might cause some blocks to render smaller + // we are normalizing the size to ensure all entities are visible + //Normaized Size using Deviation and mid Point + const mean = (max + min) / 2; + const highMean = (max + mean) / 2; + const lowMean1 = (min + mean) / 2; + const lowMean2 = (lowMean1 + min) / 2; + + if (size > highMean) { + const newsize = highMean + (size * 0.1); + return (newsize); + } + if (size < lowMean2) { + const diff = (lowMean2 - size) / 2; + const newSize = lowMean2 - diff; + return (newSize); + } + + return size; + }; + + const updateSize = (obj: HeatmapResponse | HeatmapChild) => { + //Normalize Size so other blocks also get visualized if size is large in bytes minimize and if size is too small make it big + // it will only apply on leaf level as checking color property + if (obj.hasOwnProperty('size') && obj.hasOwnProperty('color')) { + + // hide block at key,volume,bucket level if size accessCount and maxAccessCount are zero apply normalized size only for leaf level + if ((obj as HeatmapChild)?.size === 0 && (obj as HeatmapChild)?.accessCount === 0) { + obj['normalizedSize'] = 0; + } else if ((obj as HeatmapResponse)?.size === 0 && (obj as HeatmapResponse)?.maxAccessCount === 0) { + obj['normalizedSize'] = 0; + } + else if (obj?.size === 0 && ((obj as HeatmapChild)?.accessCount >= 0 || (obj as HeatmapResponse).maxAccessCount >= 0)) { + obj['normalizedSize'] = 1; + obj.size = 0; + } + else { + const newSize = normalize(minSize, maxSize, obj.size); + obj['normalizedSize'] = newSize; + } + } + + if (obj.hasOwnProperty('children')) { + (obj as HeatmapResponse)?.children.forEach(child => updateSize(child)); + } + return obj as HeatmapResponse; + }; + + const updateHeatmap = (path: string, entityType: string, date: string | number) => { + // Only perform requests if the heatmap is enabled + if (isHeatmapEnabled) { + setLoading(true); + // We want to ensure these are not empty as they will be passed as path params + if (date && path && entityType) { + const { request, controller } = 
AxiosGetHelper( + `/api/v1/heatmap/readaccess?startDate=${date}&path=${path}&entityType=${entityType}`, + cancelSignal.current + ); + cancelSignal.current = controller; + + request.then(response => { + if (response?.status === 200) { + minSize = response.data.minAccessCount; + maxSize = response.data.maxAccessCount; + const heatmapResponse: HeatmapResponse = updateSize(response.data); + setLoading(false); + setState(prevState => ({ + ...prevState, + heatmapResponse: heatmapResponse + })); + } else { + const error = new Error((response.status).toString()) as IResponseError; + error.status = response.status; + error.message = `Failed to fetch Heatmap Response with status ${error.status}` + throw error; + } + }).catch(error => { + setLoading(false); + setInputPathState(prevState => ({ + ...prevState, + inputPath: CONSTANTS.ROOT_PATH + })); + setTreeEndpointFailed(true); + if (error.response.status !== 404) { + showDataFetchError(error.message.toString()); + } + }); + } else { + setLoading(false); + } + + } + } + + const updateHeatmapParent = (path: string) => { + setInputPathState(prevState => ({ + ...prevState, + inputPath: path + })); + } + + function isDateDisabled(current: Moment) { + return current > moment() || current < moment().subtract(90, 'day'); + } + + function getIsHeatmapEnabled() { + const disabledfeaturesEndpoint = `/api/v1/features/disabledFeatures`; + const { request, controller } = AxiosGetHelper( + disabledfeaturesEndpoint, + cancelDisabledFeatureSignal.current + ) + cancelDisabledFeatureSignal.current = controller; + request.then(response => { + setIsHeatmapEnabled(!response?.data?.includes('HEATMAP')); + }).catch(error => { + showDataFetchError((error as Error).toString()); + }); + } + + React.useEffect(() => { + // We do not know if heatmap is enabled or not, so set it + if (isHeatmapEnabled === undefined) { + getIsHeatmapEnabled(); + } + updateHeatmap(inputPathState.inputPath, state.entityType, state.date); + + return (() => { + cancelSignal.current && cancelSignal.current.abort(); + }) + }, [isHeatmapEnabled, state.entityType, state.date]); + + const handleDatePickerChange = (date: moment.MomentInput) => { + setState(prevState => ({ + ...prevState, + date: moment(date).unix() + })); + }; + + const handleMenuChange: MenuProps["onClick"] = (e) => { + if (CONSTANTS.ENTITY_TYPES.includes(e.key as string)) { + minSize = Infinity; + maxSize = 0; + setState(prevState => ({ + ...prevState, + entityType: e.key as string, + })); + } + }; + + const handleCalendarChange: MenuProps["onClick"] = (e) => { + if (CONSTANTS.TIME_PERIODS.includes(e.key as string)) { + setState(prevState => ({ + ...prevState, + date: e.key + })); + } + }; + + const { date, entityType, heatmapResponse } = state; + const { inputPath, helpMessage, isInputPathValid } = inputPathState; + + const menuCalendar = ( + + + 24 Hour + + + 7 Days + + + 90 Days + + + + { e.stopPropagation() }} + disabledDate={isDateDisabled} /> + + + + ); + + const entityTypeMenu = ( + + + Volume + + + Bucket + + + Key + + + ); + + function getErrorContent() { + if (!isHeatmapEnabled) { + return + } + + if (treeEndpointFailed) { + return + } + } + + return ( + <> +
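The normalize() helper defined above compresses the size range so that both very large and very small entities stay visible in the heatmap treemap. A standalone illustration of that behaviour, with made-up byte sizes (the thresholds mirror the mean / high-mean / low-mean logic in heatmap.tsx; this copy is for illustration only and is not exported by the patch):

```ts
// Standalone copy of the normalization logic, for illustration.
function normalizeSize(min: number, max: number, size: number): number {
  const mean = (max + min) / 2;
  const highMean = (max + mean) / 2;              // upper threshold
  const lowMean2 = ((min + mean) / 2 + min) / 2;  // lower threshold

  if (size > highMean) return highMean + size * 0.1;             // damp very large entities
  if (size < lowMean2) return lowMean2 - (lowMean2 - size) / 2;  // boost very small ones
  return size;                                                   // mid-range passes through
}

// Example with min = 1 KB and max = 1 GB (made-up values):
// normalizeSize(1024, 1073741824, 1073741824) ≈ 9.1e8  -> the largest block shrinks
// normalizeSize(1024, 1073741824, 2048)       ≈ 6.7e7  -> a tiny block becomes visible
```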
    + Heatmap +
    +
    + { + (!isHeatmapEnabled || treeEndpointFailed) + ? getErrorContent() + :
    +
    +
    +
    +

    Path

    + + + +
    +
    + + + +
    +
    + + + +
    +
    +
    +
    +
    + Less Accessed +
    +
    +
    + Moderate Accessed +
    +
    +
    + Most Accessed +
    +
    +
    + {isLoading + ? + : (Object.keys(heatmapResponse).length > 0 && (heatmapResponse.label !== null || heatmapResponse.path !== null)) + ?
    + +
    + : + } +
    + } +
    + + ); +} + +export default Heatmap; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/insights/insights.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/insights/insights.less new file mode 100644 index 00000000000..dfc0ef31438 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/insights/insights.less @@ -0,0 +1,36 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +.content-div { + min-height: unset; + + .table-header-section { + display: flex; + justify-content: space-between; + align-items: center; + + .table-filter-section { + font-size: 14px; + font-weight: normal; + display: flex; + column-gap: 8px; + padding: 16px 8px; + align-items: center; + } + } +} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/insights/insights.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/insights/insights.tsx new file mode 100644 index 00000000000..f2a2c3e3f7d --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/insights/insights.tsx @@ -0,0 +1,196 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import React, { useState } from 'react'; +import axios, { + CanceledError, + AxiosError +} from 'axios'; +import { Row, Col, Card, Result } from 'antd'; + +import { showDataFetchError } from '@/utils/common'; +import { PromiseAllSettledGetHelper } from '@/utils/axiosRequestHelper'; + +import { Option } from '@/v2/components/select/multiSelect'; +import FileSizeDistribution from '@/v2/components/plots/insightsFilePlot'; +import ContainerSizeDistribution from '@/v2/components/plots/insightsContainerPlot'; + +import { + FileCountResponse, + InsightsState, + PlotResponse, +} from '@/v2/types/insights.types'; + +const Insights: React.FC<{}> = () => { + + const [loading, setLoading] = useState(false); + const [state, setState] = useState({ + volumeBucketMap: new Map>(), + volumeOptions: [], + fileCountError: undefined, + containerSizeError: undefined + }); + const [plotResponse, setPlotResponse] = useState({ + fileCountResponse: [{ + volume: '', + bucket: '', + fileSize: 0, + count: 0 + }], + containerCountResponse: [{ + containerSize: 0, + count: 0 + }] + }); + + const cancelInsightSignal = React.useRef(); + + function loadData() { + setLoading(true); + const { requests, controller } = PromiseAllSettledGetHelper([ + '/api/v1/utilization/fileCount', + '/api/v1/utilization/containerCount' + ], cancelInsightSignal.current); + + cancelInsightSignal.current = controller; + requests.then(axios.spread(( + fileCountResponse: Awaited>, + containerCountResponse: Awaited> + ) => { + let fileAPIError; + let containerAPIError; + let responseError = [ + fileCountResponse, + containerCountResponse + ].filter((resp) => resp.status === 'rejected'); + + if (responseError.length !== 0) { + responseError.forEach((err) => { + if (err.reason.toString().includes('CancelledError')) { + throw new CanceledError('canceled', 'ERR_CANCELED'); + } else { + if (err.reason.config.url.includes("fileCount")) { + fileAPIError = err.reason.toString(); + } else { + containerAPIError = err.reason.toString(); + } + } + }); + } + + // Construct volume -> bucket[] map for populating filters + // Ex: vol1 -> [bucket1, bucket2], vol2 -> [bucket1] + const volumeBucketMap: Map> = fileCountResponse.value?.data?.reduce( + (map: Map>, current: FileCountResponse) => { + const volume = current.volume; + const bucket = current.bucket; + if (map.has(volume)) { + const buckets = Array.from(map.get(volume)!); + map.set(volume, new Set([...buckets, bucket])); + } else { + map.set(volume, new Set().add(bucket)); + } + return map; + }, + new Map>() + ); + const volumeOptions: Option[] = Array.from(volumeBucketMap.keys()).map(k => ({ + label: k, + value: k + })); + + setState({ + ...state, + volumeBucketMap: volumeBucketMap, + volumeOptions: volumeOptions, + fileCountError: fileAPIError, + containerSizeError: containerAPIError + }); + setPlotResponse({ + fileCountResponse: fileCountResponse.value?.data ?? [{ + volume: '', + bucket: '', + fileSize: 0, + count: 0 + }], + containerCountResponse: containerCountResponse.value?.data ?? [{ + containerSize: 0, + count: 0 + }] + }); + setLoading(false); + })).catch(error => { + setLoading(false); + showDataFetchError((error as AxiosError).toString()); + }) + } + + React.useEffect(() => { + loadData(); + + return (() => { + cancelInsightSignal.current && cancelInsightSignal.current.abort(); + }) + }, []); + + return ( + <> +
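The reduce in loadData() above folds the fileCount rows into a volume-to-buckets map that feeds the filter dropdowns. A small worked example of that transformation, using made-up rows shaped like FileCountResponse from insights.types.ts (the fold below is a slightly more compact but equivalent form of the one in loadData()):

```ts
// Made-up FileCountResponse rows: (volume, bucket, fileSize, count)
const rows = [
  { volume: 'vol1', bucket: 'bucketA', fileSize: 1024, count: 10 },
  { volume: 'vol1', bucket: 'bucketB', fileSize: 2048, count: 3 },
  { volume: 'vol2', bucket: 'bucketA', fileSize: 4096, count: 7 }
];

// volume -> Set of bucket names
const volumeBucketMap = rows.reduce((map, { volume, bucket }) => {
  map.set(volume, new Set([...(map.get(volume) ?? []), bucket]));
  return map;
}, new Map<string, Set<string>>());

// Result: Map { 'vol1' => Set {'bucketA', 'bucketB'}, 'vol2' => Set {'bucketA'} }
// The volume dropdown options are then just the map keys mapped to { label, value } pairs.
```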
    + Insights +
    +
    + { + loading + ? + : <> + +
    + + {plotResponse.fileCountResponse?.length > 0 + ? + : } + + + + + + {plotResponse.containerCountResponse?.length > 0 + ? + : } + + + + + } + + + ) + +} + +export default Insights; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/insights/omInsights.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/insights/omInsights.tsx new file mode 100644 index 00000000000..732af0aa00e --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/insights/omInsights.tsx @@ -0,0 +1,208 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; +import { AxiosError } from 'axios'; +import { ValueType } from 'react-select'; +import { Tabs, Tooltip } from 'antd'; +import { TablePaginationConfig } from 'antd/es/table'; +import { InfoCircleOutlined } from '@ant-design/icons'; + +import { Option } from '@/v2/components/select/singleSelect'; +import ContainerMismatchTable from '@/v2/components/tables/insights/containerMismatchTable'; +import DeletedContainerKeysTable from '@/v2/components/tables/insights/deletedContainerKeysTable'; +import DeletePendingDirTable from '@/v2/components/tables/insights/deletePendingDirsTable'; +import DeletePendingKeysTable from '@/v2/components/tables/insights/deletePendingKeysTable'; +import ExpandedKeyTable from '@/v2/components/tables/insights/expandedKeyTable'; +import OpenKeysTable from '@/v2/components/tables/insights/openKeysTable'; +import { showDataFetchError } from '@/utils/common'; +import { AxiosGetHelper } from '@/utils/axiosRequestHelper'; + +import { + Container, + ExpandedRow, + ExpandedRowState, + MismatchKeysResponse +} from '@/v2/types/insights.types'; + +import './insights.less'; +import { useLocation } from 'react-router-dom'; + + +const OMDBInsights: React.FC<{}> = () => { + + const [loading, setLoading] = React.useState(false); + const [expandedRowData, setExpandedRowData] = React.useState({}); + const [selectedLimit, setSelectedLimit] = React.useState = () => { ) } ]} - linkToUrl='/Om' /> + linkToUrl='/Om' + state={{activeTab: '3'}} /> + + OM ID:  + {omServiceId} + + | + + SCM ID:  + {scmServiceId} + ); diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/pipelines/pipelines.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/pipelines/pipelines.tsx index 9059da91f91..f6ff87c7e13 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/pipelines/pipelines.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/pipelines/pipelines.tsx @@ -120,7 +120,7 @@ const Pipelines: React.FC<{}> = 
() => { togglePolling={autoReloadHelper.handleAutoReloadToggle} onReload={loadData} /> -
    +
    diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/volumes/volumes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/volumes/volumes.tsx index cb25cedbcec..b4614d387f3 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/volumes/volumes.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/volumes/volumes.tsx @@ -30,6 +30,7 @@ import Search from '@/v2/components/search/search'; import { showDataFetchError } from '@/utils/common'; import { AutoReloadHelper } from '@/utils/autoReloadHelper'; import { AxiosGetHelper, cancelRequests } from "@/utils/axiosRequestHelper"; +import { LIMIT_OPTIONS } from '@/v2/constants/limit.constants'; import { useDebounce } from '@/v2/hooks/debounce.hook'; import { @@ -55,13 +56,6 @@ const SearchableColumnOpts = [ } ] -const LIMIT_OPTIONS: Option[] = [ - { label: '1000', value: '1000' }, - { label: '5000', value: "5000" }, - { label: '10000', value: "10000" }, - { label: '20000', value: "20000" } -] - const Volumes: React.FC<{}> = () => { const cancelSignal = useRef(); @@ -182,7 +176,7 @@ const Volumes: React.FC<{}> = () => { onReload={loadData} />
    -
    +
    diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/routes-v2.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/routes-v2.tsx index 20907fd3ad5..fb2dc0b9c45 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/routes-v2.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/routes-v2.tsx @@ -23,6 +23,11 @@ const Buckets = lazy(() => import('@/v2/pages/buckets/buckets')); const Datanodes = lazy(() => import('@/v2/pages/datanodes/datanodes')); const Pipelines = lazy(() => import('@/v2/pages/pipelines/pipelines')); const DiskUsage = lazy(() => import('@/v2/pages/diskUsage/diskUsage')); +const Containers = lazy(() => import('@/v2/pages/containers/containers')); +const Insights = lazy(() => import('@/v2/pages/insights/insights')); +const OMDBInsights = lazy(() => import('@/v2/pages/insights/omInsights')); +const Heatmap = lazy(() => import('@/v2/pages/heatmap/heatmap')); + export const routesV2 = [ { @@ -48,5 +53,21 @@ export const routesV2 = [ { path: '/DiskUsage', component: DiskUsage + }, + { + path: '/Containers', + component: Containers + }, + { + path: '/Insights', + component: Insights + }, + { + path: '/Om', + component: OMDBInsights + }, + { + path: '/Heatmap', + component: Heatmap } ]; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/container.types.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/container.types.ts new file mode 100644 index 00000000000..2467a0f26fd --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/container.types.ts @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import { Option } from "@/v2/components/select/multiSelect"; + +export type ContainerReplica = { + containerId: number; + datanodeUuid: string; + datanodeHost: string; + firstSeenTime: number; + lastSeenTime: number; + lastBcsId: number; +} + +export type Container = { + containerID: number; + containerState: string; + unhealthySince: number; + expectedReplicaCount: number; + actualReplicaCount: number; + replicaDeltaCount: number; + reason: string; + keys: number; + pipelineID: string; + replicas: ContainerReplica[]; +} + +type KeyResponseBlock = { + containerID: number; + localID: number; +} + +export type KeyResponse = { + Volume: string; + Bucket: string; + Key: string; + DataSize: number; + CompletePath: string; + Versions: number[]; + Blocks: Record; + CreationTime: string; + ModificationTime: string; +} + +export type ContainerKeysResponse = { + totalCount: number; + keys: KeyResponse[]; +} + +export type ContainerTableProps = { + loading: boolean; + data: Container[]; + searchColumn: 'containerID' | 'pipelineID'; + searchTerm: string; + selectedColumns: Option[]; + expandedRow: ExpandedRow; + expandedRowSetter: (arg0: ExpandedRow) => void; +} + + +export type ExpandedRow = { + [key: number]: ExpandedRowState; +} + +export type ExpandedRowState = { + loading: boolean; + containerId: number; + dataSource: KeyResponse[]; + totalCount: number; +} + +export type ContainerState = { + lastUpdated: number; + columnOptions: Option[]; + missingContainerData: Container[]; + underReplicatedContainerData: Container[]; + overReplicatedContainerData: Container[]; + misReplicatedContainerData: Container[]; +} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/heatmap.types.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/heatmap.types.ts new file mode 100644 index 00000000000..a76db22a6fe --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/heatmap.types.ts @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +export type InputPathValidTypes = 'error' | 'success' | 'warning' | 'validating' | undefined; + +export type HeatmapChild = { + label: string; + size: number; + accessCount: number; + color: number; +} + +export type InputPathState = { + inputPath: string; + isInputPathValid: InputPathValidTypes; + helpMessage: string; +} + +export type HeatmapResponse = { + label: string; + path: string; + maxAccessCount: number; + minAccessCount: number; + size: number; + children: HeatmapChild[]; +} + +export type HeatmapState = { + heatmapResponse: HeatmapResponse; + entityType: string; + date: string | number; +} + +export interface IResponseError extends Error { + status?: number; +} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/insights.types.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/insights.types.ts new file mode 100644 index 00000000000..d608a2bc8d1 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/insights.types.ts @@ -0,0 +1,199 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import { Option } from "@/v2/components/select/multiSelect"; + +export type FileCountResponse = { + volume: string; + bucket: string; + fileSize: number; + count: number; +} + +export type ContainerCountResponse = { + containerSize: number; + count: number; +} + +export type PlotResponse = { + fileCountResponse: FileCountResponse[], + containerCountResponse: ContainerCountResponse[] +} + +export type FilePlotData = { + fileCountValues: string[]; + fileCountMap: Map; +} + +export type ContainerPlotData = { + containerCountValues: string[]; + containerCountMap: Map; +} + +export type InsightsState = { + volumeBucketMap: Map>; + volumeOptions: Option[]; + fileCountError: string | undefined; + containerSizeError: string | undefined; +} + +//-------------------------// +//---OM DB Insights types--- +//-------------------------// +type ReplicationConfig = { + replicationFactor: string; + requiredNodes: number; + replicationType: string; +} + +export type Pipelines = { + id: { + id: string; + }, + replicationConfig: ReplicationConfig; + healthy: boolean; +} + +// Container Mismatch Info +export type Container = { + containerId: number; + numberOfKeys: number; + pipelines: Pipelines[]; + existsAt: 'OM' | 'SCM'; +} + +export type MismatchContainersResponse = { + containerDiscrepancyInfo: Container[]; +} + +// Deleted Container Keys +export type DeletedContainerKeysResponse = { + containers: Container[]; +} + +export type MismatchKeys = { + Volume: string; + Bucket: string; + Key: string; + DataSize: number; + Versions: number[]; + Blocks: Record + CreationTime: string; + ModificationTime: string; +} + +export type MismatchKeysResponse = { + totalCount: number; + keys: MismatchKeys[]; +} + +// Open Keys +export type OpenKeys = { + key: string; + path: string; + inStateSince: number; + size: number; + replicatedSize: number; + replicationInfo: { + data: number; + parity: number; + ecChunkSize: number; + codec: string; + replicationType: string; + requiredNodes: number; + } + creationTime: number; + modificationTime: number; + isKey: boolean; +} + +export type OpenKeysResponse = { + lastKey: string; + replicatedDataSize: number; + unreplicatedDataSize: number; + fso?: OpenKeys[]; + nonFSO?: OpenKeys[]; +} + +//Keys pending deletion +export type DeletePendingKey = { + objectID: number; + updateID: number; + parentObjectID: number; + volumeName: string; + bucketName: string; + keyName: string; + dataSize: number; + creationTime: number; + modificationTime: number; + replicationConfig: ReplicationConfig; + fileChecksum: number | null; + fileName: string; + file: boolean; + path: string; + hsync: boolean; + replicatedSize: number; + fileEncryptionInfo: string | null; + objectInfo: string; + updateIDSet: boolean; +} + +export type DeletePendingKeysResponse = { + lastKey: string; + keysSummary: { + totalUnreplicatedDataSize: number, + totalReplicatedDataSize: number, + totalDeletedKeys: number + }, + replicatedDataSize: number; + unreplicatedDataSize: number; + deletedKeyInfo: { + omKeyInfoList: DeletePendingKey[] + }[]; +} + +//Directories Pending for Deletion +export type DeletedDirInfo = { + key: string; + path: string; + inStateSince: number; + size: number; + replicatedSize: number; + replicationInfo: ReplicationConfig; + creationTime: number; + modificationTime: number; + isKey: boolean; +} + +export type DeletedDirReponse = { + lastKey: string; + replicatedDataSize: number; + unreplicatedDataSize: number; + deletedDirInfo: DeletedDirInfo[]; + status: string; +} + +export type ExpandedRow = { 
+ [key: number]: ExpandedRowState; +} + +export type ExpandedRowState = { + containerId: number; + dataSource: MismatchKeys[]; + totalCount: number; +} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/utils/momentUtils.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/utils/momentUtils.ts index fb553d0db3f..daaae2d54d3 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/utils/momentUtils.ts +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/utils/momentUtils.ts @@ -61,3 +61,8 @@ export function getDurationFromTimestamp(timestamp: number): string { return (elapsedTime.length === 0) ? 'Just now' : elapsedTime.join(' '); } + +export function getFormattedTime(time: number | string, format: string) { + if (typeof time === 'string') return moment(time).format(format); + return (time > 0) ? moment(time).format(format) : 'N/A'; +} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx index d7fdf2b9eb8..4a3c11c11b0 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx @@ -66,7 +66,6 @@ interface IDatanodeResponse { version: string; setupTime: number; revision: string; - buildDate: string; networkLocation: string; } @@ -92,7 +91,6 @@ interface IDatanode { version: string; setupTime: number; revision: string; - buildDate: string; networkLocation: string; } @@ -331,15 +329,6 @@ const COLUMNS = [ sorter: (a: IDatanode, b: IDatanode) => a.revision.localeCompare(b.revision), defaultSortOrder: 'ascend' as const }, - { - title: 'Build Date', - dataIndex: 'buildDate', - key: 'buildDate', - isVisible: true, - isSearchable: true, - sorter: (a: IDatanode, b: IDatanode) => a.buildDate.localeCompare(b.buildDate), - defaultSortOrder: 'ascend' as const - }, { title: 'Network Location', dataIndex: 'networkLocation', @@ -446,7 +435,6 @@ export class Datanodes extends React.Component, IDatanode version: datanode.version, setupTime: datanode.setupTime, revision: datanode.revision, - buildDate: datanode.buildDate, networkLocation: datanode.networkLocation }; }); diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx index fdd25929d03..f092708348b 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx @@ -281,8 +281,9 @@ const DELETED_TAB_COLUMNS = [ const PENDINGDIR_TAB_COLUMNS = [ { title: 'Directory Name', - dataIndex: 'path', - key: 'path' + dataIndex: 'key', + isSearchable: true, + key: 'key' }, { title: 'In state since', @@ -294,9 +295,8 @@ const PENDINGDIR_TAB_COLUMNS = [ }, { title: 'Path', - dataIndex: 'key', - key: 'key', - isSearchable: true, + dataIndex: 'path', + key: 'path', width: '450px' }, { diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java index 3c39e4192d2..da5484c9b89 100644 --- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java @@ -62,10 +62,12 @@ import org.apache.hadoop.ozone.recon.scm.ReconPipelineManager; import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; import org.apache.hadoop.ozone.recon.tasks.ContainerKeyMapperTask; +import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithFSO; import org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates; import org.hadoop.ozone.recon.schema.tables.pojos.UnhealthyContainers; import org.junit.jupiter.api.BeforeEach; @@ -121,6 +123,7 @@ public class TestContainerEndpoint { LoggerFactory.getLogger(TestContainerEndpoint.class); private OzoneStorageContainerManager ozoneStorageContainerManager; + private ReconNamespaceSummaryManager reconNamespaceSummaryManager; private ReconContainerManager reconContainerManager; private ContainerStateManager containerStateManager; private ReconPipelineManager reconPipelineManager; @@ -198,6 +201,8 @@ private void initializeInjector() throws Exception { containerEndpoint = reconTestInjector.getInstance(ContainerEndpoint.class); containerHealthSchemaManager = reconTestInjector.getInstance(ContainerHealthSchemaManager.class); + this.reconNamespaceSummaryManager = + reconTestInjector.getInstance(ReconNamespaceSummaryManager.class); pipeline = getRandomPipeline(); pipelineID = pipeline.getId(); @@ -472,6 +477,10 @@ public void testGetKeysForContainer() throws IOException { // Now to check if the ContainerEndpoint also reads the File table // Set up test data for FSO keys setUpFSOData(); + NSSummaryTaskWithFSO nSSummaryTaskWithFso = + new NSSummaryTaskWithFSO(reconNamespaceSummaryManager, + reconOMMetadataManager, new OzoneConfiguration()); + nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager); // Reprocess the container key mapper to ensure the latest mapping is used reprocessContainerKeyMapper(); response = containerEndpoint.getKeysForContainer(20L, -1, ""); @@ -556,6 +565,10 @@ public void testGetKeysForContainerWithPrevKey() throws IOException { setUpFSOData(); // Reprocess the container key mapper to ensure the latest mapping is used reprocessContainerKeyMapper(); + NSSummaryTaskWithFSO nSSummaryTaskWithFso = + new NSSummaryTaskWithFSO(reconNamespaceSummaryManager, + reconOMMetadataManager, new OzoneConfiguration()); + nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager); response = containerEndpoint.getKeysForContainer(20L, -1, "/0/1/2/file7"); // Ensure that the expected number of keys is returned diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestDeletedKeysSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestDeletedKeysSearchEndpoint.java new file mode 100644 index 00000000000..5f3d0fa1268 --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestDeletedKeysSearchEndpoint.java @@ -0,0 +1,549 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license 
agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.api; + +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; +import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest; +import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; +import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; +import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; +import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +import javax.ws.rs.core.Response; +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; + +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mock; + +/** + * Test class for DeletedKeysSearchEndpoint. + * + * This class tests various scenarios for searching deleted keys within a + * given volume, bucket, and directory structure. The tests include: + * + * 1. Test Root Level Search Restriction: Ensures searching at the root level returns a bad request. + * 2. Test Volume Level Search Restriction: Ensures searching at the volume level returns a bad request. + * 3. Test Bucket Level Search: Verifies search results within different types of buckets, both FSO and OBS. + * 4. Test Directory Level Search: Validates searching inside specific directories. + * 5. Test Key Level Search: Confirms search results for specific keys within buckets, both FSO and OBS. + * 6. Test Key Level Search Under Directory: Verifies searching for keys within nested directories. + * 7. Test Search Under Nested Directory: Checks search results within nested directories. + * 8. Test Limit Search: Tests the limit functionality of the search API. + * 9. Test Search Deleted Keys with Bad Request: Ensures bad requests with invalid params return correct responses. + * 10. 
Test Last Key in Response: Confirms the presence of the last key in paginated responses. + * 11. Test Search Deleted Keys with Pagination: Verifies paginated search results. + * 12. Test Search in Empty Bucket: Checks the response for searching within an empty bucket. + */ +public class TestDeletedKeysSearchEndpoint extends AbstractReconSqlDBTest { + + @TempDir + private Path temporaryFolder; + private ReconOMMetadataManager reconOMMetadataManager; + private OMDBInsightEndpoint omdbInsightEndpoint; + private OzoneConfiguration ozoneConfiguration; + private static final String ROOT_PATH = "/"; + private OMMetadataManager omMetadataManager; + + @BeforeEach + public void setUp() throws Exception { + ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.setLong(OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD, 100); + omMetadataManager = initializeNewOmMetadataManager( + Files.createDirectory(temporaryFolder.resolve("JunitOmDBDir")).toFile()); + reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager, + Files.createDirectory(temporaryFolder.resolve("OmMetataDir")).toFile()); + + ReconTestInjector reconTestInjector = + new ReconTestInjector.Builder(temporaryFolder.toFile()) + .withReconSqlDb() + .withReconOm(reconOMMetadataManager) + .withOmServiceProvider(mock(OzoneManagerServiceProviderImpl.class)) + .addBinding(OzoneStorageContainerManager.class, + ReconStorageContainerManagerFacade.class) + .withContainerDB() + .addBinding(StorageContainerServiceProvider.class, + mock(StorageContainerServiceProviderImpl.class)) + .addBinding(OMDBInsightEndpoint.class) + .addBinding(ContainerHealthSchemaManager.class) + .build(); + omdbInsightEndpoint = reconTestInjector.getInstance(OMDBInsightEndpoint.class); + populateOMDB(); + } + + + private static OMMetadataManager initializeNewOmMetadataManager(File omDbDir) throws IOException { + OzoneConfiguration omConfiguration = new OzoneConfiguration(); + omConfiguration.set(OZONE_OM_DB_DIRS, omDbDir.getAbsolutePath()); + return new OmMetadataManagerImpl(omConfiguration, null); + } + + @Test + public void testRootLevelSearchRestriction() throws IOException { + String rootPath = "/"; + Response response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", rootPath); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); + String entity = (String) response.getEntity(); + assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), + "Expected a message indicating the path must be at the bucket level or deeper"); + } + + @Test + public void testEmptySearchPrefix() throws IOException { + Response response = omdbInsightEndpoint.getDeletedKeyInfo(100, "", ""); + // In this case we get all the keys from the OMDB + assertEquals(Response.Status.OK.getStatusCode(), response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(16, result.getRepeatedOmKeyInfoList().size()); + + // Set limit to 10 and pass empty search prefix + response = omdbInsightEndpoint.getDeletedKeyInfo(10, "", ""); + // In this case we get all the keys from the OMDB + assertEquals(Response.Status.OK.getStatusCode(), response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(10, result.getRepeatedOmKeyInfoList().size()); + } + + @Test + public void testVolumeLevelSearchRestriction() throws IOException { + String volumePath = "/vola"; + Response response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", volumePath); + 
assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); + String entity = (String) response.getEntity(); + assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), + "Expected a message indicating the path must be at the bucket level or deeper"); + + volumePath = "/volb"; + response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", volumePath); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); + entity = (String) response.getEntity(); + assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), + "Expected a message indicating the path must be at the bucket level or deeper"); + } + + @Test + public void testBucketLevelSearch() throws IOException { + // Search inside FSO bucket + Response response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/volb/bucketb1"); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(7, result.getRepeatedOmKeyInfoList().size()); + + response = omdbInsightEndpoint.getDeletedKeyInfo(2, "", "/volb/bucketb1"); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(2, result.getRepeatedOmKeyInfoList().size()); + + // Search inside OBS bucket + response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/volc/bucketc1"); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(9, result.getRepeatedOmKeyInfoList().size()); + + response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/vola/nonexistentbucket"); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); + String entity = (String) response.getEntity(); + assertTrue(entity.contains("No keys matched the search prefix"), + "Expected a message indicating no keys were found"); + } + + @Test + public void testDirectoryLevelSearch() throws IOException { + Response response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/volc/bucketc1/dirc1"); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(4, result.getRepeatedOmKeyInfoList().size()); + + response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/volc/bucketc1/dirc2"); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(5, result.getRepeatedOmKeyInfoList().size()); + + response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/volb/bucketb1/nonexistentdir"); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); + String entity = (String) response.getEntity(); + assertTrue(entity.contains("No keys matched the search prefix"), + "Expected a message indicating no keys were found"); + } + + @Test + public void testKeyLevelSearch() throws IOException { + // FSO Bucket key-level search + Response response = + omdbInsightEndpoint.getDeletedKeyInfo(10, "", "/volb/bucketb1/fileb1"); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = + (KeyInsightInfoResponse) response.getEntity(); + assertEquals(1, result.getRepeatedOmKeyInfoList().size()); + + response = + omdbInsightEndpoint.getDeletedKeyInfo(10, "", "/volb/bucketb1/fileb2"); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(1, result.getRepeatedOmKeyInfoList().size()); + + // Test with 
non-existent key + response = omdbInsightEndpoint.getDeletedKeyInfo(1, "", "/volb/bucketb1/nonexistentfile"); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), + response.getStatus()); + String entity = (String) response.getEntity(); + assertTrue(entity.contains("No keys matched the search prefix"), + "Expected a message indicating no keys were found"); + } + + @Test + public void testKeyLevelSearchUnderDirectory() throws IOException { + // FSO Bucket key-level search under directory + Response response = + omdbInsightEndpoint.getDeletedKeyInfo(10, "", "/volb/bucketb1/dir1/file1"); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(1, result.getRepeatedOmKeyInfoList().size()); + + response = omdbInsightEndpoint.getDeletedKeyInfo(10, "", + "/volb/bucketb1/dir1/nonexistentfile"); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), + response.getStatus()); + String entity = (String) response.getEntity(); + assertTrue(entity.contains("No keys matched the search prefix"), + "Expected a message indicating no keys were found"); + } + + @Test + public void testSearchUnderNestedDirectory() throws IOException { + // OBS Bucket nested directory search + Response response = + omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/volc/bucketc1/dirc1"); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(4, result.getRepeatedOmKeyInfoList().size()); + + response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/volc/bucketc1/dirc1/dirc11"); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(2, result.getRepeatedOmKeyInfoList().size()); + + response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/volc/bucketc1/dirc1/dirc11/dirc111"); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(1, result.getRepeatedOmKeyInfoList().size()); + + response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/volc/bucketc1/dirc1/dirc11/dirc111/nonexistentfile"); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); + String entity = (String) response.getEntity(); + assertTrue(entity.contains("No keys matched the search prefix"), + "Expected a message indicating no keys were found"); + + response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/volc/bucketc1/dirc1/dirc11/nonexistentfile"); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); + entity = (String) response.getEntity(); + assertTrue(entity.contains("No keys matched the search prefix"), + "Expected a message indicating no keys were found"); + } + + @Test + public void testLimitSearch() throws IOException { + Response response = omdbInsightEndpoint.getDeletedKeyInfo(2, "", "/volb/bucketb1"); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(2, result.getRepeatedOmKeyInfoList().size()); + } + + @Test + public void testSearchDeletedKeysWithBadRequest() throws IOException { + int negativeLimit = -1; + Response response = omdbInsightEndpoint.getDeletedKeyInfo(negativeLimit, "", "@323232"); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); + String entity = (String) response.getEntity(); + assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or 
deeper"), + "Expected a message indicating the path must be at the bucket level or deeper"); + + response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "///"); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); + entity = (String) response.getEntity(); + assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), + "Expected a message indicating the path must be at the bucket level or deeper"); + } + + @Test + public void testLastKeyInResponse() throws IOException { + Response response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/volb/bucketb1"); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(7, result.getRepeatedOmKeyInfoList().size()); + + // Compute the expected last key from the last entry in the result list + String computedLastKey = "/" + + result.getRepeatedOmKeyInfoList().get(6).getOmKeyInfoList().get(0).getVolumeName() + "/" + + result.getRepeatedOmKeyInfoList().get(6).getOmKeyInfoList().get(0).getBucketName() + "/" + + result.getRepeatedOmKeyInfoList().get(6).getOmKeyInfoList().get(0).getKeyName() + "/"; + + // Check that the last key in the response starts with the expected value + assertTrue(result.getLastKey().startsWith(computedLastKey)); + } + + @Test + public void testSearchDeletedKeysWithPagination() throws IOException { + String startPrefix = "/volb/bucketb1"; + int limit = 2; + String prevKey = ""; + + Response response = omdbInsightEndpoint.getDeletedKeyInfo(limit, prevKey, startPrefix); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(2, result.getRepeatedOmKeyInfoList().size()); + + prevKey = result.getLastKey(); + assertNotNull(prevKey, "Last key should not be null"); + + response = omdbInsightEndpoint.getDeletedKeyInfo(limit, prevKey, startPrefix); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(2, result.getRepeatedOmKeyInfoList().size()); + + prevKey = result.getLastKey(); + assertNotNull(prevKey, "Last key should not be null"); + + response = omdbInsightEndpoint.getDeletedKeyInfo(limit, prevKey, startPrefix); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(2, result.getRepeatedOmKeyInfoList().size()); + + prevKey = result.getLastKey(); + assertNotNull(prevKey, "Last key should not be null"); + + response = omdbInsightEndpoint.getDeletedKeyInfo(limit, prevKey, startPrefix); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(1, result.getRepeatedOmKeyInfoList().size()); + // Compute the expected last key from the last entry in the result list + String computedLastKey = "/" + + result.getRepeatedOmKeyInfoList().get(0).getOmKeyInfoList().get(0) + .getVolumeName() + "/" + + result.getRepeatedOmKeyInfoList().get(0).getOmKeyInfoList().get(0) + .getBucketName() + "/" + + result.getRepeatedOmKeyInfoList().get(0).getOmKeyInfoList().get(0) + .getKeyName() + "/"; + + // Check that the last key in the response starts with the expected value + assertTrue(result.getLastKey().startsWith(computedLastKey)); + } + + @Test + public void testSearchInEmptyBucket() throws IOException { + Response response = omdbInsightEndpoint.getDeletedKeyInfo(20, "", "/volb/bucketb2"); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), 
response.getStatus()); + String entity = (String) response.getEntity(); + assertTrue(entity.contains("No keys matched the search prefix"), + "Expected a message indicating no keys were found"); + } + + @Test + public void testPrevKeyProvidedStartPrefixEmpty() throws IOException { + // Case 1: prevKey provided, startPrefix empty + // Seek to the prevKey, skip the first matching record, then return remaining records until limit is reached. + String prevKey = "/volb/bucketb1/fileb3"; // This key exists, will skip it + int limit = 3; + String startPrefix = ""; // Empty startPrefix + + Response response = omdbInsightEndpoint.getDeletedKeyInfo(limit, prevKey, startPrefix); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + + // Assert that we get the next 3 records after skipping the prevKey + assertEquals(3, result.getRepeatedOmKeyInfoList().size()); + assertEquals("fileb4", result.getRepeatedOmKeyInfoList().get(0).getOmKeyInfoList().get(0).getKeyName()); + } + + @Test + public void testPrevKeyEmptyStartPrefixEmpty() throws IOException { + // Case 2: prevKey empty, startPrefix empty + // No need to seek, start from the first record and return records until limit is reached. + String prevKey = ""; // Empty prevKey + int limit = 100; + String startPrefix = ""; // Empty startPrefix + + Response response = omdbInsightEndpoint.getDeletedKeyInfo(limit, prevKey, startPrefix); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + + // Assert that we get all the 16 records currently in the deleted keys table + assertEquals(16, result.getRepeatedOmKeyInfoList().size()); + } + + @Test + public void testPrevKeyEmptyStartPrefixProvided() throws IOException { + // Case 3: prevKey empty, startPrefix provided + // Seek to the startPrefix and return matching records until limit is reached. + String prevKey = ""; // Empty prevKey + int limit = 2; + String startPrefix = "/volb/bucketb1/fileb"; // Seek to startPrefix and match files + + Response response = omdbInsightEndpoint.getDeletedKeyInfo(limit, prevKey, startPrefix); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + + // Assert that we get the first 2 records that match startPrefix + assertEquals(2, result.getRepeatedOmKeyInfoList().size()); + assertEquals("fileb1", result.getRepeatedOmKeyInfoList().get(0).getOmKeyInfoList().get(0).getKeyName()); + } + + @Test + public void testPrevKeyProvidedStartPrefixProvided() throws IOException { + // Case 4: prevKey provided, startPrefix provided + // Seek to the prevKey, skip it, and return remaining records matching startPrefix until limit is reached. + String prevKey = "/volb/bucketb1/fileb2"; // This key exists, will skip it + int limit = 3; + String startPrefix = "/volb/bucketb1"; // Matching prefix + + Response response = omdbInsightEndpoint.getDeletedKeyInfo(limit, prevKey, startPrefix); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + + // Assert that we get the next 3 records that match startPrefix after skipping the prevKey fileb2 + assertEquals(3, result.getRepeatedOmKeyInfoList().size()); + assertEquals("fileb3", result.getRepeatedOmKeyInfoList().get(0).getOmKeyInfoList().get(0).getKeyName()); + }
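The four cases above pin down how getDeletedKeyInfo interprets its limit, prevKey and startPrefix parameters: seek to prevKey and skip the matching record when prevKey is supplied, otherwise seek straight to startPrefix, then stream records that still match the prefix until the limit is reached. The standalone sketch below models that contract against a plain TreeMap standing in for the sorted deleted-keys table; it only illustrates the behaviour these tests assert, it is not Recon's implementation, and the skip-by-prefix step reflects the fact that the rows written by createDeletedKey below carry a random numeric suffix after the key name.

import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;

// Standalone model of the limit/prevKey/startPrefix contract exercised above.
// A TreeMap stands in for the sorted deletedTable; this is a sketch, not
// Recon's actual endpoint code.
public final class DeletedKeySeekSketch {

  private DeletedKeySeekSketch() { }

  static List<String> search(TreeMap<String, String> deletedTable,
                             String startPrefix, String prevKey, int limit) {
    // Seek to prevKey when it is given, otherwise to startPrefix.
    String seekKey = prevKey.isEmpty() ? startPrefix : prevKey;
    List<String> page = new ArrayList<>();
    for (String key : deletedTable.tailMap(seekKey, true).keySet()) {
      if (!prevKey.isEmpty() && key.startsWith(prevKey)) {
        continue; // skip the record(s) matching prevKey itself
      }
      if (!startPrefix.isEmpty() && !key.startsWith(startPrefix)) {
        break;    // keys are sorted, so the prefix range is exhausted
      }
      page.add(key);
      if (page.size() >= limit) {
        break;
      }
    }
    return page;
  }

  public static void main(String[] args) {
    TreeMap<String, String> table = new TreeMap<>();
    for (String name : new String[] {"fileb1", "fileb2", "fileb3", "fileb4", "fileb5"}) {
      table.put("/volb/bucketb1/" + name + "/1234", "deleted");
    }
    // Mirrors Case 4 above: expect fileb3, fileb4 and fileb5.
    System.out.println(search(table, "/volb/bucketb1", "/volb/bucketb1/fileb2", 3));
  }
}

Running the main method prints the three rows for fileb3, fileb4 and fileb5, the same page that Case 4 asserts on.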
+ + + /** + * Populates the OMDB with a set of deleted keys for testing purposes. + * This diagram is for reference: + * * root + * ├── volb (Total Size: 7000KB) + * │ ├── bucketb1 (Total Size: 7000KB) + * │ │ ├── fileb1 (Size: 1000KB) + * │ │ ├── fileb2 (Size: 1000KB) + * │ │ ├── fileb3 (Size: 1000KB) + * │ │ ├── fileb4 (Size: 1000KB) + * │ │ ├── fileb5 (Size: 1000KB) + * │ │ ├── dir1 (Total Size: 2000KB) + * │ │ │ ├── file1 (Size: 1000KB) + * │ │ │ └── file2 (Size: 1000KB) + * ├── volc (Total Size: 9000KB) + * │ ├── bucketc1 (Total Size: 9000KB) + * │ │ ├── dirc1 (Total Size: 4000KB) + * │ │ │ ├── filec1 (Size: 1000KB) + * │ │ │ ├── filec2 (Size: 1000KB) + * │ │ │ ├── dirc11 (Total Size: 2000KB) + * │ │ │ ├── filec11 (Size: 1000KB) + * │ │ │ └── dirc111 (Total Size: 1000KB) + * │ │ │ └── filec111 (Size: 1000KB) + * │ │ ├── dirc2 (Total Size: 5000KB) + * │ │ │ ├── filec3 (Size: 1000KB) + * │ │ │ ├── filec4 (Size: 1000KB) + * │ │ │ ├── filec5 (Size: 1000KB) + * │ │ │ ├── filec6 (Size: 1000KB) + * │ │ │ └── filec7 (Size: 1000KB) + * + * @throws Exception if an error occurs while creating deleted keys. + */ + private void populateOMDB() throws Exception { + + createDeletedKey("fileb1", "bucketb1", "volb", 1000); + createDeletedKey("fileb2", "bucketb1", "volb", 1000); + createDeletedKey("fileb3", "bucketb1", "volb", 1000); + createDeletedKey("fileb4", "bucketb1", "volb", 1000); + createDeletedKey("fileb5", "bucketb1", "volb", 1000); + + createDeletedKey("dir1/file1", "bucketb1", "volb", 1000); + createDeletedKey("dir1/file2", "bucketb1", "volb", 1000); + + createDeletedKey("dirc1/filec1", "bucketc1", "volc", 1000); + createDeletedKey("dirc1/filec2", "bucketc1", "volc", 1000); + createDeletedKey("dirc2/filec3", "bucketc1", "volc", 1000); + createDeletedKey("dirc2/filec4", "bucketc1", "volc", 1000); + createDeletedKey("dirc2/filec5", "bucketc1", "volc", 1000); + createDeletedKey("dirc2/filec6", "bucketc1", "volc", 1000); + createDeletedKey("dirc2/filec7", "bucketc1", "volc", 1000); + + // create nested directories and files in bucketc1 + createDeletedKey("dirc1/dirc11/filec11", "bucketc1", "volc", 1000); + createDeletedKey("dirc1/dirc11/dirc111/filec111", "bucketc1", "volc", 1000); + } + + private void createDeletedKey(String keyName, String bucketName, + String volumeName, long dataSize) throws IOException { + // Construct the deleted key path + String deletedKey = "/" + volumeName + "/" + bucketName + "/" + keyName + "/" + + UUID.randomUUID().getMostSignificantBits(); + + // Create a list to hold OmKeyInfo objects + List<OmKeyInfo> omKeyInfos = new ArrayList<>(); + + // Build OmKeyInfo object + OmKeyInfo omKeyInfo = new OmKeyInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setDataSize(dataSize) + .setObjectID(UUID.randomUUID().getMostSignificantBits()) + .setReplicationConfig(StandaloneReplicationConfig.getInstance( + HddsProtos.ReplicationFactor.ONE)) + .build(); + + // Add the OmKeyInfo object to the list + omKeyInfos.add(omKeyInfo); + + // Create a RepeatedOmKeyInfo object with the list of OmKeyInfo + RepeatedOmKeyInfo repeatedOmKeyInfo = new RepeatedOmKeyInfo(omKeyInfos); + + // Write the deleted key information to the OM metadata manager + writeDeletedKeysToOm(reconOMMetadataManager, deletedKey, repeatedOmKeyInfo); + }
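Each createDeletedKey call above produces exactly one row in the deleted-keys table: the row key is the full path /volume/bucket/key followed by a random numeric suffix (so repeated deletions of the same key name do not collide in this test data), and the value is a RepeatedOmKeyInfo, i.e. a list that can hold several deleted versions of that key name. A minimal standalone model of that row shape, with illustrative names only:

import java.util.ArrayList;
import java.util.List;
import java.util.UUID;

// Toy model of one deletedTable row as built by createDeletedKey above.
// DeletedRowSketch is an illustrative stand-in, not an Ozone class.
public final class DeletedRowSketch {

  private final String rowKey;
  private final List<Long> versionSizes = new ArrayList<>();

  DeletedRowSketch(String volume, String bucket, String key) {
    // Same shape as the test data: "/volume/bucket/key/<random suffix>".
    this.rowKey = "/" + volume + "/" + bucket + "/" + key + "/"
        + UUID.randomUUID().getMostSignificantBits();
  }

  void addVersion(long dataSize) {
    // Stands in for appending another OmKeyInfo to the RepeatedOmKeyInfo list.
    versionSizes.add(dataSize);
  }

  public static void main(String[] args) {
    DeletedRowSketch row = new DeletedRowSketch("volb", "bucketb1", "fileb1");
    row.addVersion(1000L);
    row.addVersion(1000L); // a second deleted version of the same key name
    System.out.println(row.rowKey + " -> " + row.versionSizes.size() + " version(s)");
  }
}

This grouping is why the assertions above reach into each response entry with getRepeatedOmKeyInfoList().get(i).getOmKeyInfoList().get(0) rather than reading a flat key list.

+ + /** + * Writes deleted key information to the Ozone Manager metadata table.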
+ * @param omMetadataManager the Ozone Manager metadata manager + * @param deletedKey the name of the deleted key + * @param repeatedOmKeyInfo the RepeatedOmKeyInfo object containing key information + * @throws IOException if there is an error accessing the metadata table + */ + public static void writeDeletedKeysToOm(OMMetadataManager omMetadataManager, + String deletedKey, + RepeatedOmKeyInfo repeatedOmKeyInfo) throws IOException { + // Put the deleted key information into the deleted table + omMetadataManager.getDeletedTable().put(deletedKey, repeatedOmKeyInfo); + } + +} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index c3d2fd484a5..f1dafa2c75b 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -378,7 +378,6 @@ public void setUp() throws Exception { .setDatanodeDetails(datanodeDetailsProto) .setVersion("0.6.0") .setSetupTime(1596347628802L) - .setBuildDate("2020-08-01T08:50Z") .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36") .build(); StorageReportProto storageReportProto1 = @@ -409,7 +408,6 @@ public void setUp() throws Exception { .setDatanodeDetails(datanodeDetailsProto2) .setVersion("0.6.0") .setSetupTime(1596347636802L) - .setBuildDate("2020-08-01T08:50Z") .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36") .build(); StorageReportProto storageReportProto3 = @@ -441,7 +439,6 @@ public void setUp() throws Exception { .setDatanodeDetails(datanodeDetailsProto3) .setVersion("0.6.0") .setSetupTime(1596347628802L) - .setBuildDate("2020-08-01T08:50Z") .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36") .build(); StorageReportProto storageReportProto5 = diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java index 54da926601e..9cda6d6e451 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java @@ -89,6 +89,7 @@ import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.mockito.Mockito.anyLong; @@ -791,8 +792,9 @@ public void testConstructFullPath() throws IOException { .setParentObjectID(DIR_TWO_OBJECT_ID) .build(); // Call constructFullPath and verify the result - fullPath = ReconUtils.constructFullPath(keyInfo, - reconNamespaceSummaryManager, reconOMMetadataManager); + OmKeyInfo finalKeyInfo = keyInfo; + assertThrows(ServiceNotReadyException.class, () -> ReconUtils.constructFullPath(finalKeyInfo, + reconNamespaceSummaryManager, reconOMMetadataManager)); } @Test @@ -813,8 +815,8 @@ public void testConstructFullPathWithNegativeParentIdTriggersRebuild() throws IO .setParentObjectID(dirOneObjectId) .build(); - String result = ReconUtils.constructFullPath(keyInfo, mockSummaryManager, mockMetadataManager); - assertEquals("", result, 
"Expected an empty string return due to rebuild trigger"); + assertThrows(ServiceNotReadyException.class, () -> + ReconUtils.constructFullPath(keyInfo, mockSummaryManager, mockMetadataManager)); } @Test @@ -836,7 +838,8 @@ public void testLoggingWhenParentIdIsNegative() throws IOException { .setParentObjectID(1L) .build(); - ReconUtils.constructFullPath(keyInfo, mockManager, null); + assertThrows(ServiceNotReadyException.class, () -> + ReconUtils.constructFullPath(keyInfo, mockManager, null)); // Assert ArgumentCaptor logCaptor = ArgumentCaptor.forClass(String.class); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java index 74c58cd9d38..61a9711876e 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java @@ -37,10 +37,11 @@ import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.recon.ReconTestInjector; import org.apache.hadoop.ozone.recon.ReconUtils; -import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo; +import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfoProtoWrapper; import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; import org.apache.hadoop.ozone.recon.api.types.ListKeysResponse; import org.apache.hadoop.ozone.recon.api.types.NSSummary; +import org.apache.hadoop.ozone.recon.api.types.ResponseStatus; import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest; import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; @@ -62,6 +63,7 @@ import org.junit.jupiter.api.io.TempDir; import javax.ws.rs.core.Response; +import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.sql.Timestamp; @@ -88,6 +90,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -216,6 +219,7 @@ public class TestOmDBInsightEndPoint extends AbstractReconSqlDBTest { private static final long KEY_TWENTY_TWO_OBJECT_ID = 37L; private static final long KEY_TWENTY_THREE_OBJECT_ID = 38L; private static final long KEY_TWENTY_FOUR_OBJECT_ID = 39L; + private static final long KEY_TWENTY_FIVE_OBJECT_ID = 42L; private static final long EMPTY_OBS_BUCKET_OBJECT_ID = 40L; private static final long EMPTY_FSO_BUCKET_OBJECT_ID = 41L; @@ -241,6 +245,7 @@ public class TestOmDBInsightEndPoint extends AbstractReconSqlDBTest { private static final long KEY_SEVENTEEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 private static final long KEY_EIGHTEEN_SIZE = OzoneConsts.KB + 1; // bin 1 private static final long KEY_NINETEEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long KEY_TWENTY_SIZE = OzoneConsts.KB + 1; // bin 1 private static final String OBS_BUCKET_PATH = "/volume1/obs-bucket"; private static final String FSO_BUCKET_PATH = "/volume1/fso-bucket"; @@ -893,11 +898,11 @@ public void testGetOpenKeyInfo() throws Exception { .get("/sampleVol/bucketOne/key_one"); assertEquals("key_one", omKeyInfo1.getKeyName()); Response openKeyInfoResp = - 
omdbInsightEndpoint.getOpenKeyInfo(-1, "", true, true); + omdbInsightEndpoint.getOpenKeyInfo(-1, "", "", true, true); KeyInsightInfoResponse keyInsightInfoResp = (KeyInsightInfoResponse) openKeyInfoResp.getEntity(); assertNotNull(keyInsightInfoResp); - assertEquals("key_one", + assertEquals("sampleVol/bucketOne/key_one", keyInsightInfoResp.getNonFSOKeyInfoList().get(0).getPath()); } @@ -1040,7 +1045,7 @@ public void testGetOpenKeyInfoLimitParam() throws Exception { reconOMMetadataManager.getOpenKeyTable(getBucketLayout()) .put("/sampleVol/bucketOne/key_three", omKeyInfo3); Response openKeyInfoResp = - omdbInsightEndpoint.getOpenKeyInfo(2, "", true, true); + omdbInsightEndpoint.getOpenKeyInfo(2, "", "", true, true); KeyInsightInfoResponse keyInsightInfoResp = (KeyInsightInfoResponse) openKeyInfoResp.getEntity(); assertNotNull(keyInsightInfoResp); @@ -1049,10 +1054,10 @@ public void testGetOpenKeyInfoLimitParam() throws Exception { assertEquals(0, keyInsightInfoResp.getFsoKeyInfoList().size()); assertEquals(2, keyInsightInfoResp.getFsoKeyInfoList().size() + keyInsightInfoResp.getNonFSOKeyInfoList().size()); - assertEquals("key_three", + assertEquals("sampleVol/bucketOne/key_three", keyInsightInfoResp.getNonFSOKeyInfoList().get(1).getPath()); - openKeyInfoResp = omdbInsightEndpoint.getOpenKeyInfo(3, "", true, true); + openKeyInfoResp = omdbInsightEndpoint.getOpenKeyInfo(3, "", "", true, true); keyInsightInfoResp = (KeyInsightInfoResponse) openKeyInfoResp.getEntity(); assertNotNull(keyInsightInfoResp); @@ -1061,7 +1066,7 @@ public void testGetOpenKeyInfoLimitParam() throws Exception { assertEquals(1, keyInsightInfoResp.getFsoKeyInfoList().size()); assertEquals(3, keyInsightInfoResp.getFsoKeyInfoList().size() + keyInsightInfoResp.getNonFSOKeyInfoList().size()); - assertEquals("key_three", + assertEquals("sampleVol/bucketOne/key_three", keyInsightInfoResp.getNonFSOKeyInfoList().get(1).getPath()); } @@ -1103,7 +1108,7 @@ public void testGetOpenKeyInfoWithIncludeFsoAndIncludeNonFsoParams() // CASE 1 :- Display only FSO keys in response // includeFsoKeys=true, includeNonFsoKeys=false Response openKeyInfoResp = - omdbInsightEndpoint.getOpenKeyInfo(10, "", true, false); + omdbInsightEndpoint.getOpenKeyInfo(10, "", "", true, false); KeyInsightInfoResponse keyInsightInfoResp = (KeyInsightInfoResponse) openKeyInfoResp.getEntity(); assertNotNull(keyInsightInfoResp); @@ -1115,7 +1120,7 @@ public void testGetOpenKeyInfoWithIncludeFsoAndIncludeNonFsoParams() // CASE 2 :- Display only Non-FSO keys in response // includeFsoKeys=false, includeNonFsoKeys=true openKeyInfoResp = - omdbInsightEndpoint.getOpenKeyInfo(10, "", false, true); + omdbInsightEndpoint.getOpenKeyInfo(10, "", "", false, true); keyInsightInfoResp = (KeyInsightInfoResponse) openKeyInfoResp.getEntity(); assertNotNull(keyInsightInfoResp); assertEquals(0, @@ -1126,7 +1131,7 @@ public void testGetOpenKeyInfoWithIncludeFsoAndIncludeNonFsoParams() // CASE 3 :- Display both FSO and Non-FSO keys in response // includeFsoKeys=true, includeNonFsoKeys=true openKeyInfoResp = - omdbInsightEndpoint.getOpenKeyInfo(10, "", true, true); + omdbInsightEndpoint.getOpenKeyInfo(10, "", "", true, true); keyInsightInfoResp = (KeyInsightInfoResponse) openKeyInfoResp.getEntity(); assertNotNull(keyInsightInfoResp); assertEquals(4, @@ -1137,45 +1142,39 @@ public void testGetOpenKeyInfoWithIncludeFsoAndIncludeNonFsoParams() // CASE 4 :- Don't Display both FSO and Non-FSO keys in response // includeFsoKeys=false, includeNonFsoKeys=false openKeyInfoResp = - 
omdbInsightEndpoint.getOpenKeyInfo(10, "", false, false); - keyInsightInfoResp = (KeyInsightInfoResponse) openKeyInfoResp.getEntity(); - assertNotNull(keyInsightInfoResp); - assertEquals(0, - keyInsightInfoResp.getFsoKeyInfoList().size()); - assertEquals(0, - keyInsightInfoResp.getNonFSOKeyInfoList().size()); + omdbInsightEndpoint.getOpenKeyInfo(10, "", "", false, false); + assertEquals(204, openKeyInfoResp.getStatus()); + String entity = (String) openKeyInfoResp.getEntity(); + assertTrue(entity.contains("No keys matched the search prefix"), + "Expected a message indicating no keys were found"); } @Test public void testGetOpenKeyInfoPrevKeyParam() throws Exception { OmKeyInfo omKeyInfo1 = - getOmKeyInfo("sampleVol", "bucketOne", "key_one", true); + getOmKeyInfo("sampleVol", "bucketOne", "key_1", true); OmKeyInfo omKeyInfo2 = - getOmKeyInfo("sampleVol", "bucketOne", "key_two", true); + getOmKeyInfo("sampleVol", "bucketOne", "key_2", true); OmKeyInfo omKeyInfo3 = - getOmKeyInfo("sampleVol", "bucketOne", "key_three", true); + getOmKeyInfo("sampleVol", "bucketOne", "key_3", true); reconOMMetadataManager.getOpenKeyTable(getBucketLayout()) - .put("/sampleVol/bucketOne/key_one", omKeyInfo1); + .put("/sampleVol/bucketOne/key_1", omKeyInfo1); reconOMMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED) - .put("/sampleVol/bucketOne/key_two", omKeyInfo2); + .put("/sampleVol/bucketOne/key_2", omKeyInfo2); reconOMMetadataManager.getOpenKeyTable(getBucketLayout()) - .put("/sampleVol/bucketOne/key_three", omKeyInfo3); + .put("/sampleVol/bucketOne/key_3", omKeyInfo3); Response openKeyInfoResp = - omdbInsightEndpoint.getOpenKeyInfo(-1, "/sampleVol/bucketOne/key_one", + omdbInsightEndpoint.getOpenKeyInfo(-1, "/sampleVol/bucketOne/key_1", "", true, true); KeyInsightInfoResponse keyInsightInfoResp = (KeyInsightInfoResponse) openKeyInfoResp.getEntity(); assertNotNull(keyInsightInfoResp); - assertEquals(1, - keyInsightInfoResp.getNonFSOKeyInfoList().size()); + assertEquals(1, keyInsightInfoResp.getNonFSOKeyInfoList().size()); assertEquals(1, keyInsightInfoResp.getFsoKeyInfoList().size()); - assertEquals(2, keyInsightInfoResp.getFsoKeyInfoList().size() + - keyInsightInfoResp.getNonFSOKeyInfoList().size()); - assertEquals("key_three", - keyInsightInfoResp.getNonFSOKeyInfoList().get(0).getPath()); - assertEquals("key_two", - keyInsightInfoResp.getFsoKeyInfoList().get(0).getPath()); + assertEquals(2, keyInsightInfoResp.getFsoKeyInfoList().size() + keyInsightInfoResp.getNonFSOKeyInfoList().size()); + assertEquals("sampleVol/bucketOne/key_3", keyInsightInfoResp.getNonFSOKeyInfoList().get(0).getPath()); + assertEquals("sampleVol/bucketOne/key_2", keyInsightInfoResp.getFsoKeyInfoList().get(0).getPath()); } @Test @@ -1212,7 +1211,7 @@ public void testGetDeletedKeyInfoLimitParam() throws Exception { reconOMMetadataManager.getDeletedTable() .put("/sampleVol/bucketOne/key_three", repeatedOmKeyInfo3); - Response deletedKeyInfo = omdbInsightEndpoint.getDeletedKeyInfo(2, ""); + Response deletedKeyInfo = omdbInsightEndpoint.getDeletedKeyInfo(2, "", ""); KeyInsightInfoResponse keyInsightInfoResp = (KeyInsightInfoResponse) deletedKeyInfo.getEntity(); assertNotNull(keyInsightInfoResp); @@ -1244,7 +1243,7 @@ public void testGetDeletedKeyInfoPrevKeyParam() throws Exception { .put("/sampleVol/bucketOne/key_three", repeatedOmKeyInfo3); Response deletedKeyInfo = omdbInsightEndpoint.getDeletedKeyInfo(2, - "/sampleVol/bucketOne/key_one"); + "/sampleVol/bucketOne/key_one", ""); KeyInsightInfoResponse keyInsightInfoResp = 
(KeyInsightInfoResponse) deletedKeyInfo.getEntity(); assertNotNull(keyInsightInfoResp); @@ -1278,7 +1277,7 @@ public void testGetDeletedKeyInfo() throws Exception { .get("/sampleVol/bucketOne/key_one"); assertEquals("key_one", repeatedOmKeyInfo1.getOmKeyInfoList().get(0).getKeyName()); - Response deletedKeyInfo = omdbInsightEndpoint.getDeletedKeyInfo(-1, ""); + Response deletedKeyInfo = omdbInsightEndpoint.getDeletedKeyInfo(-1, "", ""); KeyInsightInfoResponse keyInsightInfoResp = (KeyInsightInfoResponse) deletedKeyInfo.getEntity(); assertNotNull(keyInsightInfoResp); @@ -1287,6 +1286,128 @@ public void testGetDeletedKeyInfo() throws Exception { .get(0).getKeyName()); } + @Test + public void testGetDeletedKeysWithPrevKeyProvidedAndStartPrefixEmpty() + throws Exception { + // Prepare mock data in the deletedTable. + for (int i = 1; i <= 10; i++) { + OmKeyInfo omKeyInfo = + getOmKeyInfo("sampleVol", "bucketOne", "deleted_key_" + i, true); + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketOne/deleted_key_" + i, + new RepeatedOmKeyInfo(omKeyInfo)); + } + + // Case 1: prevKey provided, startPrefix empty + Response deletedKeyInfoResponse = omdbInsightEndpoint.getDeletedKeyInfo(5, + "/sampleVol/bucketOne/deleted_key_3", ""); + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) deletedKeyInfoResponse.getEntity(); + + // Validate that the response skips the prevKey and returns subsequent records. + assertNotNull(keyInsightInfoResp); + assertEquals(5, keyInsightInfoResp.getRepeatedOmKeyInfoList().size()); + assertEquals("deleted_key_4", + keyInsightInfoResp.getRepeatedOmKeyInfoList().get(0).getOmKeyInfoList().get(0).getKeyName()); + assertEquals("deleted_key_8", + keyInsightInfoResp.getRepeatedOmKeyInfoList().get(4).getOmKeyInfoList().get(0).getKeyName()); + } + + @Test + public void testGetDeletedKeysWithPrevKeyEmptyAndStartPrefixEmpty() + throws Exception { + // Prepare mock data in the deletedTable. + for (int i = 1; i < 10; i++) { + OmKeyInfo omKeyInfo = + getOmKeyInfo("sampleVol", "bucketOne", "deleted_key_" + i, true); + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketOne/deleted_key_" + i, new RepeatedOmKeyInfo(omKeyInfo)); + } + + // Case 2: prevKey empty, startPrefix empty + Response deletedKeyInfoResponse = + omdbInsightEndpoint.getDeletedKeyInfo(5, "", ""); + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) deletedKeyInfoResponse.getEntity(); + + // Validate that the response retrieves from the beginning. + assertNotNull(keyInsightInfoResp); + assertEquals(5, keyInsightInfoResp.getRepeatedOmKeyInfoList().size()); + assertEquals("deleted_key_1", + keyInsightInfoResp.getRepeatedOmKeyInfoList().get(0).getOmKeyInfoList().get(0).getKeyName()); + assertEquals("deleted_key_5", + keyInsightInfoResp.getRepeatedOmKeyInfoList().get(4).getOmKeyInfoList().get(0).getKeyName()); + } + + @Test + public void testGetDeletedKeysWithStartPrefixProvidedAndPrevKeyEmpty() + throws Exception { + // Prepare mock data in the deletedTable. 
+ for (int i = 1; i < 5; i++) { + OmKeyInfo omKeyInfo = + getOmKeyInfo("sampleVol", "bucketOne", "deleted_key_" + i, true); + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketOne/deleted_key_" + i, new RepeatedOmKeyInfo(omKeyInfo)); + } + for (int i = 5; i < 10; i++) { + OmKeyInfo omKeyInfo = + getOmKeyInfo("sampleVol", "bucketTwo", "deleted_key_" + i, true); + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketTwo/deleted_key_" + i, new RepeatedOmKeyInfo(omKeyInfo)); + } + + // Case 3: startPrefix provided, prevKey empty + Response deletedKeyInfoResponse = + omdbInsightEndpoint.getDeletedKeyInfo(5, "", + "/sampleVol/bucketOne/"); + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) deletedKeyInfoResponse.getEntity(); + + // Validate that the response retrieves starting from the prefix. + assertNotNull(keyInsightInfoResp); + assertEquals(4, keyInsightInfoResp.getRepeatedOmKeyInfoList().size()); + assertEquals("deleted_key_1", + keyInsightInfoResp.getRepeatedOmKeyInfoList().get(0).getOmKeyInfoList().get(0).getKeyName()); + assertEquals("deleted_key_4", + keyInsightInfoResp.getRepeatedOmKeyInfoList().get(3).getOmKeyInfoList().get(0).getKeyName()); + } + + @Test + public void testGetDeletedKeysWithBothPrevKeyAndStartPrefixProvided() + throws IOException { + // Prepare mock data in the deletedTable. + for (int i = 1; i < 10; i++) { + OmKeyInfo omKeyInfo = + getOmKeyInfo("sampleVol", "bucketOne", "deleted_key_" + i, true); + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketOne/deleted_key_" + i, new RepeatedOmKeyInfo(omKeyInfo)); + } + for (int i = 10; i < 15; i++) { + OmKeyInfo omKeyInfo = + getOmKeyInfo("sampleVol", "bucketTwo", "deleted_key_" + i, true); + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketTwo/deleted_key_" + i, new RepeatedOmKeyInfo(omKeyInfo)); + } + + // Case 4: startPrefix and prevKey provided + Response deletedKeyInfoResponse = + omdbInsightEndpoint.getDeletedKeyInfo(5, + "/sampleVol/bucketOne/deleted_key_5", + "/sampleVol/bucketOne/"); + + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) deletedKeyInfoResponse.getEntity(); + + // Validate that the response retrieves starting from the prefix and skips the prevKey. 
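These Case 3 and Case 4 checks, together with the lastKey field asserted in TestDeletedKeysSearchEndpoint, are what make the endpoint usable as a cursor API: a caller keeps startPrefix fixed and feeds each response's last key back in as prevKey until a short page comes back. The assertions that follow verify the server side of that contract; the sketch below is a hedged, self-contained illustration of the client-side loop, where PageFetcher and Page are hypothetical stand-ins rather than Recon classes.

import java.util.ArrayList;
import java.util.List;

// Illustrative client-side paging loop over a cursor API shaped like
// getDeletedKeyInfo(limit, prevKey, startPrefix). PageFetcher and Page are
// made-up names for this sketch, not part of Recon.
public final class DeletedKeyPager {

  interface PageFetcher {
    Page fetch(int limit, String prevKey, String startPrefix);
  }

  static final class Page {
    final List<String> keys;
    final String lastKey;

    Page(List<String> keys, String lastKey) {
      this.keys = keys;
      this.lastKey = lastKey;
    }
  }

  static List<String> fetchAll(PageFetcher fetcher, String startPrefix, int pageSize) {
    List<String> all = new ArrayList<>();
    String prevKey = "";
    while (true) {
      Page page = fetcher.fetch(pageSize, prevKey, startPrefix);
      all.addAll(page.keys);
      if (page.keys.size() < pageSize || page.lastKey.isEmpty()) {
        break; // a short page (or missing cursor) means the range is exhausted
      }
      prevKey = page.lastKey; // feed the cursor back in, as the pagination tests do
    }
    return all;
  }

  public static void main(String[] args) {
    // Fake in-memory fetcher over ten keys, so the loop runs without a server.
    List<String> data = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
      data.add("/sampleVol/bucketOne/deleted_key_" + i);
    }
    PageFetcher fake = (limit, prev, prefix) -> {
      int from = prev.isEmpty() ? 0 : data.indexOf(prev) + 1;
      int to = Math.min(from + limit, data.size());
      List<String> keys = new ArrayList<>(data.subList(from, to));
      return new Page(keys, keys.isEmpty() ? "" : keys.get(keys.size() - 1));
    };
    System.out.println(fetchAll(fake, "/sampleVol/bucketOne/", 3).size()); // prints 10
  }
}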
+ assertNotNull(keyInsightInfoResp); + assertEquals(4, keyInsightInfoResp.getRepeatedOmKeyInfoList().size()); + assertEquals("deleted_key_6", + keyInsightInfoResp.getRepeatedOmKeyInfoList().get(0).getOmKeyInfoList().get(0).getKeyName()); + assertEquals("deleted_key_9", + keyInsightInfoResp.getRepeatedOmKeyInfoList().get(3).getOmKeyInfoList().get(0).getKeyName()); + } + + private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, String keyName, boolean isFile) { return new OmKeyInfo.Builder() @@ -1456,7 +1577,7 @@ public void testListKeysFSOBucket() { "", 1000); ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity(); assertEquals(6, listKeysResponse.getKeys().size()); - KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0); + KeyEntityInfoProtoWrapper keyEntityInfo = listKeysResponse.getKeys().get(0); assertEquals("volume1/fso-bucket/dir1/file1", keyEntityInfo.getPath()); assertEquals("/1/10/11/file1", keyEntityInfo.getKey()); assertEquals("/1/10/13/testfile", listKeysResponse.getLastKey()); @@ -1488,7 +1609,7 @@ public void testListKeysFSOBucketWithLimitAndPagination() { "", 2); ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity(); assertEquals(2, listKeysResponse.getKeys().size()); - KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0); + KeyEntityInfoProtoWrapper keyEntityInfo = listKeysResponse.getKeys().get(0); assertEquals("volume1/fso-bucket/dir1/file1", keyEntityInfo.getPath()); assertEquals("/1/10/11/testfile", listKeysResponse.getLastKey()); assertEquals("RATIS", keyEntityInfo.getReplicationConfig().getReplicationType().toString()); @@ -1530,7 +1651,7 @@ public void testListKeysFSOBucketDirOnePathWithLimitTwoAndPagination() { "", 2); ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity(); assertEquals(2, listKeysResponse.getKeys().size()); - KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0); + KeyEntityInfoProtoWrapper keyEntityInfo = listKeysResponse.getKeys().get(0); assertEquals("volume1/fso-bucket/dir1/file1", keyEntityInfo.getPath()); assertEquals("/1/10/11/testfile", listKeysResponse.getLastKey()); assertEquals("RATIS", keyEntityInfo.getReplicationConfig().getReplicationType().toString()); @@ -1572,7 +1693,7 @@ public void testListKeysFSOBucketDirOnePathWithLimitOneAndPagination() { "", 1); ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity(); assertEquals(1, listKeysResponse.getKeys().size()); - KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0); + KeyEntityInfoProtoWrapper keyEntityInfo = listKeysResponse.getKeys().get(0); assertEquals("volume1/fso-bucket/dir1/file1", keyEntityInfo.getPath()); assertEquals("/1/10/11/file1", listKeysResponse.getLastKey()); assertEquals("RATIS", keyEntityInfo.getReplicationConfig().getReplicationType().toString()); @@ -1623,7 +1744,7 @@ public void testListKeysFSOBucketTwoPathWithLimitAcrossDirsAtBucketLevel() { "", 3); ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity(); assertEquals(3, listKeysResponse.getKeys().size()); - KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0); + KeyEntityInfoProtoWrapper keyEntityInfo = listKeysResponse.getKeys().get(0); assertEquals("volume1/fso-bucket2/dir8/file1", keyEntityInfo.getPath()); assertEquals("/1/30/32/file1", listKeysResponse.getLastKey()); assertEquals("RATIS", keyEntityInfo.getReplicationConfig().getReplicationType().toString()); @@ -1656,7 +1777,7 @@ public void 
testListKeysFSOBucketDirTwoPathWithLimitAndPagination() { "", 2); ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity(); assertEquals(2, listKeysResponse.getKeys().size()); - KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0); + KeyEntityInfoProtoWrapper keyEntityInfo = listKeysResponse.getKeys().get(0); assertEquals("volume1/fso-bucket/dir1/dir2/file1", keyEntityInfo.getPath()); assertEquals("/1/10/12/testfile", listKeysResponse.getLastKey()); assertEquals("RATIS", keyEntityInfo.getReplicationConfig().getReplicationType().toString()); @@ -1689,7 +1810,7 @@ public void testListKeysFSOBucketDirThreePathWithLimitAndPagination() { "", 2); ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity(); assertEquals(2, listKeysResponse.getKeys().size()); - KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0); + KeyEntityInfoProtoWrapper keyEntityInfo = listKeysResponse.getKeys().get(0); assertEquals("volume1/fso-bucket/dir1/dir2/dir3/file1", keyEntityInfo.getPath()); assertEquals("/1/10/13/testfile", listKeysResponse.getLastKey()); assertEquals("RATIS", keyEntityInfo.getReplicationConfig().getReplicationType().toString()); @@ -1776,7 +1897,7 @@ public void testListKeysOBSBucketWithLimitAndPagination() throws Exception { "", 2); ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity(); assertEquals(2, listKeysResponse.getKeys().size()); - KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0); + KeyEntityInfoProtoWrapper keyEntityInfo = listKeysResponse.getKeys().get(0); assertEquals("volume1/obs-bucket/key1", keyEntityInfo.getPath()); assertEquals("/volume1/obs-bucket/key1/key2", listKeysResponse.getLastKey()); assertEquals("RATIS", keyEntityInfo.getReplicationConfig().getReplicationType().toString()); @@ -1817,6 +1938,18 @@ public void testListKeysForEmptyOBSBucket() { assertEquals("", listKeysResponse.getLastKey()); } + @Test + public void testListKeysWhenNSSummaryNotInitialized() throws Exception { + reconNamespaceSummaryManager.clearNSSummaryTable(); + // bucket level DU + Response bucketResponse = + omdbInsightEndpoint.listKeys("RATIS", "", 0, FSO_BUCKET_TWO_PATH, + "", 1000); + ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity(); + assertEquals(ResponseStatus.INITIALIZING, listKeysResponse.getStatus()); + assertEquals(Response.Status.SERVICE_UNAVAILABLE.getStatusCode(), bucketResponse.getStatus()); + } + @Test public void testListKeysForEmptyFSOBucket() { Response bucketResponse = omdbInsightEndpoint.listKeys("RATIS", "", 0, EMPTY_FSO_BUCKET_PATH, diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java index f64d93707a2..e320c19069e 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java @@ -300,7 +300,6 @@ public void setUp() throws Exception { .setDatanodeDetails(datanodeDetailsProto) .setVersion("0.6.0") .setSetupTime(1596347628802L) - .setBuildDate("2020-08-01T08:50Z") .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36") .build(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenKeysSearchEndpoint.java 
similarity index 81% rename from hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java rename to hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenKeysSearchEndpoint.java index ab16f349af2..f55d988cfe0 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenKeysSearchEndpoint.java @@ -81,12 +81,12 @@ * 11. Test Search Open Keys with Pagination: Verifies paginated search results. * 12. Test Search in Empty Bucket: Checks the response for searching within an empty bucket. */ -public class TestOMDBInsightSearchEndpoint extends AbstractReconSqlDBTest { +public class TestOpenKeysSearchEndpoint extends AbstractReconSqlDBTest { @TempDir private Path temporaryFolder; private ReconOMMetadataManager reconOMMetadataManager; - private OMDBInsightSearchEndpoint omdbInsightSearchEndpoint; + private OMDBInsightEndpoint omdbInsightEndpoint; private OzoneConfiguration ozoneConfiguration; private static final String ROOT_PATH = "/"; private static final String TEST_USER = "TestUser"; @@ -97,11 +97,9 @@ public class TestOMDBInsightSearchEndpoint extends AbstractReconSqlDBTest { @BeforeEach public void setUp() throws Exception { ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.setLong(OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD, - 100); + ozoneConfiguration.setLong(OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD, 100); omMetadataManager = initializeNewOmMetadataManager( - Files.createDirectory(temporaryFolder.resolve("JunitOmDBDir")) - .toFile()); + Files.createDirectory(temporaryFolder.resolve("JunitOmDBDir")).toFile()); reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager, Files.createDirectory(temporaryFolder.resolve("OmMetataDir")).toFile()); @@ -118,11 +116,8 @@ public void setUp() throws Exception { .addBinding(OMDBInsightEndpoint.class) .addBinding(ContainerHealthSchemaManager.class) .build(); - reconNamespaceSummaryManager = - reconTestInjector.getInstance(ReconNamespaceSummaryManager.class); - omdbInsightSearchEndpoint = reconTestInjector.getInstance( - OMDBInsightSearchEndpoint.class); - + reconNamespaceSummaryManager = reconTestInjector.getInstance(ReconNamespaceSummaryManager.class); + omdbInsightEndpoint = reconTestInjector.getInstance(OMDBInsightEndpoint.class); // populate OM DB and reprocess into Recon RocksDB populateOMDB(); NSSummaryTaskWithFSO nSSummaryTaskWithFso = @@ -152,26 +147,19 @@ private static OMMetadataManager initializeNewOmMetadataManager( public void testRootLevelSearchRestriction() throws IOException { // Test with root level path String rootPath = "/"; - Response response = omdbInsightSearchEndpoint.searchOpenKeys(rootPath, 20, ""); + Response response = + omdbInsightEndpoint.getOpenKeyInfo(-1, "", rootPath, true, true); assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); String entity = (String) response.getEntity(); assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), "Expected a message indicating the path must be at the bucket level or deeper"); - - // Test with root level path without trailing slash - rootPath = ""; - response = omdbInsightSearchEndpoint.searchOpenKeys(rootPath, 20, ""); - assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); - entity = (String) response.getEntity(); - assertTrue(entity.contains("Invalid 
startPrefix: Path must be at the bucket level or deeper"), - "Expected a message indicating the path must be at the bucket level or deeper"); } @Test public void testVolumeLevelSearchRestriction() throws IOException { // Test with volume level path String volumePath = "/vola"; - Response response = omdbInsightSearchEndpoint.searchOpenKeys(volumePath, 20, ""); + Response response = omdbInsightEndpoint.getOpenKeyInfo(20, "", volumePath, true, true); assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); String entity = (String) response.getEntity(); assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), @@ -179,7 +167,7 @@ public void testVolumeLevelSearchRestriction() throws IOException { // Test with another volume level path volumePath = "/volb"; - response = omdbInsightSearchEndpoint.searchOpenKeys(volumePath, 20, ""); + response = omdbInsightEndpoint.getOpenKeyInfo(20, "", volumePath, true, true); assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); entity = (String) response.getEntity(); assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), @@ -190,7 +178,7 @@ public void testVolumeLevelSearchRestriction() throws IOException { public void testBucketLevelSearch() throws IOException { // Search inside FSO bucket Response response = - omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1", 20, ""); + omdbInsightEndpoint.getOpenKeyInfo(20, "", "/vola/bucketa1", true, true); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); @@ -202,7 +190,7 @@ public void testBucketLevelSearch() throws IOException { // Search inside OBS bucket response = - omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb1", 20, ""); + omdbInsightEndpoint.getOpenKeyInfo(20, "", "/volb/bucketb1", true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); @@ -214,14 +202,14 @@ public void testBucketLevelSearch() throws IOException { // Search Inside LEGACY bucket response = - omdbInsightSearchEndpoint.searchOpenKeys("/volc/bucketc1", 20, ""); + omdbInsightEndpoint.getOpenKeyInfo(20, "", "/volc/bucketc1", true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(7, result.getNonFSOKeyInfoList().size()); // Test with bucket that does not exist - response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/nonexistentbucket", 20, ""); - assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); + response = omdbInsightEndpoint.getOpenKeyInfo(20, "", "/vola/nonexistentbucket", true, true); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); String entity = (String) response.getEntity(); assertTrue(entity.contains("No keys matched the search prefix"), "Expected a message indicating no keys were found"); @@ -230,7 +218,7 @@ public void testBucketLevelSearch() throws IOException { @Test public void testDirectoryLevelSearch() throws IOException { Response response = - omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira1", 20, ""); + omdbInsightEndpoint.getOpenKeyInfo(20, "", "/vola/bucketa1/dira1", true, true); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); @@ -241,7 +229,7 @@ public void testDirectoryLevelSearch() throws IOException { assertEquals(1000 * 3, 
result.getReplicatedDataSize()); response = - omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira2", 20, ""); + omdbInsightEndpoint.getOpenKeyInfo(20, "", "/vola/bucketa1/dira2", true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); @@ -252,7 +240,7 @@ public void testDirectoryLevelSearch() throws IOException { assertEquals(1000 * 3, result.getReplicatedDataSize()); response = - omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira3", 20, ""); + omdbInsightEndpoint.getOpenKeyInfo(20, "", "/vola/bucketa1/dira3", true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); @@ -263,8 +251,8 @@ public void testDirectoryLevelSearch() throws IOException { assertEquals(10000 * 3, result.getReplicatedDataSize()); // Test with non-existent directory - response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/nonexistentdir", 20, ""); - assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); + response = omdbInsightEndpoint.getOpenKeyInfo(20, "", "/vola/bucketa1/nonexistentdir", true, true); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); String entity = (String) response.getEntity(); assertTrue(entity.contains("No keys matched the search prefix"), "Expected a message indicating no keys were found"); @@ -273,7 +261,7 @@ public void testDirectoryLevelSearch() throws IOException { @Test public void testKeyLevelSearch() throws IOException { // FSO Bucket key-level search - Response response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/filea1", 10, ""); + Response response = omdbInsightEndpoint.getOpenKeyInfo(10, "", "/vola/bucketa1/filea1", true, true); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(1, result.getFsoKeyInfoList().size()); @@ -282,7 +270,7 @@ public void testKeyLevelSearch() throws IOException { assertEquals(1000, result.getUnreplicatedDataSize()); assertEquals(1000 * 3, result.getReplicatedDataSize()); - response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/filea2", 10, ""); + response = omdbInsightEndpoint.getOpenKeyInfo(10, "", "/vola/bucketa1/filea2", true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(1, result.getFsoKeyInfoList().size()); @@ -292,7 +280,8 @@ public void testKeyLevelSearch() throws IOException { assertEquals(1000 * 3, result.getReplicatedDataSize()); // OBS Bucket key-level search - response = omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb1/fileb1", 10, ""); + response = omdbInsightEndpoint + .getOpenKeyInfo(10, "", "/volb/bucketb1/fileb1", true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(0, result.getFsoKeyInfoList().size()); @@ -301,7 +290,8 @@ public void testKeyLevelSearch() throws IOException { assertEquals(1000, result.getUnreplicatedDataSize()); assertEquals(1000 * 3, result.getReplicatedDataSize()); - response = omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb1/fileb2", 10, ""); + response = omdbInsightEndpoint + .getOpenKeyInfo(10, "", "/volb/bucketb1/fileb2", true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(0, result.getFsoKeyInfoList().size()); @@ -311,14 +301,16 @@ public void testKeyLevelSearch() throws IOException { 
assertEquals(1000 * 3, result.getReplicatedDataSize()); // Test with non-existent key - response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/nonexistentfile", 1, ""); - assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); + response = omdbInsightEndpoint + .getOpenKeyInfo(10, "", "/volb/bucketb1/nonexistentfile", true, true); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); String entity = (String) response.getEntity(); assertTrue(entity.contains("No keys matched the search prefix"), "Expected a message indicating no keys were found"); - response = omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb1/nonexistentfile", 1, ""); - assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); + response = omdbInsightEndpoint + .getOpenKeyInfo(10, "", "/volb/bucketb1/nonexistentfile", true, true); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); entity = (String) response.getEntity(); assertTrue(entity.contains("No keys matched the search prefix"), "Expected a message indicating no keys were found"); @@ -328,30 +320,32 @@ public void testKeyLevelSearch() throws IOException { @Test public void testKeyLevelSearchUnderDirectory() throws IOException { // FSO Bucket key-level search - Response response = - omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira1/innerfile", 10, ""); + Response response = omdbInsightEndpoint + .getOpenKeyInfo(10, "", "/vola/bucketa1/dira1/innerfile", true, true); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(1, result.getFsoKeyInfoList().size()); assertEquals(0, result.getNonFSOKeyInfoList().size()); - response = - omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira2/innerfile", 10, ""); + response = omdbInsightEndpoint + .getOpenKeyInfo(10, "", "/vola/bucketa1/dira2/innerfile", true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(1, result.getFsoKeyInfoList().size()); assertEquals(0, result.getNonFSOKeyInfoList().size()); // Test for unknown file in fso bucket - response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira1/unknownfile", 10, ""); - assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); + response = omdbInsightEndpoint + .getOpenKeyInfo(10, "", "/vola/bucketa1/dira1/unknownfile", true, true); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); String entity = (String) response.getEntity(); assertTrue(entity.contains("No keys matched the search prefix"), "Expected a message indicating no keys were found"); // Test for unknown file in fso bucket - response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira2/unknownfile", 10, ""); - assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); + response = omdbInsightEndpoint + .getOpenKeyInfo(10, "", "/vola/bucketa1/dira2/unknownfile", true, true); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); entity = (String) response.getEntity(); assertTrue(entity.contains("No keys matched the search prefix"), "Expected a message indicating no keys were found"); @@ -360,56 +354,56 @@ public void testKeyLevelSearchUnderDirectory() throws IOException { @Test public void testSearchUnderNestedDirectory() throws IOException { - Response response = 
omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira3", 20, - ""); + Response response = omdbInsightEndpoint + .getOpenKeyInfo(20, "", "/vola/bucketa1/dira3", true, true); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(10, result.getFsoKeyInfoList().size()); assertEquals(0, result.getNonFSOKeyInfoList().size()); // Search under dira31 - response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira3/dira31", - 20, ""); + response = omdbInsightEndpoint + .getOpenKeyInfo(20, "", "/vola/bucketa1/dira3/dira31", true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(6, result.getFsoKeyInfoList().size()); assertEquals(0, result.getNonFSOKeyInfoList().size()); // Search under dira32 - response = omdbInsightSearchEndpoint.searchOpenKeys( - "/vola/bucketa1/dira3/dira31/dira32", 20, ""); + response = omdbInsightEndpoint + .getOpenKeyInfo(20, "", "/vola/bucketa1/dira3/dira31/dira32", true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(3, result.getFsoKeyInfoList().size()); assertEquals(0, result.getNonFSOKeyInfoList().size()); // Search under dira33 - response = omdbInsightSearchEndpoint.searchOpenKeys( - "/vola/bucketa1/dira3/dira31/dira32/dira33", 20, ""); + response = omdbInsightEndpoint + .getOpenKeyInfo(20, "", "/vola/bucketa1/dira3/dira31/dira32/dira33", true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(1, result.getFsoKeyInfoList().size()); assertEquals(0, result.getNonFSOKeyInfoList().size()); // Search for the exact file under dira33 - response = omdbInsightSearchEndpoint.searchOpenKeys( - "/vola/bucketa1/dira3/dira31/dira32/dira33/file33_1", 20, ""); + response = omdbInsightEndpoint + .getOpenKeyInfo(20, "", "/vola/bucketa1/dira3/dira31/dira32/dira33/file33_1", true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(1, result.getFsoKeyInfoList().size()); assertEquals(0, result.getNonFSOKeyInfoList().size()); // Search for a non existant file under each nested directory - response = omdbInsightSearchEndpoint.searchOpenKeys( - "/vola/bucketa1/dira3/dira31/dira32/dira33/nonexistentfile", 20, ""); - assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); + response = omdbInsightEndpoint + .getOpenKeyInfo(20, "", "/vola/bucketa1/dira3/dira31/dira32/dira33/nonexistentfile", true, true); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); String entity = (String) response.getEntity(); assertTrue(entity.contains("No keys matched the search prefix"), "Expected a message indicating no keys were found"); - response = omdbInsightSearchEndpoint.searchOpenKeys( - "/vola/bucketa1/dira3/dira31/dira32/nonexistentfile", 20, ""); - assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); + response = omdbInsightEndpoint + .getOpenKeyInfo(20, "", "/vola/bucketa1/dira3/dira31/dira32/nonexistentfile", true, true); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); entity = (String) response.getEntity(); assertTrue(entity.contains("No keys matched the search prefix"), "Expected a message indicating no keys were found"); @@ -418,7 +412,7 @@ public void testSearchUnderNestedDirectory() throws IOException { @Test public void 
testLimitSearch() throws IOException { Response response = - omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1", 2, ""); + omdbInsightEndpoint.getOpenKeyInfo(2, "", "/vola/bucketa1", true, true); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); @@ -430,8 +424,8 @@ public void testLimitSearch() throws IOException { public void testSearchOpenKeysWithBadRequest() throws IOException { // Give a negative limit int negativeLimit = -1; - Response response = omdbInsightSearchEndpoint.searchOpenKeys("@323232", negativeLimit, ""); - + Response response = omdbInsightEndpoint + .getOpenKeyInfo(negativeLimit, "", "@323232", true, true); // Then the response should indicate that the request was bad assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus(), "Expected a 400 BAD REQUEST status"); @@ -440,7 +434,7 @@ public void testSearchOpenKeysWithBadRequest() throws IOException { assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), "Expected a message indicating the path must be at the bucket level or deeper"); - response = omdbInsightSearchEndpoint.searchOpenKeys("///", 20, ""); + response = omdbInsightEndpoint.getOpenKeyInfo(20, "", "///", true, true); assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); entity = (String) response.getEntity(); assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), @@ -449,8 +443,8 @@ public void testSearchOpenKeysWithBadRequest() throws IOException { @Test public void testLastKeyInResponse() throws IOException { - Response response = - omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb1", 20, ""); + Response response = omdbInsightEndpoint + .getOpenKeyInfo(20, "", "/volb/bucketb1", true, true); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); @@ -472,7 +466,7 @@ public void testSearchOpenKeysWithPagination() throws IOException { String prevKey = ""; // Perform the first search request - Response response = omdbInsightSearchEndpoint.searchOpenKeys(startPrefix, limit, prevKey); + Response response = omdbInsightEndpoint.getOpenKeyInfo(limit, prevKey, startPrefix, true, true); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(2, result.getNonFSOKeyInfoList().size()); @@ -483,7 +477,7 @@ public void testSearchOpenKeysWithPagination() throws IOException { assertNotNull(prevKey, "Last key should not be null"); // Perform the second search request using the last key - response = omdbInsightSearchEndpoint.searchOpenKeys(startPrefix, limit, prevKey); + response = omdbInsightEndpoint.getOpenKeyInfo(limit, prevKey, startPrefix, true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(2, result.getNonFSOKeyInfoList().size()); @@ -494,7 +488,7 @@ public void testSearchOpenKeysWithPagination() throws IOException { assertNotNull(prevKey, "Last key should not be null"); // Perform the third search request using the last key - response = omdbInsightSearchEndpoint.searchOpenKeys(startPrefix, limit, prevKey); + response = omdbInsightEndpoint.getOpenKeyInfo(limit, prevKey, startPrefix, true, true); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(1, result.getNonFSOKeyInfoList().size()); @@ -506,13 
+500,61 @@ public void testSearchOpenKeysWithPagination() throws IOException { @Test public void testSearchInEmptyBucket() throws IOException { // Search in empty bucket bucketb2 - Response response = omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb2", 20, ""); - assertEquals(404, response.getStatus()); + Response response = omdbInsightEndpoint.getOpenKeyInfo(20, "", "/volb/bucketb2", true, true); + assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); String entity = (String) response.getEntity(); assertTrue(entity.contains("No keys matched the search prefix"), "Expected a message indicating no keys were found"); } + @Test + public void testSearchWithPrevKeyOnly() throws IOException { + String prevKey = "/volb/bucketb1/fileb1"; // Key exists in volb/bucketb1 + Response response = omdbInsightEndpoint.getOpenKeyInfo(4, prevKey, "", true, true); + + assertEquals(200, response.getStatus()); + + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(4, result.getNonFSOKeyInfoList().size(), "Expected 4 remaining keys after 'fileb1'"); + assertEquals("/volb/bucketb1/fileb5", result.getLastKey(), "Expected last key to be 'fileb5'"); + } + + @Test + public void testSearchWithEmptyPrevKeyAndStartPrefix() throws IOException { + Response response = omdbInsightEndpoint.getOpenKeyInfo(-1, "", "", true, true); + + assertEquals(200, response.getStatus()); + + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + // Assert all the keys are returned + assertEquals(12, result.getNonFSOKeyInfoList().size(), "Expected all keys to be returned"); + } + + @Test + public void testSearchWithStartPrefixOnly() throws IOException { + String startPrefix = "/volb/bucketb1/"; + Response response = omdbInsightEndpoint.getOpenKeyInfo(10, "", startPrefix, true, true); + + assertEquals(200, response.getStatus()); + + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(5, result.getNonFSOKeyInfoList().size(), "Expected 5 keys starting with 'fileb1'"); + assertEquals("/volb/bucketb1/fileb5", result.getLastKey(), "Expected last key to be 'fileb5'"); + } + + @Test + public void testSearchWithPrevKeyAndStartPrefix() throws IOException { + String startPrefix = "/volb/bucketb1/"; + String prevKey = "/volb/bucketb1/fileb1"; + Response response = omdbInsightEndpoint.getOpenKeyInfo(10, prevKey, startPrefix, true, true); + + assertEquals(200, response.getStatus()); + + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(4, result.getNonFSOKeyInfoList().size(), "Expected 4 keys after 'fileb1'"); + assertEquals("/volb/bucketb1/fileb5", result.getLastKey(), "Expected last key to be 'fileb5'"); + } + /** * Tests the NSSummaryEndpoint for a given volume, bucket, and directory structure. 
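For reviewers, a minimal sketch of how a caller could page through open keys with the prevKey/startPrefix contract exercised by the tests above. Only the argument order (limit, prevKey, startPrefix, then two boolean flags) and the response accessors are taken from this diff; the package names in the imports, the helper class itself, and the assumption that the two flags toggle FSO and non-FSO results are not confirmed here.

import javax.ws.rs.core.Response;

import org.apache.hadoop.ozone.recon.api.OMDBInsightEndpoint;
import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse;

/** Illustrative helper, not part of this patch. */
final class OpenKeyPagingSketch {

  private OpenKeyPagingSketch() { }

  /** Counts all open keys under startPrefix by feeding each page's lastKey back as prevKey. */
  static int countOpenKeys(OMDBInsightEndpoint endpoint, String startPrefix, int pageSize) {
    int total = 0;
    String prevKey = "";
    while (true) {
      Response page = endpoint.getOpenKeyInfo(pageSize, prevKey, startPrefix, true, true);
      if (page.getStatus() != Response.Status.OK.getStatusCode()) {
        break; // NO_CONTENT: nothing matched; BAD_REQUEST: prefix above bucket level
      }
      KeyInsightInfoResponse body = (KeyInsightInfoResponse) page.getEntity();
      int returned = body.getFsoKeyInfoList().size() + body.getNonFSOKeyInfoList().size();
      total += returned;
      if (returned < pageSize) {
        break; // short page: the listing is exhausted
      }
      prevKey = body.getLastKey(); // resume strictly after the last key already returned
    }
    return total;
  }
}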
* The test setup mimics the following filesystem structure with specified sizes: @@ -568,7 +610,7 @@ private void populateOMDB() throws Exception { // Create Bucket in volb createBucket("volb", "bucketb1", 1000 + 1000 + 1000 + 1000 + 1000, - getOBSBucketLayout()); + getOBSBucketLayout()); createBucket("volb", "bucketb2", 0, getOBSBucketLayout()); // Empty Bucket // Create Bucket in volc diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestSchemaVersionTableDefinition.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestSchemaVersionTableDefinition.java new file mode 100644 index 00000000000..ab3c4f8e6ec --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestSchemaVersionTableDefinition.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.ozone.recon.persistence; + +import static org.hadoop.ozone.recon.schema.SchemaVersionTableDefinition.SCHEMA_VERSION_TABLE_NAME; +import static org.jooq.impl.DSL.name; +import static org.junit.jupiter.api.Assertions.assertEquals; + +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.lang3.tuple.ImmutablePair; +import org.apache.commons.lang3.tuple.Pair; +import org.jooq.DSLContext; +import org.jooq.Record1; +import org.jooq.impl.DSL; +import org.junit.jupiter.api.Test; + +/** + * Test class for SchemaVersionTableDefinition. 
+ */ +public class TestSchemaVersionTableDefinition extends AbstractReconSqlDBTest { + + public TestSchemaVersionTableDefinition() { + super(); + } + + @Test + public void testSchemaVersionTableCreation() throws Exception { + Connection connection = getConnection(); + // Verify table definition + DatabaseMetaData metaData = connection.getMetaData(); + ResultSet resultSet = metaData.getColumns(null, null, + SCHEMA_VERSION_TABLE_NAME, null); + + List> expectedPairs = new ArrayList<>(); + + expectedPairs.add(new ImmutablePair<>("version_number", Types.INTEGER)); + expectedPairs.add(new ImmutablePair<>("applied_on", Types.TIMESTAMP)); + + List> actualPairs = new ArrayList<>(); + + while (resultSet.next()) { + actualPairs.add(new ImmutablePair<>(resultSet.getString("COLUMN_NAME"), + resultSet.getInt("DATA_TYPE"))); + } + + assertEquals(2, actualPairs.size(), "Unexpected number of columns"); + assertEquals(expectedPairs, actualPairs, "Column definitions do not match expected values."); + } + + @Test + public void testSchemaVersionCRUDOperations() throws SQLException { + Connection connection = getConnection(); + + DatabaseMetaData metaData = connection.getMetaData(); + ResultSet resultSet = metaData.getTables(null, null, + SCHEMA_VERSION_TABLE_NAME, null); + + while (resultSet.next()) { + assertEquals(SCHEMA_VERSION_TABLE_NAME, + resultSet.getString("TABLE_NAME")); + } + + DSLContext dslContext = DSL.using(connection); + + // Insert a new version record + dslContext.insertInto(DSL.table(SCHEMA_VERSION_TABLE_NAME)) + .columns(DSL.field(name("version_number")), DSL.field(name("applied_on"))) + .values(1, new Timestamp(System.currentTimeMillis())) + .execute(); + + // Read the inserted record + Record1 result = dslContext.select(DSL.field(name("version_number"), Integer.class)) + .from(DSL.table(SCHEMA_VERSION_TABLE_NAME)) + .fetchOne(); + + assertEquals(1, result.value1(), "The version number does not match the expected value."); + + // Update the version record + dslContext.update(DSL.table(SCHEMA_VERSION_TABLE_NAME)) + .set(DSL.field(name("version_number")), 2) + .execute(); + + // Read the updated record + result = dslContext.select(DSL.field(name("version_number"), Integer.class)) + .from(DSL.table(SCHEMA_VERSION_TABLE_NAME)) + .fetchOne(); + + assertEquals(2, result.value1(), "The updated version number does not match the expected value."); + + // Delete the version record + dslContext.deleteFrom(DSL.table(SCHEMA_VERSION_TABLE_NAME)) + .execute(); + + // Verify deletion + int count = dslContext.fetchCount(DSL.table(SCHEMA_VERSION_TABLE_NAME)); + assertEquals(0, count, "The table should be empty after deletion."); + } +} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java index eb62b7d3ece..939279fc17b 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java @@ -88,7 +88,7 @@ public void setUp(@TempDir File tempDir) throws Exception { conf = new OzoneConfiguration(); conf.set(OZONE_METADATA_DIRS, tempDir.getAbsolutePath()); conf.set(OZONE_SCM_NAMES, "localhost"); - store = DBStoreBuilder.createDBStore(conf, new ReconSCMDBDefinition()); + store = DBStoreBuilder.createDBStore(conf, ReconSCMDBDefinition.get()); scmhaManager = 
SCMHAManagerStub.getInstance( true, new SCMHADBTransactionBufferStub(store)); sequenceIdGen = new SequenceIdGenerator( diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java index 02207f9c620..f17eb78d89c 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java @@ -86,7 +86,7 @@ public void setUp() throws Exception { reconStorageConfig = new ReconStorageConfig(conf, reconUtils); versionManager = new HDDSLayoutVersionManager( reconStorageConfig.getLayoutVersion()); - store = DBStoreBuilder.createDBStore(conf, new ReconSCMDBDefinition()); + store = DBStoreBuilder.createDBStore(conf, ReconSCMDBDefinition.get()); reconContext = new ReconContext(conf, reconUtils); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java index d723ee75e85..302772e40fd 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java @@ -94,7 +94,7 @@ public void setup() throws IOException { temporaryFolder.toAbsolutePath().toString()); conf.set(OZONE_SCM_NAMES, "localhost"); scmStorageConfig = new ReconStorageConfig(conf, new ReconUtils()); - store = DBStoreBuilder.createDBStore(conf, new ReconSCMDBDefinition()); + store = DBStoreBuilder.createDBStore(conf, ReconSCMDBDefinition.get()); scmhaManager = SCMHAManagerStub.getInstance( true, new SCMHADBTransactionBufferStub(store)); scmContext = SCMContext.emptyContext(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java index 3831f03bfd8..f4f0bfe9acd 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java @@ -68,7 +68,7 @@ public class TestOMDBUpdatesHandler { private OMMetadataManager omMetadataManager; private OMMetadataManager reconOmMetadataManager; - private OMDBDefinition omdbDefinition = new OMDBDefinition(); + private final OMDBDefinition omdbDefinition = OMDBDefinition.get(); private Random random = new Random(); private OzoneConfiguration createNewTestPath(String folderName) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmUpdateEventValidator.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmUpdateEventValidator.java index 0adb44e87ca..7da98acb38f 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmUpdateEventValidator.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmUpdateEventValidator.java @@ -53,7 +53,7 @@ public class TestOmUpdateEventValidator { private OmUpdateEventValidator eventValidator; - private OMDBDefinition omdbDefinition; + private final OMDBDefinition omdbDefinition = OMDBDefinition.get(); private OMMetadataManager omMetadataManager; private Logger logger; @TempDir @@ -63,11 +63,10 @@ public class 
TestOmUpdateEventValidator { public void setUp() throws IOException { omMetadataManager = initializeNewOmMetadataManager( temporaryFolder.toFile()); - omdbDefinition = new OMDBDefinition(); eventValidator = new OmUpdateEventValidator(omdbDefinition); // Create a mock logger logger = mock(Logger.class); - eventValidator.setLogger(logger); + OmUpdateEventValidator.setLogger(logger); } @Test diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestInitialConstraintUpgradeAction.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestInitialConstraintUpgradeAction.java new file mode 100644 index 00000000000..b2399f42362 --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestInitialConstraintUpgradeAction.java @@ -0,0 +1,192 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.ozone.recon.upgrade; + +import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest; +import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; +import org.hadoop.ozone.recon.schema.ContainerSchemaDefinition; +import org.jooq.DSLContext; +import org.jooq.impl.DSL; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import javax.sql.DataSource; +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.ResultSet; +import java.sql.SQLException; + +import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UNHEALTHY_CONTAINERS_TABLE_NAME; +import static org.jooq.impl.DSL.field; +import static org.jooq.impl.DSL.name; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Test class for InitialConstraintUpgradeAction. 
+ */ +public class TestInitialConstraintUpgradeAction extends AbstractReconSqlDBTest { + + private InitialConstraintUpgradeAction upgradeAction; + private DSLContext dslContext; + private ReconStorageContainerManagerFacade mockScmFacade; + + @BeforeEach + public void setUp() throws SQLException { + // Initialize the DSLContext + dslContext = getDslContext(); + + // Initialize the upgrade action + upgradeAction = new InitialConstraintUpgradeAction(); + + // Mock the SCM facade to provide the DataSource + mockScmFacade = mock(ReconStorageContainerManagerFacade.class); + DataSource dataSource = getInjector().getInstance(DataSource.class); + when(mockScmFacade.getDataSource()).thenReturn(dataSource); + + // Set the DataSource and DSLContext directly + upgradeAction.setDataSource(dataSource); + upgradeAction.setDslContext(dslContext); + + // Check if the table already exists + try (Connection conn = dataSource.getConnection()) { + DatabaseMetaData dbMetaData = conn.getMetaData(); + ResultSet tables = dbMetaData.getTables(null, null, UNHEALTHY_CONTAINERS_TABLE_NAME, null); + if (!tables.next()) { + // Create the initial table if it does not exist + dslContext.createTable(UNHEALTHY_CONTAINERS_TABLE_NAME) + .column("container_id", org.jooq.impl.SQLDataType.BIGINT + .nullable(false)) + .column("container_state", org.jooq.impl.SQLDataType.VARCHAR(16) + .nullable(false)) + .constraint(DSL.constraint("pk_container_id") + .primaryKey("container_id", "container_state")) + .execute(); + } + } + } + + @Test + public void testUpgradeAppliesConstraintModificationForAllStates() throws SQLException { + // Run the upgrade action + upgradeAction.execute(mockScmFacade); + + // Iterate over all valid states and insert records + for (ContainerSchemaDefinition.UnHealthyContainerStates state : + ContainerSchemaDefinition.UnHealthyContainerStates.values()) { + dslContext.insertInto(DSL.table(UNHEALTHY_CONTAINERS_TABLE_NAME)) + .columns( + field(name("container_id")), + field(name("container_state")), + field(name("in_state_since")), + field(name("expected_replica_count")), + field(name("actual_replica_count")), + field(name("replica_delta")), + field(name("reason")) + ) + .values( + System.currentTimeMillis(), // Unique container_id for each record + state.name(), System.currentTimeMillis(), 3, 2, 1, "Replica count mismatch" + ) + .execute(); + } + + // Verify that the number of inserted records matches the number of enum values + int count = dslContext.fetchCount(DSL.table(UNHEALTHY_CONTAINERS_TABLE_NAME)); + assertEquals(ContainerSchemaDefinition.UnHealthyContainerStates.values().length, + count, "Expected one record for each valid state"); + + // Try inserting an invalid state (should fail due to constraint) + assertThrows(org.jooq.exception.DataAccessException.class, () -> + dslContext.insertInto(DSL.table(UNHEALTHY_CONTAINERS_TABLE_NAME)) + .columns( + field(name("container_id")), + field(name("container_state")), + field(name("in_state_since")), + field(name("expected_replica_count")), + field(name("actual_replica_count")), + field(name("replica_delta")), + field(name("reason")) + ) + .values(999L, "INVALID_STATE", System.currentTimeMillis(), 3, 2, 1, + "Invalid state test").execute(), + "Inserting an invalid container_state should fail due to the constraint"); + } + + @Test + public void testInsertionWithNullContainerState() { + assertThrows(org.jooq.exception.DataAccessException.class, () -> { + dslContext.insertInto(DSL.table(UNHEALTHY_CONTAINERS_TABLE_NAME)) + .columns( + field(name("container_id")), + 
field(name("container_state")), + field(name("in_state_since")), + field(name("expected_replica_count")), + field(name("actual_replica_count")), + field(name("replica_delta")), + field(name("reason")) + ) + .values( + 100L, // container_id + null, // container_state is NULL + System.currentTimeMillis(), 3, 2, 1, "Testing NULL state" + ) + .execute(); + }, "Inserting a NULL container_state should fail due to the NOT NULL constraint"); + } + + @Test + public void testDuplicatePrimaryKeyInsertion() throws SQLException { + // Insert the first record + dslContext.insertInto(DSL.table(UNHEALTHY_CONTAINERS_TABLE_NAME)) + .columns( + field(name("container_id")), + field(name("container_state")), + field(name("in_state_since")), + field(name("expected_replica_count")), + field(name("actual_replica_count")), + field(name("replica_delta")), + field(name("reason")) + ) + .values(200L, "MISSING", System.currentTimeMillis(), 3, 2, 1, "First insertion" + ) + .execute(); + + // Try inserting a duplicate record with the same primary key + assertThrows(org.jooq.exception.DataAccessException.class, () -> { + dslContext.insertInto(DSL.table(UNHEALTHY_CONTAINERS_TABLE_NAME)) + .columns( + field(name("container_id")), + field(name("container_state")), + field(name("in_state_since")), + field(name("expected_replica_count")), + field(name("actual_replica_count")), + field(name("replica_delta")), + field(name("reason")) + ) + .values(200L, "MISSING", System.currentTimeMillis(), 3, 2, 1, "Duplicate insertion" + ) + .execute(); + }, "Inserting a duplicate primary key should fail due to the primary key constraint"); + } + +} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestReconLayoutVersionManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestReconLayoutVersionManager.java new file mode 100644 index 00000000000..a22c737691d --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestReconLayoutVersionManager.java @@ -0,0 +1,365 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hadoop.ozone.recon.upgrade; + +import org.apache.hadoop.ozone.recon.ReconContext; +import org.apache.hadoop.ozone.recon.ReconSchemaVersionTableManager; +import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; +import org.mockito.InOrder; +import org.mockito.MockedStatic; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.AfterEach; + +import javax.sql.DataSource; +import java.sql.Connection; +import java.sql.SQLException; +import java.util.Arrays; +import java.util.List; +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.anyInt; + + +/** + * Tests for ReconLayoutVersionManager. + */ +public class TestReconLayoutVersionManager { + + private ReconSchemaVersionTableManager schemaVersionTableManager; + private ReconLayoutVersionManager layoutVersionManager; + private MockedStatic mockedEnum; + private MockedStatic mockedEnumUpgradeActionType; + private ReconStorageContainerManagerFacade scmFacadeMock; + private DataSource mockDataSource; + private Connection mockConnection; + + @BeforeEach + public void setUp() throws SQLException { + schemaVersionTableManager = mock(ReconSchemaVersionTableManager.class); + when(schemaVersionTableManager.getCurrentSchemaVersion()).thenReturn(0); + + // Mocking ReconLayoutFeature.values() to return custom enum instances + mockedEnum = mockStatic(ReconLayoutFeature.class); + mockedEnumUpgradeActionType = mockStatic(ReconUpgradeAction.UpgradeActionType.class); + + ReconLayoutFeature feature1 = mock(ReconLayoutFeature.class); + when(feature1.getVersion()).thenReturn(1); + ReconUpgradeAction action1 = mock(ReconUpgradeAction.class); + when(feature1.getAction(ReconUpgradeAction.UpgradeActionType.FINALIZE)) + .thenReturn(Optional.of(action1)); + + ReconLayoutFeature feature2 = mock(ReconLayoutFeature.class); + when(feature2.getVersion()).thenReturn(2); + ReconUpgradeAction action2 = mock(ReconUpgradeAction.class); + when(feature2.getAction(ReconUpgradeAction.UpgradeActionType.FINALIZE)) + .thenReturn(Optional.of(action2)); + + // Define the custom features to be returned + mockedEnum.when(ReconLayoutFeature::values).thenReturn(new ReconLayoutFeature[]{feature1, feature2}); + + layoutVersionManager = new ReconLayoutVersionManager(schemaVersionTableManager, mock(ReconContext.class)); + + // Common mocks for all tests + scmFacadeMock = mock(ReconStorageContainerManagerFacade.class); + mockDataSource = mock(DataSource.class); + mockConnection = mock(Connection.class); + + when(scmFacadeMock.getDataSource()).thenReturn(mockDataSource); + when(mockDataSource.getConnection()).thenReturn(mockConnection); + + doNothing().when(mockConnection).setAutoCommit(false); + doNothing().when(mockConnection).commit(); + doNothing().when(mockConnection).rollback(); + } + + @AfterEach + public void tearDown() { + // Close the static mock after each test to deregister it + mockedEnum.close(); + if (mockedEnumUpgradeActionType != null) { + 
mockedEnumUpgradeActionType.close(); + } + } + + /** + * Tests the initialization of layout version manager to ensure + * that the MLV (Metadata Layout Version) is set correctly to 0, + * and SLV (Software Layout Version) reflects the maximum available version. + */ + @Test + public void testInitializationWithMockedValues() { + assertEquals(0, layoutVersionManager.getCurrentMLV()); + assertEquals(2, layoutVersionManager.getCurrentSLV()); + } + + /** + * Tests the finalization of layout features and ensures that the updateSchemaVersion for + * the schemaVersionTable is triggered for each feature version. + */ + @Test + public void testFinalizeLayoutFeaturesWithMockedValues() throws SQLException { + // Execute the method under test + layoutVersionManager.finalizeLayoutFeatures(scmFacadeMock); + + // Verify that schema versions are updated for our custom features + verify(schemaVersionTableManager, times(1)) + .updateSchemaVersion(1, mockConnection); + verify(schemaVersionTableManager, times(1)) + .updateSchemaVersion(2, mockConnection); + } + + /** + * Tests the retrieval of registered features to ensure that the correct + * layout features are returned according to the mocked values. + */ + @Test + public void testGetRegisteredFeaturesWithMockedValues() { + // Fetch the registered features + List registeredFeatures = layoutVersionManager.getRegisteredFeatures(); + + // Verify that the registered features match the mocked ones + ReconLayoutFeature feature1 = ReconLayoutFeature.values()[0]; + ReconLayoutFeature feature2 = ReconLayoutFeature.values()[1]; + List expectedFeatures = Arrays.asList(feature1, feature2); + assertEquals(expectedFeatures, registeredFeatures); + } + + /** + * Tests the scenario where no layout features are present. Ensures that no schema + * version updates are attempted when there are no features to finalize. + */ + @Test + public void testNoLayoutFeatures() throws SQLException { + // Ensure no layout features are present + mockedEnum.when(ReconLayoutFeature::values).thenReturn(new ReconLayoutFeature[]{}); + + // Execute the method under test + layoutVersionManager.finalizeLayoutFeatures(scmFacadeMock); + + // Verify that no schema version updates were attempted + verify(schemaVersionTableManager, never()).updateSchemaVersion(anyInt(), any(Connection.class)); + } + + /** + * Tests the scenario where an upgrade action fails. Ensures that if an upgrade action + * throws an exception, the schema version is not updated. 
+ */ + @Test + public void testUpgradeActionFailure() throws Exception { + // Reset existing mocks and set up new features for this specific test + mockedEnum.reset(); + + // Mock ReconLayoutFeature instances + ReconLayoutFeature feature1 = mock(ReconLayoutFeature.class); + when(feature1.getVersion()).thenReturn(1); + ReconUpgradeAction action1 = mock(ReconUpgradeAction.class); + + // Simulate an exception being thrown during the upgrade action execution + doThrow(new RuntimeException("Upgrade failed")).when(action1).execute(scmFacadeMock); + when(feature1.getAction(ReconUpgradeAction.UpgradeActionType.FINALIZE)) + .thenReturn(Optional.of(action1)); + + // Mock the static values method to return the custom feature + mockedEnum.when(ReconLayoutFeature::values).thenReturn(new ReconLayoutFeature[]{feature1}); + + // Execute the layout feature finalization + try { + layoutVersionManager.finalizeLayoutFeatures(scmFacadeMock); + } catch (Exception e) { + // Exception is expected, so it's fine to catch and ignore it here + } + + // Verify that metadata layout version MLV was not updated as the transaction was rolled back + assertEquals(0, layoutVersionManager.getCurrentMLV()); + + // Verify that a rollback was triggered + verify(mockConnection, times(1)).rollback(); + } + + /** + * Tests the scenario where the schema version update fails. Ensures that if the schema + * version update fails, the transaction is rolled back and the metadata layout version + * is not updated. + */ + @Test + public void testUpdateSchemaFailure() throws Exception { + // Reset existing mocks and set up new features for this specific test + mockedEnum.reset(); + + // Mock ReconLayoutFeature instances + ReconLayoutFeature feature1 = mock(ReconLayoutFeature.class); + when(feature1.getVersion()).thenReturn(1); + ReconUpgradeAction action1 = mock(ReconUpgradeAction.class); + + // Simulate an exception being thrown during the schema version update + doThrow(new RuntimeException("Schema update failed")).when(schemaVersionTableManager). + updateSchemaVersion(1, mockConnection); + when(feature1.getAction(ReconUpgradeAction.UpgradeActionType.FINALIZE)) + .thenReturn(Optional.of(action1)); + + // Mock the static values method to return the custom feature + mockedEnum.when(ReconLayoutFeature::values).thenReturn(new ReconLayoutFeature[]{feature1}); + + // Execute the layout feature finalization + try { + layoutVersionManager.finalizeLayoutFeatures(scmFacadeMock); + } catch (Exception e) { + // Exception is expected, so it's fine to catch and ignore it here + } + + // Verify that metadata layout version MLV was not updated as the transaction was rolled back + assertEquals(0, layoutVersionManager.getCurrentMLV()); + + // Verify that the upgrade action was not committed and a rollback was triggered + verify(mockConnection, times(1)).rollback(); + } + + /** + * Tests the order of execution for the upgrade actions to ensure that + * they are executed sequentially according to their version numbers. 
+ */ + @Test + public void testUpgradeActionExecutionOrder() throws Exception { + // Reset the existing static mock for this specific test + mockedEnum.reset(); + + // Mock ReconLayoutFeature instances + ReconLayoutFeature feature1 = mock(ReconLayoutFeature.class); + when(feature1.getVersion()).thenReturn(1); + ReconUpgradeAction action1 = mock(ReconUpgradeAction.class); + when(feature1.getAction(ReconUpgradeAction.UpgradeActionType.FINALIZE)) + .thenReturn(Optional.of(action1)); + + ReconLayoutFeature feature2 = mock(ReconLayoutFeature.class); + when(feature2.getVersion()).thenReturn(2); + ReconUpgradeAction action2 = mock(ReconUpgradeAction.class); + when(feature2.getAction(ReconUpgradeAction.UpgradeActionType.FINALIZE)) + .thenReturn(Optional.of(action2)); + + ReconLayoutFeature feature3 = mock(ReconLayoutFeature.class); + when(feature3.getVersion()).thenReturn(3); + ReconUpgradeAction action3 = mock(ReconUpgradeAction.class); + when(feature3.getAction(ReconUpgradeAction.UpgradeActionType.FINALIZE)) + .thenReturn(Optional.of(action3)); + + // Mock the static values method to return custom features in a jumbled order + mockedEnum.when(ReconLayoutFeature::values).thenReturn(new ReconLayoutFeature[]{feature2, feature3, feature1}); + + // Execute the layout feature finalization + layoutVersionManager.finalizeLayoutFeatures(scmFacadeMock); + + // Verify that the actions were executed in the correct order using InOrder + InOrder inOrder = inOrder(action1, action2, action3); + inOrder.verify(action1).execute(scmFacadeMock); // Should be executed first + inOrder.verify(action2).execute(scmFacadeMock); // Should be executed second + inOrder.verify(action3).execute(scmFacadeMock); // Should be executed third + } + + /** + * Tests the scenario where no upgrade actions are needed. Ensures that if the current + * schema version matches the maximum layout version, no upgrade actions are executed. + */ + @Test + public void testNoUpgradeActionsNeeded() throws SQLException { + // Mock the current schema version to the maximum layout version + when(schemaVersionTableManager.getCurrentSchemaVersion()).thenReturn(0); + + mockedEnum.when(ReconLayoutFeature::values).thenReturn(new ReconLayoutFeature[]{}); + + // Execute the method under test + layoutVersionManager.finalizeLayoutFeatures(scmFacadeMock); + + // Verify that no schema version updates were attempted + verify(schemaVersionTableManager, never()).updateSchemaVersion(anyInt(), eq(mockConnection)); + } + + /** + * Tests the scenario where the first two features are finalized, + * and then a third feature is introduced. Ensures that only the + * newly introduced feature is finalized while the previously + * finalized features are skipped. 
+ */ + @Test + public void testFinalizingNewFeatureWithoutReFinalizingPreviousFeatures() throws Exception { + // Step 1: Mock the schema version manager + when(schemaVersionTableManager.getCurrentSchemaVersion()).thenReturn(0); + + // Mock the first two features + ReconLayoutFeature feature1 = mock(ReconLayoutFeature.class); + when(feature1.getVersion()).thenReturn(1); + ReconUpgradeAction action1 = mock(ReconUpgradeAction.class); + when(feature1.getAction(ReconUpgradeAction.UpgradeActionType.FINALIZE)) + .thenReturn(Optional.of(action1)); + + ReconLayoutFeature feature2 = mock(ReconLayoutFeature.class); + when(feature2.getVersion()).thenReturn(2); + ReconUpgradeAction action2 = mock(ReconUpgradeAction.class); + when(feature2.getAction(ReconUpgradeAction.UpgradeActionType.FINALIZE)) + .thenReturn(Optional.of(action2)); + + mockedEnum.when(ReconLayoutFeature::values).thenReturn(new ReconLayoutFeature[]{feature1, feature2}); + + // Finalize the first two features. + layoutVersionManager.finalizeLayoutFeatures(scmFacadeMock); + + // Verify that the schema versions for the first two features were updated + verify(schemaVersionTableManager, times(1)).updateSchemaVersion(1, mockConnection); + verify(schemaVersionTableManager, times(1)).updateSchemaVersion(2, mockConnection); + + // Step 2: Introduce a new feature (Feature 3) + ReconLayoutFeature feature3 = mock(ReconLayoutFeature.class); + when(feature3.getVersion()).thenReturn(3); + ReconUpgradeAction action3 = mock(ReconUpgradeAction.class); + when(feature3.getAction(ReconUpgradeAction.UpgradeActionType.FINALIZE)) + .thenReturn(Optional.of(action3)); + + mockedEnum.when(ReconLayoutFeature::values).thenReturn(new ReconLayoutFeature[]{feature1, feature2, feature3}); + + // Update schema version to simulate that features 1 and 2 have already been finalized. + when(schemaVersionTableManager.getCurrentSchemaVersion()).thenReturn(2); + + // Finalize again, but only feature 3 should be finalized. + layoutVersionManager.finalizeLayoutFeatures(scmFacadeMock); + + // Verify that the schema version for feature 3 was updated + verify(schemaVersionTableManager, times(1)).updateSchemaVersion(3, mockConnection); + + // Verify that action1 and action2 were not executed again. + verify(action1, times(1)).execute(scmFacadeMock); + verify(action2, times(1)).execute(scmFacadeMock); + + // Verify that the upgrade action for feature 3 was executed. 
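A rough reconstruction, for orientation only, of the finalization flow these tests pin down: features are visited in ascending version order, versions at or below the current schema version are skipped, and each FINALIZE action runs with its schema-version update committed on success and rolled back on failure. The field names (currentMlv, schemaVersionTableManager) and the overall structure are assumptions; the real ReconLayoutVersionManager may differ.

// Sketch of the behaviour verified above; not the actual implementation.
// Assumes fields: int currentMlv; ReconSchemaVersionTableManager schemaVersionTableManager;
// plus the usual java.sql.Connection / java.util / java.util.stream imports.
private void finalizeSketch(ReconStorageContainerManagerFacade scm) throws Exception {
  List<ReconLayoutFeature> ordered = Arrays.stream(ReconLayoutFeature.values())
      .sorted(Comparator.comparingInt(ReconLayoutFeature::getVersion)) // 1, then 2, then 3
      .collect(Collectors.toList());
  try (Connection conn = scm.getDataSource().getConnection()) {
    conn.setAutoCommit(false);
    for (ReconLayoutFeature feature : ordered) {
      if (feature.getVersion() <= currentMlv) {
        continue; // already finalized earlier; never re-run its upgrade action
      }
      Optional<ReconUpgradeAction> action =
          feature.getAction(ReconUpgradeAction.UpgradeActionType.FINALIZE);
      try {
        if (action.isPresent()) {
          action.get().execute(scm); // run the upgrade action first
        }
        schemaVersionTableManager.updateSchemaVersion(feature.getVersion(), conn);
        conn.commit();              // persist the new schema version
        currentMlv = feature.getVersion();
      } catch (Exception e) {
        conn.rollback();            // leave MLV untouched on any failure
        throw e;
      }
    }
  }
}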
+ verify(action3, times(1)).execute(scmFacadeMock); + } + +} diff --git a/hadoop-ozone/s3-secret-store/pom.xml b/hadoop-ozone/s3-secret-store/pom.xml index b2da4c9e3c8..210969e766a 100644 --- a/hadoop-ozone/s3-secret-store/pom.xml +++ b/hadoop-ozone/s3-secret-store/pom.xml @@ -19,12 +19,12 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-s3-secret-store Apache Ozone S3 Secret Store jar - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT UTF-8 true diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml index c26171d98ac..f012d3f1aab 100644 --- a/hadoop-ozone/s3gateway/pom.xml +++ b/hadoop-ozone/s3gateway/pom.xml @@ -19,13 +19,14 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-s3gateway Apache Ozone S3 Gateway jar - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT + false UTF-8 true diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/audit/S3GAction.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/audit/S3GAction.java index 20c2f4c6275..2975d0f39fa 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/audit/S3GAction.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/audit/S3GAction.java @@ -48,7 +48,11 @@ public enum S3GAction implements AuditAction { DELETE_KEY, CREATE_DIRECTORY, GENERATE_SECRET, - REVOKE_SECRET; + REVOKE_SECRET, + GET_OBJECT_TAGGING, + PUT_OBJECT_TAGGING, + DELETE_OBJECT_TAGGING, + PUT_OBJECT_ACL; @Override public String getAction() { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java index 86d25d19417..9816b023dc4 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java @@ -18,7 +18,9 @@ package org.apache.hadoop.ozone.s3; import java.io.IOException; +import java.net.InetSocketAddress; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -57,7 +59,6 @@ public class Gateway extends GenericCli { private S3GatewayHttpServer httpServer; private S3GatewayMetrics metrics; - private OzoneConfiguration ozoneConfiguration; private final JvmPauseMonitor jvmPauseMonitor = newJvmPauseMonitor("S3G"); @@ -71,14 +72,14 @@ public static void main(String[] args) throws Exception { @Override public Void call() throws Exception { - ozoneConfiguration = createOzoneConfiguration(); - TracingUtil.initTracing("S3gateway", ozoneConfiguration); + OzoneConfiguration ozoneConfiguration = createOzoneConfiguration(); OzoneConfigurationHolder.setConfiguration(ozoneConfiguration); - UserGroupInformation.setConfiguration(ozoneConfiguration); - loginS3GUser(ozoneConfiguration); - setHttpBaseDir(ozoneConfiguration); - httpServer = new S3GatewayHttpServer(ozoneConfiguration, "s3gateway"); - metrics = S3GatewayMetrics.create(ozoneConfiguration); + TracingUtil.initTracing("S3gateway", OzoneConfigurationHolder.configuration()); + UserGroupInformation.setConfiguration(OzoneConfigurationHolder.configuration()); + loginS3GUser(OzoneConfigurationHolder.configuration()); + setHttpBaseDir(OzoneConfigurationHolder.configuration()); + httpServer = new S3GatewayHttpServer(OzoneConfigurationHolder.configuration(), "s3gateway"); + metrics = S3GatewayMetrics.create(OzoneConfigurationHolder.configuration()); start(); 
ShutdownHookManager.get().addShutdownHook(() -> { @@ -95,10 +96,10 @@ public void start() throws IOException { String[] originalArgs = getCmd().getParseResult().originalArgs() .toArray(new String[0]); HddsServerUtil.startupShutdownMessage(OzoneVersionInfo.OZONE_VERSION_INFO, - Gateway.class, originalArgs, LOG, ozoneConfiguration); + Gateway.class, originalArgs, LOG, OzoneConfigurationHolder.configuration()); LOG.info("Starting Ozone S3 gateway"); - HddsServerUtil.initializeMetrics(ozoneConfiguration, "S3Gateway"); + HddsServerUtil.initializeMetrics(OzoneConfigurationHolder.configuration(), "S3Gateway"); jvmPauseMonitor.start(); httpServer.start(); } @@ -133,4 +134,14 @@ private static void loginS3GUser(OzoneConfiguration conf) } } + @VisibleForTesting + public InetSocketAddress getHttpAddress() { + return this.httpServer.getHttpAddress(); + } + + @VisibleForTesting + public InetSocketAddress getHttpsAddress() { + return this.httpServer.getHttpsAddress(); + } + } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneConfigurationHolder.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneConfigurationHolder.java index 4aeab1f3c4a..9d6f7a82252 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneConfigurationHolder.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneConfigurationHolder.java @@ -19,6 +19,7 @@ import javax.enterprise.inject.Produces; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.conf.OzoneConfiguration; /** @@ -27,17 +28,30 @@ * As the OzoneConfiguration is created by the CLI application here we inject * it via a singleton instance to the Jax-RS/CDI instances. */ -public class OzoneConfigurationHolder { +public final class OzoneConfigurationHolder { private static OzoneConfiguration configuration; + private OzoneConfigurationHolder() { + } + @Produces - public OzoneConfiguration configuration() { + public static OzoneConfiguration configuration() { return configuration; } + @VisibleForTesting public static void setConfiguration( OzoneConfiguration conf) { - OzoneConfigurationHolder.configuration = conf; + // Nullity check is used in case the configuration was already set + // in the MiniOzoneCluster + if (configuration == null) { + OzoneConfigurationHolder.configuration = conf; + } + } + + @VisibleForTesting + public static void resetConfiguration() { + configuration = null; } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java index a058e413b96..9160025a016 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java @@ -56,7 +56,7 @@ public final class S3GatewayConfigKeys { public static final String OZONE_S3G_CLIENT_BUFFER_SIZE_KEY = "ozone.s3g.client.buffer.size"; public static final String OZONE_S3G_CLIENT_BUFFER_SIZE_DEFAULT = - "4KB"; + "4MB"; // S3G kerberos, principal config public static final String OZONE_S3G_KERBEROS_KEYTAB_FILE_KEY = diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java index 97117a30bbd..8b6af74e072 100644 --- 
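Since setConfiguration now keeps only the first configuration it receives, here is a minimal usage sketch of the testing hooks introduced above (both methods are @VisibleForTesting in this diff); the surrounding test scaffolding is assumed.

// Sketch only: pre-seed the holder before Gateway#call() runs, clear it afterwards.
OzoneConfiguration conf = new OzoneConfiguration();
OzoneConfigurationHolder.setConfiguration(conf);    // later calls from Gateway become no-ops
try {
  // ... launch the S3 gateway / MiniOzoneCluster under test here ...
} finally {
  OzoneConfigurationHolder.resetConfiguration();    // let the next test install its own conf
}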
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java @@ -114,7 +114,7 @@ protected String getHttpAddressKey() { @Override protected String getHttpBindHostKey() { - return OZONE_S3G_HTTP_BIND_HOST_KEY; + return S3GatewayConfigKeys.OZONE_S3G_HTTP_BIND_HOST_KEY; } @Override @@ -144,12 +144,12 @@ protected int getHttpsBindPortDefault() { @Override protected String getKeytabFile() { - return OZONE_S3G_KEYTAB_FILE; + return S3GatewayConfigKeys.OZONE_S3G_KEYTAB_FILE; } @Override protected String getSpnegoPrincipal() { - return OZONE_S3G_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL; + return S3GatewayConfigKeys.OZONE_S3G_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL; } @Override diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index a705420ca35..e68a59e7f76 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -568,8 +568,7 @@ public Response putAcl(String bucketName, HttpHeaders httpHeaders, if (grantReads == null && grantWrites == null && grantReadACP == null && grantWriteACP == null && grantFull == null) { S3BucketAcl putBucketAclRequest = - new PutBucketAclRequestUnmarshaller().readFrom( - null, null, null, null, null, body); + new PutBucketAclRequestUnmarshaller().readFrom(body); // Handle grants in body ozoneAclListOnBucket.addAll( S3Acl.s3AclToOzoneNativeAclOnBucket(putBucketAclRequest)); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequestUnmarshaller.java index 5881baa174b..3ab9a123cc7 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequestUnmarshaller.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequestUnmarshaller.java @@ -17,17 +17,9 @@ */ package org.apache.hadoop.ozone.s3.endpoint; -import org.xml.sax.InputSource; -import org.xml.sax.XMLReader; - import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.MultivaluedMap; -import javax.ws.rs.ext.MessageBodyReader; -import javax.xml.XMLConstants; -import javax.xml.bind.JAXBContext; -import javax.xml.bind.UnmarshallerHandler; -import javax.xml.parsers.SAXParserFactory; import java.io.IOException; import java.io.InputStream; import java.lang.annotation.Annotation; @@ -35,7 +27,6 @@ import javax.ws.rs.ext.Provider; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_REQUEST; -import static org.apache.hadoop.ozone.s3.util.S3Consts.S3_XML_NAMESPACE; import static org.apache.hadoop.ozone.s3.util.S3Utils.wrapOS3Exception; /** @@ -43,25 +34,10 @@ */ @Provider public class CompleteMultipartUploadRequestUnmarshaller - implements MessageBodyReader { - - private final JAXBContext context; - private final SAXParserFactory saxParserFactory; + extends MessageUnmarshaller { public CompleteMultipartUploadRequestUnmarshaller() { - try { - context = JAXBContext.newInstance(CompleteMultipartUploadRequest.class); - saxParserFactory = SAXParserFactory.newInstance(); - 
saxParserFactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true); - } catch (Exception ex) { - throw new AssertionError("Can not instantiate " + - "CompleteMultipartUploadRequest parser", ex); - } - } - @Override - public boolean isReadable(Class aClass, Type type, - Annotation[] annotations, MediaType mediaType) { - return type.equals(CompleteMultipartUploadRequest.class); + super(CompleteMultipartUploadRequest.class); } @Override @@ -69,24 +45,13 @@ public CompleteMultipartUploadRequest readFrom( Class aClass, Type type, Annotation[] annotations, MediaType mediaType, MultivaluedMap multivaluedMap, - InputStream inputStream) throws IOException, WebApplicationException { + InputStream inputStream) throws WebApplicationException { try { if (inputStream.available() == 0) { throw wrapOS3Exception(INVALID_REQUEST.withMessage("You must specify at least one part")); } - - XMLReader xmlReader = saxParserFactory.newSAXParser().getXMLReader(); - UnmarshallerHandler unmarshallerHandler = - context.createUnmarshaller().getUnmarshallerHandler(); - XmlNamespaceFilter filter = - new XmlNamespaceFilter(S3_XML_NAMESPACE); - filter.setContentHandler(unmarshallerHandler); - filter.setParent(xmlReader); - filter.parse(new InputSource(inputStream)); - return (CompleteMultipartUploadRequest) unmarshallerHandler.getResult(); - } catch (WebApplicationException e) { - throw e; - } catch (Exception e) { + return super.readFrom(aClass, type, annotations, mediaType, multivaluedMap, inputStream); + } catch (IOException e) { throw wrapOS3Exception(INVALID_REQUEST.withMessage(e.getMessage())); } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java index 4ffc3011935..fbb0614c4f4 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java @@ -72,6 +72,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.KB; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_TAG; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError; +import static org.apache.hadoop.ozone.s3.util.S3Consts.AWS_TAG_PREFIX; import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX; import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_KEY_LENGTH_LIMIT; @@ -363,59 +364,70 @@ protected Map getTaggingFromHeaders(HttpHeaders httpHeaders) List tagPairs = URLEncodedUtils.parse(tagString, UTF_8); - if (tagPairs.isEmpty()) { - return Collections.emptyMap(); - } + return validateAndGetTagging(tagPairs, NameValuePair::getName, NameValuePair::getValue); + } - Map tags = new HashMap<>(); - // Tag restrictions: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_S3Tag.html - for (NameValuePair tagPair: tagPairs) { - if (StringUtils.isEmpty(tagPair.getName())) { - OS3Exception ex = newError(INVALID_TAG, TAG_HEADER); - ex.setErrorMessage("Some tag keys are empty, please specify the non-empty tag keys"); + protected static Map validateAndGetTagging( + List tagList, + Function getTagKey, + Function getTagValue + ) throws OS3Exception { + final Map tags = new HashMap<>(); + for (KV tagPair : tagList) { + final String tagKey = getTagKey.apply(tagPair); + final String tagValue = getTagValue.apply(tagPair); + // Tag restrictions: 
https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_S3Tag.html + if (StringUtils.isEmpty(tagKey)) { + OS3Exception ex = S3ErrorTable.newError(INVALID_TAG, TAG_HEADER); + ex.setErrorMessage("Some tag keys are empty, please only specify non-empty tag keys"); throw ex; } - if (tagPair.getValue() == null) { - // For example for query parameter with only value (e.g. "tag1") - OS3Exception ex = newError(INVALID_TAG, tagPair.getName()); - ex.setErrorMessage("Some tag values are not specified, please specify the tag values"); + if (StringUtils.startsWith(tagKey, AWS_TAG_PREFIX)) { + OS3Exception ex = S3ErrorTable.newError(INVALID_TAG, tagKey); + ex.setErrorMessage("Tag key cannot start with \"aws:\" prefix"); throw ex; } - if (tags.containsKey(tagPair.getName())) { - // Tags that are associated with an object must have unique tag keys - // Reject request if the same key is used twice on the same resource - OS3Exception ex = newError(INVALID_TAG, tagPair.getName()); - ex.setErrorMessage("There are tags with duplicate tag keys, tag keys should be unique"); + if (tagValue == null) { + // For example for query parameter with only value (e.g. "tag1") + OS3Exception ex = S3ErrorTable.newError(INVALID_TAG, tagKey); + ex.setErrorMessage("Some tag values are not specified, please specify the tag values"); throw ex; } - if (tagPair.getName().length() > TAG_KEY_LENGTH_LIMIT) { - OS3Exception ex = newError(INVALID_TAG, tagPair.getName()); + if (tagKey.length() > TAG_KEY_LENGTH_LIMIT) { + OS3Exception ex = S3ErrorTable.newError(INVALID_TAG, tagKey); ex.setErrorMessage("The tag key exceeds the maximum length of " + TAG_KEY_LENGTH_LIMIT); throw ex; } - if (tagPair.getValue().length() > TAG_VALUE_LENGTH_LIMIT) { - OS3Exception ex = newError(INVALID_TAG, tagPair.getValue()); + if (tagValue.length() > TAG_VALUE_LENGTH_LIMIT) { + OS3Exception ex = S3ErrorTable.newError(INVALID_TAG, tagValue); ex.setErrorMessage("The tag value exceeds the maximum length of " + TAG_VALUE_LENGTH_LIMIT); throw ex; } - if (!TAG_REGEX_PATTERN.matcher(tagPair.getName()).matches()) { - OS3Exception ex = newError(INVALID_TAG, tagPair.getName()); + if (!TAG_REGEX_PATTERN.matcher(tagKey).matches()) { + OS3Exception ex = S3ErrorTable.newError(INVALID_TAG, tagKey); ex.setErrorMessage("The tag key does not have a valid pattern"); throw ex; } - if (!TAG_REGEX_PATTERN.matcher(tagPair.getValue()).matches()) { - OS3Exception ex = newError(INVALID_TAG, tagPair.getValue()); + if (!TAG_REGEX_PATTERN.matcher(tagValue).matches()) { + OS3Exception ex = S3ErrorTable.newError(INVALID_TAG, tagValue); ex.setErrorMessage("The tag value does not have a valid pattern"); throw ex; } - tags.put(tagPair.getName(), tagPair.getValue()); + final String previous = tags.put(tagKey, tagValue); + if (previous != null) { + // Tags that are associated with an object must have unique tag keys + // Reject request if the same key is used twice on the same resource + OS3Exception ex = S3ErrorTable.newError(INVALID_TAG, tagKey); + ex.setErrorMessage("There are tags with duplicate tag keys, tag keys should be unique"); + throw ex; + } } if (tags.size() > TAG_NUM_LIMIT) { @@ -426,7 +438,7 @@ protected Map getTaggingFromHeaders(HttpHeaders httpHeaders) throw ex; } - return tags; + return Collections.unmodifiableMap(tags); } private AuditMessage.Builder auditMessageBaseBuilder(AuditAction op, diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MessageUnmarshaller.java 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MessageUnmarshaller.java new file mode 100644 index 00000000000..dd50598c7c5 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MessageUnmarshaller.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.s3.endpoint; + +import org.xml.sax.InputSource; +import org.xml.sax.XMLReader; + +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.MultivaluedMap; +import javax.ws.rs.ext.MessageBodyReader; +import javax.xml.XMLConstants; +import javax.xml.bind.JAXBContext; +import javax.xml.bind.UnmarshallerHandler; +import javax.xml.parsers.SAXParserFactory; +import java.io.InputStream; +import java.lang.annotation.Annotation; +import java.lang.reflect.Type; + +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_REQUEST; +import static org.apache.hadoop.ozone.s3.util.S3Consts.S3_XML_NAMESPACE; +import static org.apache.hadoop.ozone.s3.util.S3Utils.wrapOS3Exception; + +/** + * Unmarshaller to create instances of type {@code T} from XML, + * which may or may not have namespace. + * @param the object type to read from XML + */ +public class MessageUnmarshaller implements MessageBodyReader { + + private final JAXBContext context; + private final SAXParserFactory saxParserFactory; + private final Class cls; + + public MessageUnmarshaller(Class cls) { + this.cls = cls; + + try { + context = JAXBContext.newInstance(cls); + saxParserFactory = SAXParserFactory.newInstance(); + saxParserFactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true); + } catch (Exception ex) { + throw new AssertionError("Can not instantiate XML parser for " + cls.getSimpleName(), ex); + } + } + + @Override + public boolean isReadable(Class aClass, Type type, + Annotation[] annotations, MediaType mediaType) { + return type.equals(cls); + } + + @Override + public T readFrom( + Class aClass, Type type, + Annotation[] annotations, MediaType mediaType, + MultivaluedMap multivaluedMap, + InputStream inputStream + ) throws WebApplicationException { + try { + XMLReader xmlReader = saxParserFactory.newSAXParser().getXMLReader(); + UnmarshallerHandler unmarshallerHandler = + context.createUnmarshaller().getUnmarshallerHandler(); + XmlNamespaceFilter filter = + new XmlNamespaceFilter(S3_XML_NAMESPACE); + filter.setContentHandler(unmarshallerHandler); + filter.setParent(xmlReader); + filter.parse(new InputSource(inputStream)); + return cls.cast(unmarshallerHandler.getResult()); + } catch (Exception e) { + throw wrapOS3Exception(INVALID_REQUEST.withMessage(e.getMessage())); + } + } + + /** Convenience method for programmatic invocation. 
*/ + public T readFrom(InputStream inputStream) throws WebApplicationException { + return readFrom(cls, cls, new Annotation[0], MediaType.APPLICATION_XML_TYPE, null, inputStream); + } + +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java index 775ec789f38..3102fb94f08 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java @@ -19,22 +19,7 @@ import javax.ws.rs.Produces; import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.MultivaluedMap; -import javax.ws.rs.ext.MessageBodyReader; import javax.ws.rs.ext.Provider; -import javax.xml.XMLConstants; -import javax.xml.bind.JAXBContext; -import javax.xml.bind.UnmarshallerHandler; -import javax.xml.parsers.SAXParserFactory; -import java.io.InputStream; -import java.lang.annotation.Annotation; -import java.lang.reflect.Type; - -import org.xml.sax.InputSource; -import org.xml.sax.XMLReader; - -import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_REQUEST; -import static org.apache.hadoop.ozone.s3.util.S3Utils.wrapOS3Exception; /** * Custom unmarshaller to read MultiDeleteRequest w/wo namespace. @@ -42,45 +27,10 @@ @Provider @Produces(MediaType.APPLICATION_XML) public class MultiDeleteRequestUnmarshaller - implements MessageBodyReader { - - private final JAXBContext context; - private final SAXParserFactory saxParserFactory; + extends MessageUnmarshaller { public MultiDeleteRequestUnmarshaller() { - try { - context = JAXBContext.newInstance(MultiDeleteRequest.class); - saxParserFactory = SAXParserFactory.newInstance(); - saxParserFactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true); - } catch (Exception ex) { - throw new AssertionError("Can't instantiate MultiDeleteRequest parser", - ex); - } + super(MultiDeleteRequest.class); } - @Override - public boolean isReadable(Class type, Type genericType, - Annotation[] annotations, MediaType mediaType) { - return type.equals(MultiDeleteRequest.class); - } - - @Override - public MultiDeleteRequest readFrom(Class type, - Type genericType, Annotation[] annotations, MediaType mediaType, - MultivaluedMap httpHeaders, InputStream entityStream) { - try { - XMLReader xmlReader = saxParserFactory.newSAXParser().getXMLReader(); - UnmarshallerHandler unmarshallerHandler = - context.createUnmarshaller().getUnmarshallerHandler(); - - XmlNamespaceFilter filter = - new XmlNamespaceFilter("http://s3.amazonaws.com/doc/2006-03-01/"); - filter.setContentHandler(unmarshallerHandler); - filter.setParent(xmlReader); - filter.parse(new InputSource(entityStream)); - return (MultiDeleteRequest) unmarshallerHandler.getResult(); - } catch (Exception e) { - throw wrapOS3Exception(INVALID_REQUEST.withMessage(e.getMessage())); - } - } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 40b1e013a46..9311fb7fa4b 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -68,6 +68,7 @@ import 
org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; import org.apache.hadoop.ozone.s3.HeaderPreprocessor; import org.apache.hadoop.ozone.s3.SignedChunksInputStream; +import org.apache.hadoop.ozone.s3.endpoint.S3Tagging.Tag; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; import org.apache.hadoop.ozone.s3.util.RFC1123Util; @@ -120,6 +121,7 @@ import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.ENTITY_TOO_SMALL; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_ARGUMENT; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_REQUEST; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NOT_IMPLEMENTED; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_UPLOAD; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.PRECOND_FAILED; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError; @@ -212,7 +214,7 @@ public void init() { * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html for * more details. */ - @SuppressWarnings("checkstyle:MethodLength") + @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:ParameterNumber"}) @PUT public Response put( @PathParam("bucket") String bucketName, @@ -220,6 +222,8 @@ public Response put( @HeaderParam("Content-Length") long length, @QueryParam("partNumber") int partNumber, @QueryParam("uploadId") @DefaultValue("") String uploadID, + @QueryParam("tagging") String taggingMarker, + @QueryParam("acl") String aclMarker, final InputStream body) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.CREATE_KEY; @@ -229,7 +233,16 @@ public Response put( String copyHeader = null, storageType = null; DigestInputStream digestInputStream = null; try { + if (aclMarker != null) { + s3GAction = S3GAction.PUT_OBJECT_ACL; + throw newError(NOT_IMPLEMENTED, keyPath); + } OzoneVolume volume = getVolume(); + if (taggingMarker != null) { + s3GAction = S3GAction.PUT_OBJECT_TAGGING; + return putObjectTagging(volume, bucketName, keyPath, body); + } + if (uploadID != null && !uploadID.equals("")) { if (headers.getHeaderString(COPY_SOURCE_HEADER) == null) { s3GAction = S3GAction.CREATE_MULTIPART_KEY; @@ -310,7 +323,7 @@ public Response put( perf.appendStreamMode(); Pair keyWriteResult = ObjectEndpointStreaming .put(bucket, keyPath, length, replicationConfig, chunkSize, - customMetadata, digestInputStream, perf); + customMetadata, tags, digestInputStream, perf); eTag = keyWriteResult.getKey(); putLength = keyWriteResult.getValue(); } else { @@ -320,7 +333,7 @@ public Response put( long metadataLatencyNs = getMetrics().updatePutKeyMetadataStats(startNanos); perf.appendMetaLatencyNanos(metadataLatencyNs); - putLength = IOUtils.copyLarge(digestInputStream, output); + putLength = IOUtils.copy(digestInputStream, output, getIOBufferSize(length)); eTag = DatatypeConverter.printHexBinary( digestInputStream.getMessageDigest().digest()) .toLowerCase(); @@ -336,7 +349,9 @@ public Response put( } catch (OMException ex) { auditSuccess = false; auditWriteFailure(s3GAction, ex); - if (copyHeader != null) { + if (taggingMarker != null) { + getMetrics().updatePutObjectTaggingFailureStats(startNanos); + } else if (copyHeader != null) { getMetrics().updateCopyObjectFailureStats(startNanos); } else { getMetrics().updateCreateKeyFailureStats(startNanos); @@ -360,7 +375,11 @@ public Response put( } catch (Exception ex) { auditSuccess = 
false; auditWriteFailure(s3GAction, ex); - if (copyHeader != null) { + if (aclMarker != null) { + getMetrics().updatePutObjectAclFailureStats(startNanos); + } else if (taggingMarker != null) { + getMetrics().updatePutObjectTaggingFailureStats(startNanos); + } else if (copyHeader != null) { getMetrics().updateCopyObjectFailureStats(startNanos); } else { getMetrics().updateCreateKeyFailureStats(startNanos); @@ -390,7 +409,7 @@ public Response put( * https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadListParts.html * for more details. */ - @SuppressWarnings("checkstyle:MethodLength") + @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:ParameterNumber"}) @GET public Response get( @PathParam("bucket") String bucketName, @@ -398,12 +417,18 @@ public Response get( @QueryParam("partNumber") int partNumber, @QueryParam("uploadId") String uploadId, @QueryParam("max-parts") @DefaultValue("1000") int maxParts, - @QueryParam("part-number-marker") String partNumberMarker) + @QueryParam("part-number-marker") String partNumberMarker, + @QueryParam("tagging") String taggingMarker) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.GET_KEY; PerformanceStringBuilder perf = new PerformanceStringBuilder(); try { + if (taggingMarker != null) { + s3GAction = S3GAction.GET_OBJECT_TAGGING; + return getObjectTagging(bucketName, keyPath); + } + if (uploadId != null) { // When we have uploadId, this is the request for list Parts. s3GAction = S3GAction.LIST_PARTS; @@ -443,7 +468,7 @@ public Response get( if (rangeHeaderVal == null || rangeHeader.isReadFull()) { StreamingOutput output = dest -> { try (OzoneInputStream key = keyDetails.getContent()) { - long readLength = IOUtils.copyLarge(key, dest); + long readLength = IOUtils.copy(key, dest, getIOBufferSize(keyDetails.getDataSize())); getMetrics().incGetKeySuccessLength(readLength); perf.appendSizeBytes(readLength); } @@ -467,7 +492,7 @@ public Response get( try (OzoneInputStream ozoneInputStream = keyDetails.getContent()) { ozoneInputStream.seek(startOffset); long readLength = IOUtils.copyLarge(ozoneInputStream, dest, 0, - copyLength, new byte[bufferSize]); + copyLength, new byte[getIOBufferSize(copyLength)]); getMetrics().incGetKeySuccessLength(readLength); perf.appendSizeBytes(readLength); } @@ -532,7 +557,9 @@ public Response get( AUDIT.logReadFailure( buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex) ); - if (uploadId != null) { + if (taggingMarker != null) { + getMetrics().updateGetObjectTaggingFailureStats(startNanos); + } else if (uploadId != null) { getMetrics().updateListPartsFailureStats(startNanos); } else { getMetrics().updateGetKeyFailureStats(startNanos); @@ -699,13 +726,19 @@ private Response abortMultipartUpload(OzoneVolume volume, String bucket, public Response delete( @PathParam("bucket") String bucketName, @PathParam("path") String keyPath, - @QueryParam("uploadId") @DefaultValue("") String uploadId) throws + @QueryParam("uploadId") @DefaultValue("") String uploadId, + @QueryParam("tagging") String taggingMarker) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.DELETE_KEY; try { OzoneVolume volume = getVolume(); + if (taggingMarker != null) { + s3GAction = S3GAction.DELETE_OBJECT_TAGGING; + return deleteObjectTagging(volume, bucketName, keyPath); + } + if (uploadId != null && !uploadId.equals("")) { s3GAction = S3GAction.ABORT_MULTIPART_UPLOAD; return abortMultipartUpload(volume, bucketName, keyPath, 
uploadId); @@ -732,13 +765,18 @@ public Response delete( // keys. Just return 204 } else if (isAccessDenied(ex)) { throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex); + } else if (ex.getResult() == ResultCodes.NOT_SUPPORTED_OPERATION) { + // When deleteObjectTagging operation is applied on FSO directory + throw S3ErrorTable.newError(S3ErrorTable.NOT_IMPLEMENTED, keyPath); } else { throw ex; } } catch (Exception ex) { AUDIT.logWriteFailure( buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex)); - if (uploadId != null && !uploadId.equals("")) { + if (taggingMarker != null) { + getMetrics().updateDeleteObjectTaggingFailureStats(startNanos); + } else if (uploadId != null && !uploadId.equals("")) { getMetrics().updateAbortMultipartUploadFailureStats(startNanos); } else { getMetrics().updateDeleteKeyFailureStats(startNanos); @@ -997,7 +1035,7 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, metadataLatencyNs = getMetrics().updateCopyKeyMetadataStats(startNanos); copyLength = IOUtils.copyLarge( - sourceObject, ozoneOutputStream, 0, length); + sourceObject, ozoneOutputStream, 0, length, new byte[getIOBufferSize(length)]); ozoneOutputStream.getMetadata() .putAll(sourceKeyDetails.getMetadata()); outputStream = ozoneOutputStream; @@ -1008,7 +1046,7 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, partNumber, uploadID)) { metadataLatencyNs = getMetrics().updateCopyKeyMetadataStats(startNanos); - copyLength = IOUtils.copyLarge(sourceObject, ozoneOutputStream); + copyLength = IOUtils.copy(sourceObject, ozoneOutputStream, getIOBufferSize(length)); ozoneOutputStream.getMetadata() .putAll(sourceKeyDetails.getMetadata()); outputStream = ozoneOutputStream; @@ -1024,7 +1062,7 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, partNumber, uploadID)) { metadataLatencyNs = getMetrics().updatePutKeyMetadataStats(startNanos); - putLength = IOUtils.copyLarge(digestInputStream, ozoneOutputStream); + putLength = IOUtils.copy(digestInputStream, ozoneOutputStream, getIOBufferSize(length)); byte[] digest = digestInputStream.getMessageDigest().digest(); ozoneOutputStream.getMetadata() .put(ETAG, DatatypeConverter.printHexBinary(digest).toLowerCase()); @@ -1178,7 +1216,7 @@ void copy(OzoneVolume volume, DigestInputStream src, long srcKeyLen, long metadataLatencyNs = getMetrics().updateCopyKeyMetadataStats(startNanos); perf.appendMetaLatencyNanos(metadataLatencyNs); - copyLength = IOUtils.copyLarge(src, dest); + copyLength = IOUtils.copy(src, dest, getIOBufferSize(srcKeyLen)); String eTag = DatatypeConverter.printHexBinary(src.getMessageDigest().digest()).toLowerCase(); dest.getMetadata().put(ETAG, eTag); } @@ -1381,6 +1419,75 @@ public static boolean checkCopySourceModificationTime( (lastModificationTime <= copySourceIfUnmodifiedSince); } + private Response putObjectTagging(OzoneVolume volume, String bucketName, String keyName, InputStream body) + throws IOException, OS3Exception { + long startNanos = Time.monotonicNowNanos(); + S3Tagging tagging = null; + try { + tagging = new PutTaggingUnmarshaller().readFrom(body); + tagging.validate(); + } catch (Exception ex) { + OS3Exception exception = S3ErrorTable.newError(S3ErrorTable.MALFORMED_XML, keyName); + exception.setErrorMessage(exception.getErrorMessage() + ". 
" + ex.getMessage()); + throw exception; + } + + Map tags = validateAndGetTagging( + tagging.getTagSet().getTags(), // Nullity check was done in previous parsing step + Tag::getKey, + Tag::getValue + ); + + try { + volume.getBucket(bucketName).putObjectTagging(keyName, tags); + } catch (OMException ex) { + if (ex.getResult() == ResultCodes.INVALID_REQUEST) { + throw S3ErrorTable.newError(S3ErrorTable.INVALID_REQUEST, keyName); + } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) { + throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, keyName); + } else if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) { + throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_KEY, keyName); + } else if (ex.getResult() == ResultCodes.NOT_SUPPORTED_OPERATION) { + // When putObjectTagging operation is applied on FSO directory + throw S3ErrorTable.newError(S3ErrorTable.NOT_IMPLEMENTED, keyName); + } + throw ex; + } + getMetrics().updatePutObjectTaggingSuccessStats(startNanos); + return Response.ok().build(); + } + + private Response getObjectTagging(String bucketName, String keyName) throws IOException { + long startNanos = Time.monotonicNowNanos(); + + OzoneVolume volume = getVolume(); + + Map tagMap = volume.getBucket(bucketName).getObjectTagging(keyName); + + getMetrics().updateGetObjectTaggingSuccessStats(startNanos); + return Response.ok(S3Tagging.fromMap(tagMap), MediaType.APPLICATION_XML_TYPE).build(); + } + + private Response deleteObjectTagging(OzoneVolume volume, String bucketName, String keyName) + throws IOException, OS3Exception { + long startNanos = Time.monotonicNowNanos(); + + try { + volume.getBucket(bucketName).deleteObjectTagging(keyName); + } catch (OMException ex) { + // Unlike normal key deletion that ignores the key not found exception + // DeleteObjectTagging should throw the exception if the key does not exist + if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) { + throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_KEY, keyName); + } + throw ex; + } + + getMetrics().updateDeleteObjectTaggingSuccessStats(startNanos); + return Response.noContent().build(); + } + + @VisibleForTesting public void setOzoneConfiguration(OzoneConfiguration config) { this.ozoneConfiguration = config; @@ -1408,4 +1515,18 @@ private String extractPartsCount(String eTag) { } return null; } + + private int getIOBufferSize(long fileLength) { + if (bufferSize == 0) { + // this is mainly for unit tests as init() will not be called in the unit tests + LOG.warn("buffer size is set to {}", IOUtils.DEFAULT_BUFFER_SIZE); + bufferSize = IOUtils.DEFAULT_BUFFER_SIZE; + } + if (fileLength == 0) { + // for empty file + return bufferSize; + } else { + return fileLength < bufferSize ? 
(int) fileLength : bufferSize; + } + } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java index cb9499aa20d..f5d185fc76b 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java @@ -61,12 +61,13 @@ public static Pair put( OzoneBucket bucket, String keyPath, long length, ReplicationConfig replicationConfig, int chunkSize, Map keyMetadata, + Map tags, DigestInputStream body, PerformanceStringBuilder perf) throws IOException, OS3Exception { try { return putKeyWithStream(bucket, keyPath, - length, chunkSize, replicationConfig, keyMetadata, body, perf); + length, chunkSize, replicationConfig, keyMetadata, tags, body, perf); } catch (IOException ex) { LOG.error("Exception occurred in PutObject", ex); if (ex instanceof OMException) { @@ -97,13 +98,14 @@ public static Pair putKeyWithStream( int bufferSize, ReplicationConfig replicationConfig, Map keyMetadata, + Map tags, DigestInputStream body, PerformanceStringBuilder perf) throws IOException { long startNanos = Time.monotonicNowNanos(); long writeLen; String eTag; try (OzoneDataStreamOutput streamOutput = bucket.createStreamKey(keyPath, - length, replicationConfig, keyMetadata)) { + length, replicationConfig, keyMetadata, tags)) { long metadataLatencyNs = METRICS.updatePutKeyMetadataStats(startNanos); writeLen = writeToStreamOutput(streamOutput, body, bufferSize, length); eTag = DatatypeConverter.printHexBinary(body.getMessageDigest().digest()) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutBucketAclRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutBucketAclRequestUnmarshaller.java index c832915176b..df15a87428e 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutBucketAclRequestUnmarshaller.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutBucketAclRequestUnmarshaller.java @@ -17,71 +17,16 @@ */ package org.apache.hadoop.ozone.s3.endpoint; -import org.xml.sax.InputSource; -import org.xml.sax.XMLReader; - -import javax.ws.rs.WebApplicationException; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.MultivaluedMap; -import javax.ws.rs.ext.MessageBodyReader; import javax.ws.rs.ext.Provider; -import javax.xml.XMLConstants; -import javax.xml.bind.JAXBContext; -import javax.xml.bind.UnmarshallerHandler; -import javax.xml.parsers.SAXParserFactory; -import java.io.IOException; -import java.io.InputStream; -import java.lang.annotation.Annotation; -import java.lang.reflect.Type; - -import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_REQUEST; -import static org.apache.hadoop.ozone.s3.util.S3Consts.S3_XML_NAMESPACE; -import static org.apache.hadoop.ozone.s3.util.S3Utils.wrapOS3Exception; /** * Custom unmarshaller to read PutBucketAclRequest wo namespace. 
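For context on the new ?tagging subresource wired into ObjectEndpoint above, here is a minimal sketch of the bucket-level calls that the put/get/deleteObjectTagging handlers delegate to. The volume handle, bucket and key names, and tag values are illustrative assumptions, not taken from the patch:

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.ozone.client.OzoneBucket;
    import org.apache.hadoop.ozone.client.OzoneVolume;

    static void objectTaggingSketch(OzoneVolume volume) throws IOException {
      OzoneBucket bucket = volume.getBucket("bucket1");               // illustrative bucket name

      Map<String, String> tags = new HashMap<>();
      tags.put("env", "dev");                                         // illustrative tag

      bucket.putObjectTagging("key1", tags);                          // PUT    /bucket1/key1?tagging
      Map<String, String> current = bucket.getObjectTagging("key1");  // GET    /bucket1/key1?tagging
      bucket.deleteObjectTagging("key1");                             // DELETE /bucket1/key1?tagging
    }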
*/ @Provider -public class PutBucketAclRequestUnmarshaller - implements MessageBodyReader { - - private final JAXBContext context; - private final SAXParserFactory saxParserFactory; +public class PutBucketAclRequestUnmarshaller extends MessageUnmarshaller { public PutBucketAclRequestUnmarshaller() { - try { - context = JAXBContext.newInstance(S3BucketAcl.class); - saxParserFactory = SAXParserFactory.newInstance(); - saxParserFactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true); - } catch (Exception ex) { - throw new AssertionError("Can not instantiate " + - "PutBucketAclRequest parser", ex); - } - } - @Override - public boolean isReadable(Class aClass, Type type, - Annotation[] annotations, MediaType mediaType) { - return type.equals(S3BucketAcl.class); + super(S3BucketAcl.class); } - @Override - public S3BucketAcl readFrom( - Class aClass, Type type, - Annotation[] annotations, MediaType mediaType, - MultivaluedMap multivaluedMap, - InputStream inputStream) throws IOException, WebApplicationException { - try { - XMLReader xmlReader = saxParserFactory.newSAXParser().getXMLReader(); - UnmarshallerHandler unmarshallerHandler = - context.createUnmarshaller().getUnmarshallerHandler(); - XmlNamespaceFilter filter = - new XmlNamespaceFilter(S3_XML_NAMESPACE); - filter.setContentHandler(unmarshallerHandler); - filter.setParent(xmlReader); - filter.parse(new InputSource(inputStream)); - return (S3BucketAcl)(unmarshallerHandler.getResult()); - } catch (Exception e) { - throw wrapOS3Exception(INVALID_REQUEST.withMessage(e.getMessage())); - } - } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStreamWithZeroCopy.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutTaggingUnmarshaller.java similarity index 72% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStreamWithZeroCopy.java rename to hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutTaggingUnmarshaller.java index 47c94e03cb2..f0db9fda9e4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStreamWithZeroCopy.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutTaggingUnmarshaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with this * work for additional information regarding copyright ownership. The ASF @@ -15,17 +15,15 @@ * the License. */ -package org.apache.hadoop.ozone.client.rpc; - -import org.junit.jupiter.api.BeforeAll; +package org.apache.hadoop.ozone.s3.endpoint; /** - * Tests key output stream with zero-copy enabled. + * Custom unmarshaller to read Tagging request body. 
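Since the JAXB plumbing now lives in MessageUnmarshaller, the per-type readers above reduce to a constructor call, and the new readFrom(InputStream) convenience method allows parsing outside the JAX-RS pipeline. A minimal sketch, assumed to sit alongside the endpoint classes; the sample XML body is an assumption about the MultiDeleteRequest schema, not taken from the patch:

    import java.io.ByteArrayInputStream;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;

    static MultiDeleteRequest parseSketch() {
      // Works with or without the S3 XML namespace, handled by XmlNamespaceFilter.
      String xml = "<Delete><Object><Key>key1</Key></Object></Delete>";   // assumed sample body
      InputStream body = new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8));
      return new MultiDeleteRequestUnmarshaller().readFrom(body);
    }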
*/ -public class TestECKeyOutputStreamWithZeroCopy extends - AbstractTestECKeyOutputStream { - @BeforeAll - public static void init() throws Exception { - init(true); +public class PutTaggingUnmarshaller extends MessageUnmarshaller { + + public PutTaggingUnmarshaller() { + super(S3Tagging.class); } + } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Tagging.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Tagging.java new file mode 100644 index 00000000000..0a0f289f1d8 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Tagging.java @@ -0,0 +1,155 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
<p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p>
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * S3 tagging. + */ +@XmlAccessorType(XmlAccessType.FIELD) +@XmlRootElement(name = "Tagging", + namespace = "http://s3.amazonaws.com/doc/2006-03-01/") +public class S3Tagging { + + @XmlElement(name = "TagSet") + private TagSet tagSet; + + public S3Tagging() { + + } + + public S3Tagging(TagSet tagSet) { + this.tagSet = tagSet; + } + + public TagSet getTagSet() { + return tagSet; + } + + public void setTagSet(TagSet tagSet) { + this.tagSet = tagSet; + } + + /** + * Entity for child element TagSet. + */ + @XmlAccessorType(XmlAccessType.FIELD) + @XmlRootElement(name = "TagSet") + public static class TagSet { + @XmlElement(name = "Tag") + private List tags = new ArrayList<>(); + + public TagSet() { + } + + public TagSet(List tags) { + this.tags = tags; + } + + public List getTags() { + return tags; + } + + public void setTags(List tags) { + this.tags = tags; + } + } + + /** + * Entity for child element Tag. + */ + @XmlAccessorType(XmlAccessType.FIELD) + @XmlRootElement(name = "Tag") + public static class Tag { + @XmlElement(name = "Key") + private String key; + + @XmlElement(name = "Value") + private String value; + + public Tag() { + } + + public Tag(String key, String value) { + this.key = key; + this.value = value; + } + + public String getKey() { + return key; + } + + public void setKey(String key) { + this.key = key; + } + + public String getValue() { + return value; + } + + public void setValue(String value) { + this.value = value; + } + } + + /** + * Creates a S3 tagging instance (xml representation) from a Map retrieved + * from OM. + * @param tagMap Map representing the tags. + * @return {@link S3Tagging} + */ + public static S3Tagging fromMap(Map tagMap) { + List tags = tagMap.entrySet() + .stream() + .map( + tagEntry -> new Tag(tagEntry.getKey(), tagEntry.getValue()) + ) + .collect(Collectors.toList()); + return new S3Tagging(new TagSet(tags)); + } + + /** + * Additional XML validation logic for S3 tagging. 
+ */ + public void validate() { + if (tagSet == null) { + throw new IllegalArgumentException("TagSet needs to be specified"); + } + + if (tagSet.getTags().isEmpty()) { + throw new IllegalArgumentException("Tags need to be specified and cannot be empty"); + } + + for (Tag tag: tagSet.getTags()) { + if (tag.getKey() == null) { + throw new IllegalArgumentException("Some tag keys are not specified"); + } + if (tag.getValue() == null) { + throw new IllegalArgumentException("Tag value for tag " + tag.getKey() + " is not specified"); + } + } + } +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java index 42c044086b8..49761f89a3a 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java @@ -145,6 +145,10 @@ private S3ErrorTable() { public static final OS3Exception NO_SUCH_TAG_SET = new OS3Exception( "NoSuchTagSet", "The specified tag does not exist.", HTTP_NOT_FOUND); + public static final OS3Exception MALFORMED_XML = new OS3Exception( + "MalformedXML", "The XML you provided was not well-formed or did not " + + "validate against our published schema", HTTP_BAD_REQUEST); + public static OS3Exception newError(OS3Exception e, String resource) { return newError(e, resource, null); } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/metrics/S3GatewayMetrics.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/metrics/S3GatewayMetrics.java index dd84d019176..49edc4d543e 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/metrics/S3GatewayMetrics.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/metrics/S3GatewayMetrics.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.MetricsSource; @@ -33,12 +34,15 @@ import org.apache.hadoop.ozone.util.PerformanceMetrics; import org.apache.hadoop.util.Time; +import java.io.Closeable; +import java.util.Map; + /** * This class maintains S3 Gateway related metrics. 
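To tie the pieces above together, a sketch of the parse-and-validate chain that the PutObjectTagging path in ObjectEndpoint follows; here `body` stands for the request InputStream, and the snippet assumes it runs inside an EndpointBase subclass so the protected validateAndGetTagging helper is in scope:

    import java.util.Map;
    import org.apache.hadoop.ozone.s3.endpoint.S3Tagging.Tag;

    S3Tagging tagging = new PutTaggingUnmarshaller().readFrom(body);
    tagging.validate();   // rejects a missing TagSet, an empty tag list, and null keys/values

    // validateAndGetTagging additionally enforces the "aws:" prefix ban, key/value
    // length limits, the tag pattern, and duplicate-key rejection.
    Map<String, String> tags = validateAndGetTagging(
        tagging.getTagSet().getTags(), Tag::getKey, Tag::getValue);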
*/ @InterfaceAudience.Private @Metrics(about = "S3 Gateway Metrics", context = OzoneConsts.OZONE) -public final class S3GatewayMetrics implements MetricsSource { +public final class S3GatewayMetrics implements Closeable, MetricsSource { public static final String SOURCE_NAME = S3GatewayMetrics.class.getSimpleName(); @@ -91,6 +95,14 @@ public final class S3GatewayMetrics implements MetricsSource { private @Metric MutableCounterLong copyObjectSuccessLength; private @Metric MutableCounterLong putKeySuccessLength; private @Metric MutableCounterLong getKeySuccessLength; + private @Metric MutableCounterLong getObjectTaggingSuccess; + private @Metric MutableCounterLong getObjectTaggingFailure; + private @Metric MutableCounterLong putObjectTaggingSuccess; + private @Metric MutableCounterLong putObjectTaggingFailure; + private @Metric MutableCounterLong deleteObjectTaggingSuccess; + private @Metric MutableCounterLong deleteObjectTaggingFailure; + private @Metric MutableCounterLong putObjectAclSuccess; + private @Metric MutableCounterLong putObjectAclFailure; // S3 Gateway Latency Metrics // BucketEndpoint @@ -242,6 +254,34 @@ public final class S3GatewayMetrics implements MetricsSource { @Metric(about = "Latency for copy metadata of an key in nanoseconds") private PerformanceMetrics copyKeyMetadataLatencyNs; + @Metric(about = "Latency for successful get object tagging of a key in nanoseconds") + private PerformanceMetrics getObjectTaggingSuccessLatencyNs; + + @Metric(about = "Latency for failing to get object tagging of a key in nanoseconds") + private PerformanceMetrics getObjectTaggingFailureLatencyNs; + + @Metric(about = "Latency for successful put object tagging of a key in nanoseconds") + private PerformanceMetrics putObjectTaggingSuccessLatencyNs; + + @Metric(about = "Latency for failing to put object tagging of a key in nanoseconds") + private PerformanceMetrics putObjectTaggingFailureLatencyNs; + + @Metric(about = "Latency for successful delete object tagging of a key in nanoseconds") + private PerformanceMetrics deleteObjectTaggingSuccessLatencyNs; + + @Metric(about = "Latency for failing to delete object tagging of a key in nanoseconds") + private PerformanceMetrics deleteObjectTaggingFailureLatencyNs; + + @Metric(about = "Latency for successfully setting an S3 object ACL " + + "in nanoseconds") + private PerformanceMetrics putObjectAclSuccessLatencyNs; + + @Metric(about = "Latency for failing to set an S3 object ACL " + + "in nanoseconds") + private PerformanceMetrics putObjectAclFailureLatencyNs; + + private final Map performanceMetrics; + /** * Private constructor. */ @@ -249,10 +289,15 @@ private S3GatewayMetrics(OzoneConfiguration conf) { this.registry = new MetricsRegistry(SOURCE_NAME); int[] intervals = conf.getInts(S3GatewayConfigKeys .OZONE_S3G_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY); - PerformanceMetrics.initializeMetrics( + performanceMetrics = PerformanceMetrics.initializeMetrics( this, registry, "Ops", "Time", intervals); } + @Override + public void close() { + IOUtils.closeQuietly(performanceMetrics.values()); + } + /** * Create and returns S3 Gateway Metrics instance. * @@ -272,6 +317,7 @@ public static synchronized S3GatewayMetrics create(OzoneConfiguration conf) { * Unregister the metrics instance. 
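A short sketch of the metrics lifecycle after the Closeable change above: the PerformanceMetrics backing the latency gauges are now kept in a map and released when the source is closed or unregistered. The bare default configuration and the surrounding flow are illustrative only:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.util.Time;

    S3GatewayMetrics metrics = S3GatewayMetrics.create(new OzoneConfiguration());
    long startNanos = Time.monotonicNowNanos();
    // ... handle a PutObjectTagging request ...
    metrics.updatePutObjectTaggingSuccessStats(startNanos);
    S3GatewayMetrics.unRegister();   // closes the PerformanceMetrics quietly before unregistering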
*/ public static void unRegister() { + IOUtils.closeQuietly(instance); instance = null; MetricsSystem ms = DefaultMetricsSystem.instance(); ms.unregisterSource(SOURCE_NAME); @@ -363,6 +409,20 @@ public void getMetrics(MetricsCollector collector, boolean all) { putKeySuccessLength.snapshot(recordBuilder, true); getKeySuccessLength.snapshot(recordBuilder, true); listKeyCount.snapshot(recordBuilder, true); + getObjectTaggingSuccess.snapshot(recordBuilder, true); + getObjectTaggingSuccessLatencyNs.snapshot(recordBuilder, true); + getObjectTaggingFailure.snapshot(recordBuilder, true); + getObjectTaggingFailureLatencyNs.snapshot(recordBuilder, true); + putObjectTaggingSuccess.snapshot(recordBuilder, true); + putObjectTaggingSuccessLatencyNs.snapshot(recordBuilder, true); + putObjectTaggingFailure.snapshot(recordBuilder, true); + putObjectTaggingFailureLatencyNs.snapshot(recordBuilder, true); + deleteObjectTaggingSuccess.snapshot(recordBuilder, true); + deleteObjectTaggingSuccessLatencyNs.snapshot(recordBuilder, true); + deleteObjectTaggingFailure.snapshot(recordBuilder, true); + deleteObjectTaggingFailureLatencyNs.snapshot(recordBuilder, true); + putObjectAclSuccess.snapshot(recordBuilder, true); + putObjectAclFailure.snapshot(recordBuilder, true); } // INC and UPDATE @@ -584,6 +644,46 @@ public void incGetKeySuccessLength(long bytes) { getKeySuccessLength.incr(bytes); } + public void updateGetObjectTaggingSuccessStats(long startNanos) { + this.getObjectTaggingSuccess.incr(); + this.getObjectTaggingSuccessLatencyNs.add(Time.monotonicNowNanos() - startNanos); + } + + public void updateGetObjectTaggingFailureStats(long startNanos) { + this.getObjectTaggingFailure.incr(); + this.getObjectTaggingFailureLatencyNs.add(Time.monotonicNowNanos() - startNanos); + } + + public void updatePutObjectTaggingSuccessStats(long startNanos) { + this.putObjectTaggingSuccess.incr(); + this.putObjectTaggingSuccessLatencyNs.add(Time.monotonicNowNanos() - startNanos); + } + + public void updatePutObjectTaggingFailureStats(long startNanos) { + this.putObjectTaggingFailure.incr(); + this.putObjectTaggingFailureLatencyNs.add(Time.monotonicNowNanos() - startNanos); + } + + public void updateDeleteObjectTaggingSuccessStats(long startNanos) { + this.deleteObjectTaggingSuccess.incr(); + this.deleteObjectTaggingSuccessLatencyNs.add(Time.monotonicNowNanos() - startNanos); + } + + public void updateDeleteObjectTaggingFailureStats(long startNanos) { + this.deleteObjectTaggingFailure.incr(); + this.deleteObjectTaggingFailureLatencyNs.add(Time.monotonicNowNanos() - startNanos); + } + + public void updatePutObjectAclSuccessStats(long startNanos) { + this.putObjectAclSuccess.incr(); + this.putObjectAclSuccessLatencyNs.add(Time.monotonicNowNanos() - startNanos); + } + + public void updatePutObjectAclFailureStats(long startNanos) { + this.putObjectAclFailure.incr(); + this.putObjectAclFailureLatencyNs.add(Time.monotonicNowNanos() - startNanos); + } + // GET public long getListS3BucketsSuccess() { return listS3BucketsSuccess.value(); @@ -725,6 +825,30 @@ public long getListS3BucketsFailure() { return listS3BucketsFailure.value(); } + public long getGetObjectTaggingSuccess() { + return getObjectTaggingSuccess.value(); + } + + public long getGetObjectTaggingFailure() { + return getObjectTaggingFailure.value(); + } + + public long getPutObjectTaggingSuccess() { + return putObjectTaggingSuccess.value(); + } + + public long getPutObjectTaggingFailure() { + return putObjectTaggingFailure.value(); + } + + public long 
getDeleteObjectTaggingSuccess() { + return deleteObjectTaggingSuccess.value(); + } + + public long getDeleteObjectTaggingFailure() { + return deleteObjectTaggingFailure.value(); + } + private long updateAndGetStats(PerformanceMetrics metric, long startNanos) { long value = Time.monotonicNowNanos() - startNanos; metric.add(value); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java index cbdbef0e0a1..7b82d5c2a70 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java @@ -74,6 +74,7 @@ private S3Consts() { public static final String TAG_HEADER = "x-amz-tagging"; public static final String TAG_DIRECTIVE_HEADER = "x-amz-tagging-directive"; public static final String TAG_COUNT_HEADER = "x-amz-tagging-count"; + public static final String AWS_TAG_PREFIX = "aws:"; public static final int TAG_NUM_LIMIT = 10; public static final int TAG_KEY_LENGTH_LIMIT = 128; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3AdminEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3AdminEndpoint.java new file mode 100644 index 00000000000..b5c7b242cb5 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3AdminEndpoint.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
<p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p>
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + * + */ + +package org.apache.hadoop.ozone.s3secret; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import javax.ws.rs.NameBinding; + +/** + * Annotation to only allow admin users to access the endpoint. + */ +@NameBinding +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.TYPE, ElementType.METHOD}) +public @interface S3AdminEndpoint { +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretAdminFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretAdminFilter.java new file mode 100644 index 00000000000..5ecdfa7c121 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretAdminFilter.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
<p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p>
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + * + */ + +package org.apache.hadoop.ozone.s3secret; + + +import javax.inject.Inject; +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.container.ContainerRequestFilter; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.Response.Status; +import javax.ws.rs.ext.Provider; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.server.OzoneAdmins; +import org.apache.hadoop.security.UserGroupInformation; + +import java.io.IOException; +import java.security.Principal; + +/** + * Filter that only allows admin to access endpoints annotated with {@link S3AdminEndpoint}. + * Condition is based on the value of the configuration keys for: + *
<ul> + *   <li>ozone.administrators</li> + *   <li>ozone.administrators.groups</li> + * </ul>
    + */ +@S3AdminEndpoint +@Provider +public class S3SecretAdminFilter implements ContainerRequestFilter { + + @Inject + private OzoneConfiguration conf; + + @Override + public void filter(ContainerRequestContext requestContext) throws IOException { + final Principal userPrincipal = requestContext.getSecurityContext().getUserPrincipal(); + if (null != userPrincipal) { + UserGroupInformation user = UserGroupInformation.createRemoteUser(userPrincipal.getName()); + if (!OzoneAdmins.isS3Admin(user, conf)) { + requestContext.abortWith(Response.status(Status.FORBIDDEN).build()); + } + } + } +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java index 4ea17d2a2fd..739dadfb28e 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java @@ -33,7 +33,6 @@ import java.io.IOException; import static javax.ws.rs.core.Response.Status.BAD_REQUEST; -import static javax.ws.rs.core.Response.Status.METHOD_NOT_ALLOWED; import static javax.ws.rs.core.Response.Status.NOT_FOUND; /** @@ -41,6 +40,7 @@ */ @Path("/secret") @S3SecretEnabled +@S3AdminEndpoint public class S3SecretManagementEndpoint extends S3SecretEndpointBase { private static final Logger LOG = LoggerFactory.getLogger(S3SecretManagementEndpoint.class); @@ -54,8 +54,7 @@ public Response generate() throws IOException { @Path("/{username}") public Response generate(@PathParam("username") String username) throws IOException { - // TODO: It is a temporary solution. To be removed after HDDS-11041 is done. - return Response.status(METHOD_NOT_ALLOWED).build(); + return generateInternal(username); } private Response generateInternal(@Nullable String username) throws IOException { @@ -95,8 +94,7 @@ public Response revoke() throws IOException { @Path("/{username}") public Response revoke(@PathParam("username") String username) throws IOException { - // TODO: It is a temporary solution. To be removed after HDDS-11041 is done. 
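A hypothetical resource showing how the new name-binding annotation and filter combine; the class and path below are made up for illustration. S3SecretAdminFilter aborts the request with 403 FORBIDDEN unless OzoneAdmins.isS3Admin accepts the authenticated principal:

    import javax.ws.rs.GET;
    import javax.ws.rs.Path;
    import javax.ws.rs.core.Response;
    import org.apache.hadoop.ozone.s3secret.S3AdminEndpoint;

    @Path("/admin-example")   // hypothetical path
    @S3AdminEndpoint          // binds S3SecretAdminFilter to this resource
    public class AdminOnlyExample {
      @GET
      public Response info() {
        return Response.ok("s3 admins only").build();
      }
    }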
- return Response.status(METHOD_NOT_ALLOWED).build(); + return revokeInternal(username); } private Response revokeInternal(@Nullable String username) diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java index e3e3537b1c3..41584c9786d 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java @@ -771,4 +771,20 @@ public void recoverKey(OmKeyArgs args, long clientID) throws IOException { } + @Override + public Map getObjectTagging(String volumeName, String bucketName, String keyName) throws IOException { + return getBucket(volumeName, bucketName).getObjectTagging(keyName); + } + + @Override + public void putObjectTagging(String volumeName, String bucketName, String keyName, Map tags) + throws IOException { + getBucket(volumeName, bucketName).putObjectTagging(keyName, tags); + } + + @Override + public void deleteObjectTagging(String volumeName, String bucketName, String keyName) throws IOException { + getBucket(volumeName, bucketName).deleteObjectTagging(keyName); + } + } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java index 06b6a8efb71..21f2414c0a7 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java @@ -596,6 +596,37 @@ public boolean setAcl(List acls) throws IOException { return aclList.addAll(acls); } + @Override + public Map getObjectTagging(String keyName) throws IOException { + if (keyDetails.containsKey(keyName)) { + OzoneKeyDetails ozoneKeyDetails = keyDetails.get(keyName); + return ozoneKeyDetails.getTags(); + } else { + throw new OMException(ResultCodes.KEY_NOT_FOUND); + } + } + + @Override + public void putObjectTagging(String keyName, Map tags) throws IOException { + if (keyDetails.containsKey(keyName)) { + OzoneKeyDetails ozoneKeyDetails = keyDetails.get(keyName); + ozoneKeyDetails.getTags().clear(); + ozoneKeyDetails.getTags().putAll(tags); + } else { + throw new OMException(ResultCodes.KEY_NOT_FOUND); + } + } + + @Override + public void deleteObjectTagging(String keyName) throws IOException { + if (keyDetails.containsKey(keyName)) { + OzoneKeyDetails ozoneKeyDetails = keyDetails.get(keyName); + ozoneKeyDetails.getTags().clear(); + } else { + throw new OMException(ResultCodes.KEY_NOT_FOUND); + } + } + /** * Class used to hold part information in a upload part request. 
*/ diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java index 9c107bdb5b1..1356b50ad35 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java @@ -69,13 +69,13 @@ public void testAbortMultipartUpload() throws Exception { // Abort multipart upload - response = rest.delete(bucket, key, uploadID); + response = rest.delete(bucket, key, uploadID, null); assertEquals(204, response.getStatus()); // test with unknown upload Id. try { - rest.delete(bucket, key, "random"); + rest.delete(bucket, key, "random", null); } catch (OS3Exception ex) { assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), ex.getCode()); assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(), diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java index 677367e6d81..489aa5d91c3 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java @@ -77,17 +77,17 @@ public static void setUp() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, body); + content.length(), 1, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 2, uploadID, body); + content.length(), 2, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 3, uploadID, body); + content.length(), 3, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); } @@ -95,7 +95,7 @@ public static void setUp() throws Exception { @Test public void testListParts() throws Exception { Response response = REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, - uploadID, 3, "0"); + uploadID, 3, "0", null); ListPartsResponse listPartsResponse = (ListPartsResponse) response.getEntity(); @@ -108,7 +108,7 @@ public void testListParts() throws Exception { @Test public void testListPartsContinuation() throws Exception { Response response = REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, - uploadID, 2, "0"); + uploadID, 2, "0", null); ListPartsResponse listPartsResponse = (ListPartsResponse) response.getEntity(); @@ -117,7 +117,7 @@ public void testListPartsContinuation() throws Exception { // Continue response = REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, uploadID, 2, - Integer.toString(listPartsResponse.getNextPartNumberMarker())); + Integer.toString(listPartsResponse.getNextPartNumberMarker()), null); listPartsResponse = (ListPartsResponse) response.getEntity(); assertFalse(listPartsResponse.getTruncated()); @@ -129,7 +129,7 @@ public void testListPartsContinuation() throws Exception { public void testListPartsWithUnknownUploadID() throws Exception { try { REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, - uploadID, 2, "0"); + uploadID, 2, "0", null); } catch (OS3Exception 
ex) { assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(), ex.getErrorMessage()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java index b23dbfb9c05..4c5e2b53d90 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java @@ -109,7 +109,7 @@ private Part uploadPart(String key, String uploadID, int partNumber, String ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); Response response = REST.put(OzoneConsts.S3_BUCKET, key, content.length(), - partNumber, uploadID, body); + partNumber, uploadID, null, null, body); assertEquals(200, response.getStatus()); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); Part part = new Part(); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java index d9595aeff79..6894fc4abea 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java @@ -330,7 +330,7 @@ private Part uploadPart(String key, String uploadID, int partNumber, String ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); Response response = REST.put(OzoneConsts.S3_BUCKET, key, content.length(), - partNumber, uploadID, body); + partNumber, uploadID, null, null, body); assertEquals(200, response.getStatus()); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); Part part = new Part(); @@ -375,7 +375,7 @@ private Part uploadPartWithCopy(String key, String uploadID, int partNumber, ByteArrayInputStream body = new ByteArrayInputStream("".getBytes(UTF_8)); Response response = REST.put(OzoneConsts.S3_BUCKET, key, 0, partNumber, - uploadID, body); + uploadID, null, null, body); assertEquals(200, response.getStatus()); CopyPartResult result = (CopyPartResult) response.getEntity(); @@ -402,7 +402,7 @@ public void testUploadWithRangeCopyContentLength() OzoneConsts.S3_BUCKET + "/" + EXISTING_KEY); additionalHeaders.put(COPY_SOURCE_HEADER_RANGE, "bytes=0-3"); setHeaders(additionalHeaders); - REST.put(OzoneConsts.S3_BUCKET, KEY, 0, 1, uploadID, body); + REST.put(OzoneConsts.S3_BUCKET, KEY, 0, 1, uploadID, null, null, body); OzoneMultipartUploadPartListParts parts = CLIENT.getObjectStore().getS3Bucket(OzoneConsts.S3_BUCKET) .listParts(KEY, uploadID, 0, 100); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java index 90695f03ff9..340ed1984ec 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java @@ -51,7 +51,7 @@ public void delete() throws IOException, OS3Exception { rest.setOzoneConfiguration(new OzoneConfiguration()); //WHEN - rest.delete("b1", "key1", null); + rest.delete("b1", "key1", null, null); //THEN assertFalse(bucket.listKeys("").hasNext(), diff 
--git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java index 8cf8da95cf8..048faabcef0 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java @@ -95,11 +95,11 @@ public void init() throws OS3Exception, IOException { ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); rest.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, body); + 1, null, null, null, body); // Create a key with object tags when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2"); rest.put(BUCKET_NAME, KEY_WITH_TAG, CONTENT.length(), - 1, null, body); + 1, null, null, null, body); context = mock(ContainerRequestContext.class); when(context.getUriInfo()).thenReturn(mock(UriInfo.class)); @@ -111,7 +111,7 @@ public void init() throws OS3Exception, IOException { @Test public void get() throws IOException, OS3Exception { //WHEN - Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); + Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); //THEN OzoneInputStream ozoneInputStream = @@ -133,7 +133,7 @@ public void get() throws IOException, OS3Exception { @Test public void getKeyWithTag() throws IOException, OS3Exception { //WHEN - Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, null, 0, null); + Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, null, 0, null, null); //THEN OzoneInputStream ozoneInputStream = @@ -155,7 +155,7 @@ public void getKeyWithTag() throws IOException, OS3Exception { public void inheritRequestHeader() throws IOException, OS3Exception { setDefaultHeader(); - Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); + Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); assertEquals(CONTENT_TYPE1, response.getHeaderString("Content-Type")); @@ -188,7 +188,7 @@ public void overrideResponseHeader() throws IOException, OS3Exception { when(context.getUriInfo().getQueryParameters()) .thenReturn(queryParameter); - Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); + Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); assertEquals(CONTENT_TYPE2, response.getHeaderString("Content-Type")); @@ -209,13 +209,13 @@ public void getRangeHeader() throws IOException, OS3Exception { Response response; when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-0"); - response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); assertEquals("1", response.getHeaderString("Content-Length")); assertEquals(String.format("bytes 0-0/%s", CONTENT.length()), response.getHeaderString("Content-Range")); when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-"); - response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); assertEquals(String.valueOf(CONTENT.length()), response.getHeaderString("Content-Length")); assertEquals( @@ -228,7 +228,7 @@ public void getRangeHeader() throws IOException, OS3Exception { @Test public void getStatusCode() throws IOException, OS3Exception { Response response; - response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, 
null, null); assertEquals(response.getStatus(), Response.Status.OK.getStatusCode()); @@ -236,7 +236,7 @@ public void getStatusCode() throws IOException, OS3Exception { // The 206 (Partial Content) status code indicates that the server is // successfully fulfilling a range request for the target resource when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-1"); - response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); assertEquals(response.getStatus(), Response.Status.PARTIAL_CONTENT.getStatusCode()); assertNull(response.getHeaderString(TAG_COUNT_HEADER)); @@ -270,7 +270,7 @@ public void testGetWhenKeyIsDirectoryAndDoesNotEndWithASlash() // WHEN final OS3Exception ex = assertThrows(OS3Exception.class, - () -> rest.get(BUCKET_NAME, keyPath, 0, null, 0, null)); + () -> rest.get(BUCKET_NAME, keyPath, 0, null, 0, null, null)); // THEN assertEquals(NO_SUCH_KEY.getCode(), ex.getCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java index 8cde144a374..a36d756ddaa 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java @@ -31,6 +31,7 @@ import javax.ws.rs.core.MultivaluedHashMap; import javax.ws.rs.core.MultivaluedMap; import javax.ws.rs.core.Response; + import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.StringUtils; @@ -79,6 +80,7 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mockStatic; @@ -155,7 +157,7 @@ void testPutObject(int length, ReplicationConfig replication) throws IOException bucket.setReplicationConfig(replication); //WHEN - Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, length, 1, null, body); + Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, length, 1, null, null, null, body); //THEN assertEquals(200, response.getStatus()); @@ -182,7 +184,7 @@ void testPutObjectContentLength() throws IOException, OS3Exception { new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); long dataSize = CONTENT.length(); - objectEndpoint.put(BUCKET_NAME, KEY_NAME, dataSize, 0, null, body); + objectEndpoint.put(BUCKET_NAME, KEY_NAME, dataSize, 0, null, null, null, body); assertEquals(dataSize, getKeyDataSize()); } @@ -199,8 +201,8 @@ void testPutObjectContentLengthForStreaming() when(headers.getHeaderString(DECODED_CONTENT_LENGTH_HEADER)) .thenReturn("15"); - objectEndpoint.put(BUCKET_NAME, KEY_NAME, chunkedContent.length(), 0, null, - new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); + objectEndpoint.put(BUCKET_NAME, KEY_NAME, chunkedContent.length(), 0, null, null, + null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); assertEquals(15, getKeyDataSize()); } @@ -214,7 +216,7 @@ public void testPutObjectWithTags() throws IOException, OS3Exception { objectEndpoint.setHeaders(headersWithTags); Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, body); + 1, null, null, null, body); 
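Note on the widened signatures used throughout these test updates: ObjectEndpoint.put gains two parameters between the upload id and the request body, while get and delete each gain one trailing parameter; the new object-tagging call sites pass "" there and plain requests pass null. The parameter names are not visible in this diff. Below is a minimal, self-contained sketch of the new call shapes, mirroring the test setup this patch already uses (OzoneClientStub plus mocked HttpHeaders and ContainerRequestContext); the 204 status for delete is assumed from standard S3 DeleteObject semantics rather than taken from this diff.

package org.apache.hadoop.ozone.s3.endpoint;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import javax.ws.rs.container.ContainerRequestContext;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MultivaluedHashMap;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientStub;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.junit.jupiter.api.Test;

import static java.nio.charset.StandardCharsets.UTF_8;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

/** Sketch of the widened ObjectEndpoint call shapes exercised by this patch. */
public class ObjectEndpointCallShapeSketch {

  @Test
  public void plainCallsPassNullForTheNewParameters() throws IOException, OS3Exception {
    OzoneClient client = new OzoneClientStub();
    client.getObjectStore().createS3Bucket("b1");

    ObjectEndpoint endpoint = new ObjectEndpoint();
    endpoint.setClient(client);
    endpoint.setOzoneConfiguration(new OzoneConfiguration());
    endpoint.setHeaders(mock(HttpHeaders.class));

    ContainerRequestContext context = mock(ContainerRequestContext.class);
    when(context.getUriInfo()).thenReturn(mock(UriInfo.class));
    when(context.getUriInfo().getQueryParameters())
        .thenReturn(new MultivaluedHashMap<>());
    endpoint.setContext(context);

    String content = "0123456789";
    ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8));

    // put: two extra parameters before the body; null keeps plain PutObject behaviour.
    Response put = endpoint.put("b1", "key1", content.length(), 1, null, null, null, body);
    assertEquals(200, put.getStatus());

    // get: one extra trailing parameter; null keeps plain GetObject behaviour.
    Response get = endpoint.get("b1", "key1", 0, null, 0, null, null);
    assertEquals(200, get.getStatus());

    // delete: one extra trailing parameter; null keeps plain DeleteObject behaviour.
    Response delete = endpoint.delete("b1", "key1", null, null);
    assertEquals(204, delete.getStatus());
  }
}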
assertEquals(200, response.getStatus()); @@ -237,7 +239,7 @@ public void testPutObjectWithOnlyTagKey() throws Exception { try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, body); + 1, null, null, null, body); fail("request with invalid query param should fail"); } catch (OS3Exception ex) { assertEquals(INVALID_TAG.getCode(), ex.getCode()); @@ -255,7 +257,7 @@ public void testPutObjectWithDuplicateTagKey() throws Exception { objectEndpoint.setHeaders(headersWithDuplicateTagKey); try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, body); + 1, null, null, null, body); fail("request with duplicate tag key should fail"); } catch (OS3Exception ex) { assertEquals(INVALID_TAG.getCode(), ex.getCode()); @@ -274,7 +276,7 @@ public void testPutObjectWithLongTagKey() throws Exception { objectEndpoint.setHeaders(headersWithLongTagKey); try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, body); + 1, null, null, null, body); fail("request with tag key exceeding the length limit should fail"); } catch (OS3Exception ex) { assertEquals(INVALID_TAG.getCode(), ex.getCode()); @@ -293,7 +295,7 @@ public void testPutObjectWithLongTagValue() throws Exception { when(headersWithLongTagValue.getHeaderString(TAG_HEADER)).thenReturn("tag1=" + longTagValue); try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, body); + 1, null, null, null, body); fail("request with tag value exceeding the length limit should fail"); } catch (OS3Exception ex) { assertEquals(INVALID_TAG.getCode(), ex.getCode()); @@ -318,7 +320,7 @@ public void testPutObjectWithTooManyTags() throws Exception { objectEndpoint.setHeaders(headersWithTooManyTags); try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, body); + 1, null, null, null, body); fail("request with number of tags exceeding limit should fail"); } catch (OS3Exception ex) { assertEquals(INVALID_TAG.getCode(), ex.getCode()); @@ -347,7 +349,7 @@ void testPutObjectWithSignedChunks() throws IOException, OS3Exception { //WHEN Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, - chunkedContent.length(), 1, null, + chunkedContent.length(), 1, null, null, null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); //THEN @@ -368,7 +370,7 @@ public void testPutObjectMessageDigestResetDuringException() throws OS3Exception MessageDigest messageDigest = mock(MessageDigest.class); try (MockedStatic mocked = mockStatic(IOUtils.class)) { // For example, EOFException during put-object due to client cancelling the operation before it completes - mocked.when(() -> IOUtils.copyLarge(any(InputStream.class), any(OutputStream.class))) + mocked.when(() -> IOUtils.copy(any(InputStream.class), any(OutputStream.class), anyInt())) .thenThrow(IOException.class); when(objectEndpoint.getMessageDigestInstance()).thenReturn(messageDigest); @@ -376,7 +378,7 @@ public void testPutObjectMessageDigestResetDuringException() throws OS3Exception new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT - .length(), 1, null, body); + .length(), 1, null, null, null, body); fail("Should throw IOException"); } catch (IOException ignored) { // Verify that the message digest is reset so that the instance can be reused for the @@ -401,7 +403,7 @@ void testCopyObject() throws IOException, OS3Exception { when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("COPY"); Response response = objectEndpoint.put(BUCKET_NAME, 
KEY_NAME, - CONTENT.length(), 1, null, body); + CONTENT.length(), 1, null, null, null, body); OzoneInputStream ozoneInputStream = clientStub.getObjectStore() .getS3Bucket(BUCKET_NAME) @@ -427,7 +429,7 @@ void testCopyObject() throws IOException, OS3Exception { BUCKET_NAME + "/" + urlEncode(KEY_NAME)); response = objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, - null, body); + null, null, null, body); // Check destination key and response ozoneInputStream = clientStub.getObjectStore().getS3Bucket(DEST_BUCKET_NAME) @@ -457,7 +459,7 @@ void testCopyObject() throws IOException, OS3Exception { metadataHeaders.remove(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-2"); response = objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, - null, body); + null, null, null, body); ozoneInputStream = clientStub.getObjectStore().getS3Bucket(DEST_BUCKET_NAME) .readKey(DEST_KEY); @@ -484,7 +486,7 @@ void testCopyObject() throws IOException, OS3Exception { // wrong copy metadata directive when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("INVALID"); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, null, body), + DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, null, null, null, body), "test copy object failed"); assertThat(e.getHttpCode()).isEqualTo(400); assertThat(e.getCode()).isEqualTo("InvalidArgument"); @@ -494,7 +496,7 @@ void testCopyObject() throws IOException, OS3Exception { // source and dest same e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, body), + BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, null, null, body), "test copy object failed"); assertThat(e.getErrorMessage()).contains("This copy request is illegal"); @@ -502,28 +504,28 @@ void testCopyObject() throws IOException, OS3Exception { when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( NO_SUCH_BUCKET + "/" + urlEncode(KEY_NAME)); e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(DEST_BUCKET_NAME, - DEST_KEY, CONTENT.length(), 1, null, body), "test copy object failed"); + DEST_KEY, CONTENT.length(), 1, null, null, null, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); // dest bucket not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( BUCKET_NAME + "/" + urlEncode(KEY_NAME)); e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(NO_SUCH_BUCKET, - DEST_KEY, CONTENT.length(), 1, null, body), "test copy object failed"); + DEST_KEY, CONTENT.length(), 1, null, null, null, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); //Both source and dest bucket not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( NO_SUCH_BUCKET + "/" + urlEncode(KEY_NAME)); e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(NO_SUCH_BUCKET, - DEST_KEY, CONTENT.length(), 1, null, body), "test copy object failed"); + DEST_KEY, CONTENT.length(), 1, null, null, null, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); // source key not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( BUCKET_NAME + "/" + urlEncode(NO_SUCH_BUCKET)); e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - "nonexistent", KEY_NAME, CONTENT.length(), 1, null, body), + "nonexistent", KEY_NAME, CONTENT.length(), 1, null, null, null, body), "test copy object failed"); 
assertThat(e.getCode()).contains("NoSuchBucket"); } @@ -535,7 +537,7 @@ public void testCopyObjectMessageDigestResetDuringException() throws IOException new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, - CONTENT.length(), 1, null, body); + CONTENT.length(), 1, null, null, null, body); OzoneInputStream ozoneInputStream = clientStub.getObjectStore() .getS3Bucket(BUCKET_NAME) @@ -553,7 +555,7 @@ public void testCopyObjectMessageDigestResetDuringException() throws IOException try (MockedStatic mocked = mockStatic(IOUtils.class)) { // Add the mocked methods only during the copy request when(objectEndpoint.getMessageDigestInstance()).thenReturn(messageDigest); - mocked.when(() -> IOUtils.copyLarge(any(InputStream.class), any(OutputStream.class))) + mocked.when(() -> IOUtils.copy(any(InputStream.class), any(OutputStream.class), anyInt())) .thenThrow(IOException.class); // Add copy header, and then call put @@ -562,7 +564,7 @@ public void testCopyObjectMessageDigestResetDuringException() throws IOException try { objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, - null, body); + null, null, null, body); fail("Should throw IOException"); } catch (IOException ignored) { // Verify that the message digest is reset so that the instance can be reused for the @@ -584,7 +586,7 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception { String sourceKeyName = "sourceKey"; Response putResponse = objectEndpoint.put(BUCKET_NAME, sourceKeyName, - CONTENT.length(), 1, null, body); + CONTENT.length(), 1, null, null, null, body); OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(sourceKeyName); @@ -601,7 +603,7 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception { BUCKET_NAME + "/" + urlEncode(sourceKeyName)); objectEndpoint.setHeaders(headersForCopy); - Response copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, body); + Response copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, null, null, body); OzoneKeyDetails destKeyDetails = clientStub.getObjectStore() .getS3Bucket(DEST_BUCKET_NAME).getKey(destKey); @@ -620,7 +622,7 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception { // With x-amz-tagging-directive = COPY with a different x-amz-tagging when(headersForCopy.getHeaderString(TAG_HEADER)).thenReturn("tag3=value3"); - copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, body); + copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, null, null, body); assertEquals(200, copyResponse.getStatus()); destKeyDetails = clientStub.getObjectStore() @@ -635,7 +637,7 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception { // Copy object with x-amz-tagging-directive = REPLACE when(headersForCopy.getHeaderString(TAG_DIRECTIVE_HEADER)).thenReturn("REPLACE"); - copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, body); + copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, null, null, body); assertEquals(200, copyResponse.getStatus()); destKeyDetails = clientStub.getObjectStore() @@ -657,7 +659,7 @@ public void testCopyObjectWithInvalidTagCopyDirective() throws Exception { HttpHeaders headersForCopy = Mockito.mock(HttpHeaders.class); when(headersForCopy.getHeaderString(TAG_DIRECTIVE_HEADER)).thenReturn("INVALID"); try { - 
objectEndpoint.put(DEST_BUCKET_NAME, "somekey", CONTENT.length(), 1, null, body); + objectEndpoint.put(DEST_BUCKET_NAME, "somekey", CONTENT.length(), 1, null, null, null, body); } catch (OS3Exception ex) { assertEquals(INVALID_ARGUMENT.getCode(), ex.getCode()); assertThat(ex.getErrorMessage()).contains("The tagging copy directive specified is invalid"); @@ -672,7 +674,7 @@ void testInvalidStorageType() { when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("random"); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, body)); + BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, null, null, body)); assertEquals(S3ErrorTable.INVALID_ARGUMENT.getErrorMessage(), e.getErrorMessage()); assertEquals("random", e.getResource()); @@ -685,7 +687,7 @@ void testEmptyStorageType() throws IOException, OS3Exception { when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(""); objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT - .length(), 1, null, body); + .length(), 1, null, null, null, body); OzoneKeyDetails key = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME) .getKey(KEY_NAME); @@ -704,7 +706,7 @@ void testDirectoryCreation() throws IOException, // WHEN try (Response response = objectEndpoint.put(fsoBucket.getName(), path, - 0L, 0, "", null)) { + 0L, 0, "", null, null, null)) { assertEquals(HttpStatus.SC_OK, response.getStatus()); } @@ -719,16 +721,29 @@ void testDirectoryCreationOverFile() throws IOException, OS3Exception { final String path = "key"; final ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.put(FSO_BUCKET_NAME, path, CONTENT.length(), 0, "", body); + objectEndpoint.put(FSO_BUCKET_NAME, path, CONTENT.length(), 0, "", null, null, body); // WHEN final OS3Exception exception = assertThrows(OS3Exception.class, () -> objectEndpoint - .put(FSO_BUCKET_NAME, path + "/", 0, 0, "", null) + .put(FSO_BUCKET_NAME, path + "/", 0, 0, "", null, null, null) .close()); // THEN assertEquals(S3ErrorTable.NO_OVERWRITE.getCode(), exception.getCode()); assertEquals(S3ErrorTable.NO_OVERWRITE.getHttpCode(), exception.getHttpCode()); } + + @Test + public void testPutEmptyObject() throws IOException, OS3Exception { + HttpHeaders headersWithTags = Mockito.mock(HttpHeaders.class); + String emptyString = ""; + ByteArrayInputStream body = new ByteArrayInputStream(emptyString.getBytes(UTF_8)); + objectEndpoint.setHeaders(headersWithTags); + + Response putResponse = objectEndpoint.put(BUCKET_NAME, KEY_NAME, emptyString.length(), 1, null, null, null, body); + assertEquals(200, putResponse.getStatus()); + OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); + assertEquals(0, keyDetails.getDataSize()); + } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java new file mode 100644 index 00000000000..91f8869dc91 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java @@ -0,0 +1,152 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; + +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.MultivaluedHashMap; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.UriInfo; +import java.io.ByteArrayInputStream; +import java.io.IOException; + +import static java.net.HttpURLConnection.HTTP_NOT_FOUND; +import static java.net.HttpURLConnection.HTTP_NOT_IMPLEMENTED; +import static java.net.HttpURLConnection.HTTP_NO_CONTENT; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NOT_IMPLEMENTED; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_BUCKET; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_KEY; +import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_HEADER; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Tests for DeleteObjectTagging. 
+ */ +public class TestObjectTaggingDelete { + + private static final String CONTENT = "0123456789"; + private static final String BUCKET_NAME = "b1"; + private static final String KEY_WITH_TAG = "keyWithTag"; + private HttpHeaders headers; + private ObjectEndpoint rest; + private OzoneClient client; + private ByteArrayInputStream body; + private ContainerRequestContext context; + + @BeforeEach + public void init() throws OS3Exception, IOException { + //GIVEN + OzoneConfiguration config = new OzoneConfiguration(); + client = new OzoneClientStub(); + client.getObjectStore().createS3Bucket(BUCKET_NAME); + + rest = new ObjectEndpoint(); + rest.setClient(client); + rest.setOzoneConfiguration(config); + headers = Mockito.mock(HttpHeaders.class); + rest.setHeaders(headers); + body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + // Create a key with object tags + Mockito.when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2"); + rest.put(BUCKET_NAME, KEY_WITH_TAG, CONTENT.length(), + 1, null, null, null, body); + + + context = Mockito.mock(ContainerRequestContext.class); + Mockito.when(context.getUriInfo()).thenReturn(Mockito.mock(UriInfo.class)); + Mockito.when(context.getUriInfo().getQueryParameters()) + .thenReturn(new MultivaluedHashMap<>()); + rest.setContext(context); + } + + @Test + public void testDeleteTagging() throws IOException, OS3Exception { + Response response = rest.delete(BUCKET_NAME, KEY_WITH_TAG, null, ""); + assertEquals(HTTP_NO_CONTENT, response.getStatus()); + + assertTrue(client.getObjectStore().getS3Bucket(BUCKET_NAME) + .getKey(KEY_WITH_TAG).getTags().isEmpty()); + } + + @Test + public void testDeleteTaggingNoKeyFound() throws Exception { + try { + rest.delete(BUCKET_NAME, "nonexistent", null, ""); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); + assertEquals(NO_SUCH_KEY.getCode(), ex.getCode()); + } + } + + @Test + public void testDeleteTaggingNoBucketFound() throws Exception { + try { + rest.delete("nonexistent", "nonexistent", null, ""); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); + assertEquals(NO_SUCH_BUCKET.getCode(), ex.getCode()); + } + } + + @Test + public void testDeleteObjectTaggingNotImplemented() throws Exception { + OzoneClient mockClient = mock(OzoneClient.class); + ObjectStore mockObjectStore = mock(ObjectStore.class); + OzoneVolume mockVolume = mock(OzoneVolume.class); + OzoneBucket mockBucket = mock(OzoneBucket.class); + + when(mockClient.getObjectStore()).thenReturn(mockObjectStore); + when(mockObjectStore.getS3Volume()).thenReturn(mockVolume); + when(mockVolume.getBucket("fsoBucket")).thenReturn(mockBucket); + + ObjectEndpoint endpoint = new ObjectEndpoint(); + endpoint.setClient(mockClient); + + doThrow(new OMException("DeleteObjectTagging is not currently supported for FSO directory", + ResultCodes.NOT_SUPPORTED_OPERATION)).when(mockBucket).deleteObjectTagging("dir/"); + + try { + endpoint.delete("fsoBucket", "dir/", null, ""); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_IMPLEMENTED, ex.getHttpCode()); + assertEquals(NOT_IMPLEMENTED.getCode(), ex.getCode()); + } + } +} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java new file mode 100644 
index 00000000000..f379ae71f59 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.s3.endpoint.S3Tagging.Tag; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; + +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.MultivaluedHashMap; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.UriInfo; +import java.io.ByteArrayInputStream; +import java.io.IOException; + +import static java.net.HttpURLConnection.HTTP_NOT_FOUND; +import static java.net.HttpURLConnection.HTTP_OK; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_BUCKET; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_KEY; +import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_HEADER; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.fail; + +/** + * Tests for GetObjectTagging. + */ +public class TestObjectTaggingGet { + + private static final String CONTENT = "0123456789"; + private static final String BUCKET_NAME = "b1"; + private static final String KEY_WITH_TAG = "keyWithTag"; + private ObjectEndpoint rest; + + @BeforeEach + public void init() throws OS3Exception, IOException { + //GIVEN + OzoneConfiguration config = new OzoneConfiguration(); + OzoneClient client = new OzoneClientStub(); + client.getObjectStore().createS3Bucket(BUCKET_NAME); + + rest = new ObjectEndpoint(); + rest.setClient(client); + rest.setOzoneConfiguration(config); + HttpHeaders headers = Mockito.mock(HttpHeaders.class); + rest.setHeaders(headers); + ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + // Create a key with object tags + Mockito.when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2"); + rest.put(BUCKET_NAME, KEY_WITH_TAG, CONTENT.length(), + 1, null, null, null, body); + + + ContainerRequestContext context = Mockito.mock(ContainerRequestContext.class); + Mockito.when(context.getUriInfo()).thenReturn(Mockito.mock(UriInfo.class)); + Mockito.when(context.getUriInfo().getQueryParameters()) + .thenReturn(new MultivaluedHashMap<>()); + rest.setContext(context); + } + + @Test + public void testGetTagging() throws IOException, OS3Exception { + //WHEN + Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, null, 0, null, ""); + + assertEquals(HTTP_OK, response.getStatus()); + S3Tagging s3Tagging = (S3Tagging) response.getEntity(); + assertNotNull(s3Tagging); + assertNotNull(s3Tagging.getTagSet()); + assertEquals(2, s3Tagging.getTagSet().getTags().size()); + for (Tag tag: s3Tagging.getTagSet().getTags()) { + if (tag.getKey().equals("tag1")) { + assertEquals("value1", tag.getValue()); + } else if (tag.getKey().equals("tag2")) { + assertEquals("value2", tag.getValue()); + } else { + fail("Unknown 
tag found"); + } + } + } + + @Test + public void testGetTaggingNoKeyFound() throws Exception { + try { + rest.get(BUCKET_NAME, "nonexistent", 0, null, 0, null, ""); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); + assertEquals(NO_SUCH_KEY.getCode(), ex.getCode()); + } + } + + @Test + public void testGetTaggingNoBucketFound() throws Exception { + try { + rest.get("nonexistent", "nonexistent", 0, null, 0, null, ""); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); + assertEquals(NO_SUCH_BUCKET.getCode(), ex.getCode()); + } + } +} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java new file mode 100644 index 00000000000..478ab8ba79f --- /dev/null +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java @@ -0,0 +1,264 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.client.OzoneKeyDetails; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import javax.ws.rs.core.HttpHeaders; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Supplier; + +import static java.net.HttpURLConnection.HTTP_BAD_REQUEST; +import static java.net.HttpURLConnection.HTTP_NOT_FOUND; +import static java.net.HttpURLConnection.HTTP_NOT_IMPLEMENTED; +import static java.net.HttpURLConnection.HTTP_OK; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.MALFORMED_XML; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NOT_IMPLEMENTED; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_BUCKET; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_KEY; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Tests for PutObjectTagging. + */ +public class TestObjectTaggingPut { + + private OzoneClient clientStub; + private ObjectEndpoint objectEndpoint; + + private static final String BUCKET_NAME = "b1"; + private static final String KEY_NAME = "key=value/1"; + + @BeforeEach + void setup() throws IOException, OS3Exception { + OzoneConfiguration config = new OzoneConfiguration(); + + //Create client stub and object store stub. 
+ clientStub = new OzoneClientStub(); + + // Create bucket + clientStub.getObjectStore().createS3Bucket(BUCKET_NAME); + + // Create PutObject and setClient to OzoneClientStub + objectEndpoint = new ObjectEndpoint(); + objectEndpoint.setClient(clientStub); + objectEndpoint.setOzoneConfiguration(config); + + HttpHeaders headers = mock(HttpHeaders.class); + ByteArrayInputStream body = + new ByteArrayInputStream("".getBytes(UTF_8)); + objectEndpoint.setHeaders(headers); + + objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, null, null, body); + } + + @Test + public void testPutObjectTaggingWithEmptyBody() throws Exception { + try { + objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, "", null, + null); + fail(); + } catch (OS3Exception ex) { + assertEquals(HTTP_BAD_REQUEST, ex.getHttpCode()); + assertEquals(MALFORMED_XML.getCode(), ex.getCode()); + } + } + + @Test + public void testPutValidObjectTagging() throws Exception { + assertEquals(HTTP_OK, objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, + "", null, twoTags()).getStatus()); + OzoneKeyDetails keyDetails = + clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); + assertEquals(2, keyDetails.getTags().size()); + assertEquals("val1", keyDetails.getTags().get("tag1")); + assertEquals("val2", keyDetails.getTags().get("tag2")); + } + + @Test + public void testPutInvalidObjectTagging() throws Exception { + testInvalidObjectTagging(this::emptyBody, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); + testInvalidObjectTagging(this::invalidXmlStructure, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); + testInvalidObjectTagging(this::noTagSet, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); + testInvalidObjectTagging(this::emptyTags, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); + testInvalidObjectTagging(this::tagKeyNotSpecified, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); + testInvalidObjectTagging(this::tagValueNotSpecified, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); + } + + private void testInvalidObjectTagging(Supplier inputStream, + int expectedHttpCode, String expectedErrorCode) throws Exception { + try { + objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, "", null, + inputStream.get()); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(expectedHttpCode, ex.getHttpCode()); + assertEquals(expectedErrorCode, ex.getCode()); + } + } + + @Test + public void testPutObjectTaggingNoKeyFound() throws Exception { + try { + objectEndpoint.put(BUCKET_NAME, "nonexistent", 0, 1, + null, "", null, twoTags()); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); + assertEquals(NO_SUCH_KEY.getCode(), ex.getCode()); + } + } + + @Test + public void testPutObjectTaggingNoBucketFound() throws Exception { + try { + objectEndpoint.put("nonexistent", "nonexistent", 0, 1, + null, "", null, twoTags()); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); + assertEquals(NO_SUCH_BUCKET.getCode(), ex.getCode()); + } + } + + @Test + public void testPutObjectTaggingNotImplemented() throws Exception { + OzoneClient mockClient = mock(OzoneClient.class); + ObjectStore mockObjectStore = mock(ObjectStore.class); + OzoneVolume mockVolume = mock(OzoneVolume.class); + OzoneBucket mockBucket = mock(OzoneBucket.class); + + when(mockClient.getObjectStore()).thenReturn(mockObjectStore); + when(mockObjectStore.getS3Volume()).thenReturn(mockVolume); + 
when(mockVolume.getBucket("fsoBucket")).thenReturn(mockBucket); + + ObjectEndpoint endpoint = new ObjectEndpoint(); + Map twoTagsMap = new HashMap<>(); + twoTagsMap.put("tag1", "val1"); + twoTagsMap.put("tag2", "val2"); + endpoint.setClient(mockClient); + + doThrow(new OMException("PutObjectTagging is not currently supported for FSO directory", + ResultCodes.NOT_SUPPORTED_OPERATION)).when(mockBucket).putObjectTagging("dir/", twoTagsMap); + + try { + endpoint.put("fsoBucket", "dir/", 0, 1, null, "", + null, twoTags()); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_IMPLEMENTED, ex.getHttpCode()); + assertEquals(NOT_IMPLEMENTED.getCode(), ex.getCode()); + } + } + + private InputStream emptyBody() { + return null; + } + + private InputStream invalidXmlStructure() { + String xml = + "" + + " " + + " "; + + return new ByteArrayInputStream(xml.getBytes(UTF_8)); + } + + private InputStream twoTags() { + String xml = + "" + + " " + + " " + + " tag1" + + " val1" + + " " + + " " + + " tag2" + + " val2" + + " " + + " " + + ""; + + return new ByteArrayInputStream(xml.getBytes(UTF_8)); + } + + private InputStream noTagSet() { + String xml = + "" + + ""; + return new ByteArrayInputStream(xml.getBytes(UTF_8)); + } + + private InputStream emptyTags() { + String xml = + "" + + " " + + " " + + ""; + + return new ByteArrayInputStream(xml.getBytes(UTF_8)); + } + + public InputStream tagKeyNotSpecified() { + String xml = + "" + + " " + + " " + + " val1" + + " " + + " " + + ""; + + return new ByteArrayInputStream(xml.getBytes(UTF_8)); + } + + public InputStream tagValueNotSpecified() { + String xml = + "" + + " " + + " " + + " tag1" + + " " + + " " + + ""; + + return new ByteArrayInputStream(xml.getBytes(UTF_8)); + } +} + diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java index aecc56fe172..dbafa8c11cb 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java @@ -51,6 +51,7 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mockStatic; import static org.mockito.Mockito.spy; @@ -99,7 +100,7 @@ public void testPartUpload() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, body); + content.length(), 1, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -121,7 +122,7 @@ public void testPartUploadWithOverride() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, body); + content.length(), 1, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -130,7 +131,7 @@ public void testPartUploadWithOverride() throws Exception { // Upload part again with same part Number, the ETag should be changed. 
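Note: the XML request bodies built by the tagging helper methods above (twoTags(), noTagSet(), emptyTags(), tagKeyNotSpecified(), tagValueNotSpecified()) lost their markup in this rendering. The sketch below reconstructs two of them following the standard S3 PutObjectTagging schema (Tagging > TagSet > Tag > Key/Value); the namespace attribute and the exact whitespace of the original helpers are assumptions, not taken from this diff.

import java.io.ByteArrayInputStream;
import java.io.InputStream;

import static java.nio.charset.StandardCharsets.UTF_8;

final class TaggingRequestBodies {
  private TaggingRequestBodies() { }

  // Two tags (tag1=val1, tag2=val2), matching the assertions in testPutValidObjectTagging.
  static InputStream twoTags() {
    String xml =
        "<Tagging xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">" // namespace assumed
            + "<TagSet>"
            + "<Tag><Key>tag1</Key><Value>val1</Value></Tag>"
            + "<Tag><Key>tag2</Key><Value>val2</Value></Tag>"
            + "</TagSet>"
            + "</Tagging>";
    return new ByteArrayInputStream(xml.getBytes(UTF_8));
  }

  // Malformed case: a TagSet with no Tag children, which the endpoint rejects as MalformedXML.
  static InputStream emptyTags() {
    String xml =
        "<Tagging xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">"
            + "<TagSet></TagSet>"
            + "</Tagging>";
    return new ByteArrayInputStream(xml.getBytes(UTF_8));
  }
}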
content = "Multipart Upload Changed"; response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, body); + content.length(), 1, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); @@ -144,7 +145,7 @@ public void testPartUploadWithIncorrectUploadID() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, - "random", body); + "random", null, null, body); }); assertEquals("NoSuchUpload", ex.getCode()); assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); @@ -178,7 +179,7 @@ public void testPartUploadStreamContentLength() long contentLength = chunkedContent.length(); objectEndpoint.put(OzoneConsts.S3_BUCKET, keyName, contentLength, 1, - uploadID, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); + uploadID, null, null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); assertContentLength(uploadID, keyName, 15); } @@ -201,7 +202,7 @@ public void testPartUploadContentLength() throws IOException, OS3Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); REST.put(OzoneConsts.S3_BUCKET, keyName, - contentLength, 1, uploadID, body); + contentLength, 1, uploadID, null, null, body); assertContentLength(uploadID, keyName, content.length()); } @@ -234,7 +235,7 @@ public void testPartUploadMessageDigestResetDuringException() throws IOException try (MockedStatic mocked = mockStatic(IOUtils.class)) { // Add the mocked methods only during the copy request when(objectEndpoint.getMessageDigestInstance()).thenReturn(messageDigest); - mocked.when(() -> IOUtils.copyLarge(any(InputStream.class), any(OutputStream.class))) + mocked.when(() -> IOUtils.copy(any(InputStream.class), any(OutputStream.class), anyInt())) .thenThrow(IOException.class); String content = "Multipart Upload"; @@ -242,7 +243,7 @@ public void testPartUploadMessageDigestResetDuringException() throws IOException new ByteArrayInputStream(content.getBytes(UTF_8)); try { objectEndpoint.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, body); + content.length(), 1, uploadID, null, null, body); fail("Should throw IOException"); } catch (IOException ignored) { // Verify that the message digest is reset so that the instance can be reused for the diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java index 28ce32e7470..dc844f6463f 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java @@ -94,7 +94,7 @@ public void testPartUpload() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); response = REST.put(S3BUCKET, S3KEY, - content.length(), 1, uploadID, body); + content.length(), 1, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -115,7 +115,7 @@ public void testPartUploadWithOverride() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); response = REST.put(S3BUCKET, S3KEY, - content.length(), 1, uploadID, body); + content.length(), 1, uploadID, null, null, body); 
assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -124,7 +124,7 @@ public void testPartUploadWithOverride() throws Exception { // Upload part again with same part Number, the ETag should be changed. content = "Multipart Upload Changed"; response = REST.put(S3BUCKET, S3KEY, - content.length(), 1, uploadID, body); + content.length(), 1, uploadID, null, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); @@ -137,7 +137,7 @@ public void testPartUploadWithIncorrectUploadID() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); REST.put(S3BUCKET, S3KEY, content.length(), 1, - "random", body); + "random", null, null, body); }); assertEquals("NoSuchUpload", ex.getCode()); assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java index b74808de953..d256a346295 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java @@ -38,12 +38,14 @@ import java.io.ByteArrayInputStream; import java.io.IOException; +import java.io.InputStream; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import static java.net.HttpURLConnection.HTTP_FORBIDDEN; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyBoolean; import static org.mockito.Mockito.anyLong; @@ -245,7 +247,7 @@ public void testGetKey() throws IOException { objectEndpoint.setOzoneConfiguration(conf); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.get( - "bucketName", "keyPath", 0, null, 1000, "marker")); + "bucketName", "keyPath", 0, null, 1000, "marker", null)); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -261,7 +263,7 @@ public void testPutKey() throws IOException { objectEndpoint.setOzoneConfiguration(conf); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - "bucketName", "keyPath", 1024, 0, null, + "bucketName", "keyPath", 1024, 0, null, null, null, new ByteArrayInputStream(new byte[]{}))); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -277,7 +279,7 @@ public void testDeleteKey() throws IOException { objectEndpoint.setOzoneConfiguration(conf); OS3Exception e = assertThrows(OS3Exception.class, () -> - objectEndpoint.delete("bucketName", "keyPath", null)); + objectEndpoint.delete("bucketName", "keyPath", null, null)); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -294,4 +296,44 @@ public void testMultiUploadKey() throws IOException { objectEndpoint.initializeMultipartUpload("bucketName", "keyPath")); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } + + @Test + public void testObjectTagging() throws Exception { + when(objectStore.getVolume(anyString())).thenReturn(volume); + when(objectStore.getS3Volume()).thenReturn(volume); + when(objectStore.getS3Bucket(anyString())).thenReturn(bucket); + when(volume.getBucket("bucketName")).thenReturn(bucket); + when(bucket.getObjectTagging(anyString())).thenThrow(exception); + doThrow(exception).when(bucket).putObjectTagging(anyString(), anyMap()); + doThrow(exception).when(bucket).deleteObjectTagging(anyString()); + + 
ObjectEndpoint objectEndpoint = new ObjectEndpoint(); + objectEndpoint.setClient(client); + + String xml = + "" + + " " + + " " + + " tag1" + + " val1" + + " " + + " " + + ""; + + InputStream tagInput = new ByteArrayInputStream(xml.getBytes(UTF_8)); + + OS3Exception e = assertThrows(OS3Exception.class, () -> + objectEndpoint.put("bucketName", "keyPath", 0, 1, + null, "", null, tagInput)); + assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); + + e = assertThrows(OS3Exception.class, () -> + objectEndpoint.delete("bucketName", "keyPath", "", "")); + assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); + + e = assertThrows(OS3Exception.class, () -> + objectEndpoint.get("bucketName", "keyPath", 0, null, + 0, null, "")); + assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); + } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java index d988b430230..1c0e115a24c 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java @@ -106,7 +106,7 @@ public void testUpload() throws Exception { byte[] keyContent = S3_COPY_EXISTING_KEY_CONTENT.getBytes(UTF_8); ByteArrayInputStream body = new ByteArrayInputStream(keyContent); - Response response = REST.put(S3BUCKET, S3KEY, 0, 0, null, body); + Response response = REST.put(S3BUCKET, S3KEY, 0, 0, null, null, null, body); assertEquals(200, response.getStatus()); } @@ -140,7 +140,7 @@ public void testUploadWithCopy() throws Exception { .forEach((k, v) -> when(headers.getHeaderString(k)).thenReturn(v)); REST.setHeaders(headers); - Response response = REST.put(S3BUCKET, S3KEY, 0, 0, null, null); + Response response = REST.put(S3BUCKET, S3KEY, 0, 0, null, null, null, null); assertEquals(200, response.getStatus()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java index 947b0986c8e..1f6cee2c4a9 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java @@ -310,7 +310,7 @@ public void testCreateKeySuccess() throws Exception { new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); // Create the file keyEndpoint.put(bucketName, keyName, CONTENT - .length(), 1, null, body); + .length(), 1, null, null, null, body); body.close(); long curMetric = metrics.getCreateKeySuccess(); assertEquals(1L, curMetric - oriMetric); @@ -322,7 +322,8 @@ public void testCreateKeyFailure() throws Exception { // Create the file in a bucket that does not exist OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.put( - "unknownBucket", keyName, CONTENT.length(), 1, null, null)); + "unknownBucket", keyName, CONTENT.length(), 1, null, null, + null, null)); assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode()); long curMetric = metrics.getCreateKeyFailure(); assertEquals(1L, curMetric - oriMetric); @@ -334,7 +335,7 @@ public void testDeleteKeySuccess() throws Exception { long oriMetric = metrics.getDeleteKeySuccess(); bucket.createKey(keyName, 0).close(); - keyEndpoint.delete(bucketName, keyName, null); + keyEndpoint.delete(bucketName, keyName, null, null); long 
curMetric = metrics.getDeleteKeySuccess(); assertEquals(1L, curMetric - oriMetric); } @@ -343,7 +344,7 @@ public void testDeleteKeySuccess() throws Exception { public void testDeleteKeyFailure() throws Exception { long oriMetric = metrics.getDeleteKeyFailure(); OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.delete( - "unknownBucket", keyName, null)); + "unknownBucket", keyName, null, null)); assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode()); long curMetric = metrics.getDeleteKeyFailure(); assertEquals(1L, curMetric - oriMetric); @@ -358,9 +359,9 @@ public void testGetKeySuccess() throws Exception { new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); // Create the file keyEndpoint.put(bucketName, keyName, CONTENT - .length(), 1, null, body); + .length(), 1, null, null, null, body); // GET the key from the bucket - Response response = keyEndpoint.get(bucketName, keyName, 0, null, 0, null); + Response response = keyEndpoint.get(bucketName, keyName, 0, null, 0, null, null); StreamingOutput stream = (StreamingOutput) response.getEntity(); stream.write(new ByteArrayOutputStream()); long curMetric = metrics.getGetKeySuccess(); @@ -373,7 +374,7 @@ public void testGetKeyFailure() throws Exception { // Fetching a non-existent key OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.get( - bucketName, "unknownKey", 0, null, 0, null)); + bucketName, "unknownKey", 0, null, 0, null, null)); assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), e.getCode()); long curMetric = metrics.getGetKeyFailure(); assertEquals(1L, curMetric - oriMetric); @@ -407,7 +408,7 @@ public void testAbortMultiPartUploadSuccess() throws Exception { long oriMetric = metrics.getAbortMultiPartUploadSuccess(); // Abort the Upload Successfully by deleting the key using the Upload-Id - keyEndpoint.delete(bucketName, keyName, uploadID); + keyEndpoint.delete(bucketName, keyName, uploadID, null); long curMetric = metrics.getAbortMultiPartUploadSuccess(); assertEquals(1L, curMetric - oriMetric); @@ -419,7 +420,7 @@ public void testAbortMultiPartUploadFailure() throws Exception { // Fail the Abort Method by providing wrong uploadID OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.delete( - bucketName, keyName, "wrongId")); + bucketName, keyName, "wrongId", null)); assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode()); long curMetric = metrics.getAbortMultiPartUploadFailure(); assertEquals(1L, curMetric - oriMetric); @@ -466,7 +467,7 @@ public void testCreateMultipartKeySuccess() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); keyEndpoint.put(bucketName, keyName, CONTENT.length(), - 1, uploadID, body); + 1, uploadID, null, null, body); long curMetric = metrics.getCreateMultipartKeySuccess(); assertEquals(1L, curMetric - oriMetric); } @@ -475,7 +476,7 @@ public void testCreateMultipartKeySuccess() throws Exception { public void testCreateMultipartKeyFailure() throws Exception { long oriMetric = metrics.getCreateMultipartKeyFailure(); OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.put( - bucketName, keyName, CONTENT.length(), 1, "randomId", null)); + bucketName, keyName, CONTENT.length(), 1, "randomId", null, null, null)); assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode()); long curMetric = metrics.getCreateMultipartKeyFailure(); assertEquals(1L, curMetric - oriMetric); @@ -490,7 +491,7 @@ public void testListPartsSuccess() throws Exception { // Listing out the parts by providing the 
uploadID keyEndpoint.get(bucketName, keyName, 0, - uploadID, 3, null); + uploadID, 3, null, null); long curMetric = metrics.getListPartsSuccess(); assertEquals(1L, curMetric - oriMetric); } @@ -501,7 +502,7 @@ public void testListPartsFailure() throws Exception { long oriMetric = metrics.getListPartsFailure(); // Listing out the parts by providing the uploadID after aborting OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.get( - bucketName, keyName, 0, "wrong_id", 3, null)); + bucketName, keyName, 0, "wrong_id", 3, null, null)); assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode()); long curMetric = metrics.getListPartsFailure(); assertEquals(1L, curMetric - oriMetric); @@ -522,14 +523,14 @@ public void testCopyObject() throws Exception { new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); keyEndpoint.put(bucketName, keyName, - CONTENT.length(), 1, null, body); + CONTENT.length(), 1, null, null, null, body); // Add copy header, and then call put when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( bucketName + "/" + urlEncode(keyName)); keyEndpoint.put(destBucket, destKey, CONTENT.length(), 1, - null, body); + null, null, null, body); long curMetric = metrics.getCopyObjectSuccess(); assertEquals(1L, curMetric - oriMetric); @@ -538,13 +539,114 @@ public void testCopyObject() throws Exception { // source and dest same when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(""); OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.put( - bucketName, keyName, CONTENT.length(), 1, null, body), + bucketName, keyName, CONTENT.length(), 1, null, null, null, body), "Test for CopyObjectMetric failed"); assertThat(e.getErrorMessage()).contains("This copy request is illegal"); curMetric = metrics.getCopyObjectFailure(); assertEquals(1L, curMetric - oriMetric); } + @Test + public void testPutObjectTaggingSuccess() throws Exception { + long oriMetric = metrics.getPutObjectTaggingSuccess(); + + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + // Create the file + keyEndpoint.put(bucketName, keyName, CONTENT + .length(), 1, null, null, null, body); + body.close(); + + // Put object tagging + keyEndpoint.put(bucketName, keyName, 0, 1, null, "", null, getPutTaggingBody()); + + long curMetric = metrics.getPutObjectTaggingSuccess(); + assertEquals(1L, curMetric - oriMetric); + } + + @Test + public void testPutObjectTaggingFailure() throws Exception { + long oriMetric = metrics.getPutObjectTaggingFailure(); + + // Put object tagging for nonexistent key + OS3Exception ex = assertThrows(OS3Exception.class, () -> + keyEndpoint.put(bucketName, "nonexistent", 0, 1, null, "", + null, getPutTaggingBody()) + ); + assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), ex.getCode()); + + long curMetric = metrics.getPutObjectTaggingFailure(); + assertEquals(1L, curMetric - oriMetric); + } + + @Test + public void testGetObjectTaggingSuccess() throws Exception { + long oriMetric = metrics.getGetObjectTaggingSuccess(); + + // Create the file + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + keyEndpoint.put(bucketName, keyName, CONTENT + .length(), 1, null, null, null, body); + body.close(); + + // Put object tagging + keyEndpoint.put(bucketName, keyName, 0, 1, null, "", null, getPutTaggingBody()); + + // Get object tagging + keyEndpoint.get(bucketName, keyName, 0, + null, 0, null, ""); + + long curMetric = metrics.getGetObjectTaggingSuccess(); + assertEquals(1L, curMetric - oriMetric); + } + + @Test + 
public void testGetObjectTaggingFailure() throws Exception { + long oriMetric = metrics.getGetObjectTaggingFailure(); + + // Get object tagging for nonexistent key + OS3Exception ex = assertThrows(OS3Exception.class, () -> + keyEndpoint.get(bucketName, "nonexistent", 0, null, + 0, null, "")); + assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), ex.getCode()); + long curMetric = metrics.getGetObjectTaggingFailure(); + assertEquals(1L, curMetric - oriMetric); + } + + @Test + public void testDeleteObjectTaggingSuccess() throws Exception { + long oriMetric = metrics.getDeleteObjectTaggingSuccess(); + + // Create the file + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + keyEndpoint.put(bucketName, keyName, CONTENT + .length(), 1, null, null, null, body); + body.close(); + + // Put object tagging + keyEndpoint.put(bucketName, keyName, 0, 1, null, "", null, getPutTaggingBody()); + + // Delete object tagging + keyEndpoint.delete(bucketName, keyName, null, ""); + + long curMetric = metrics.getDeleteObjectTaggingSuccess(); + assertEquals(1L, curMetric - oriMetric); + } + + @Test + public void testDeleteObjectTaggingFailure() throws Exception { + long oriMetric = metrics.getDeleteObjectTaggingFailure(); + + // Delete object tagging for nonexistent key + OS3Exception ex = assertThrows(OS3Exception.class, () -> + keyEndpoint.delete(bucketName, "nonexistent", null, "")); + assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), ex.getCode()); + long curMetric = metrics.getDeleteObjectTaggingFailure(); + assertEquals(1L, curMetric - oriMetric); + } + private OzoneClient createClientWithKeys(String... keys) throws IOException { for (String key : keys) { bucket.createKey(key, 0).close(); @@ -567,4 +669,18 @@ private String initiateMultipartUpload(String bktName, String key) } return "Invalid-Id"; } + + private static InputStream getPutTaggingBody() { + String xml = + "" + + " " + + " " + + " tag1" + + " val1" + + " " + + " " + + ""; + + return new ByteArrayInputStream(xml.getBytes(UTF_8)); + } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java index d1f81faddd2..b548d17d9ff 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java @@ -33,7 +33,6 @@ import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; -import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -93,7 +92,7 @@ void testSecretGenerate() throws IOException { hasNoSecretYet(); S3SecretResponse response = - (S3SecretResponse) endpoint.generate().getEntity(); + (S3SecretResponse) endpoint.generate().getEntity(); assertEquals(USER_SECRET, response.getAwsSecret()); assertEquals(USER_NAME, response.getAwsAccessKey()); @@ -112,12 +111,11 @@ void testIfSecretAlreadyExists() throws IOException { } @Test - @Unhealthy("HDDS-11041") void testSecretGenerateWithUsername() throws IOException { hasNoSecretYet(); S3SecretResponse response = - (S3SecretResponse) endpoint.generate(OTHER_USER_NAME).getEntity(); + (S3SecretResponse) endpoint.generate(OTHER_USER_NAME).getEntity(); assertEquals(USER_SECRET, 
response.getAwsSecret()); assertEquals(OTHER_USER_NAME, response.getAwsAccessKey()); } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java index 85e6bd4c10e..b26df0e8996 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java @@ -30,7 +30,6 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -98,7 +97,6 @@ void testSecretRevoke() throws IOException { } @Test - @Unhealthy("HDDS-11041") void testSecretRevokeWithUsername() throws IOException { endpoint.revoke(OTHER_USER_NAME); verify(objectStore, times(1)) diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml index d8c5599f304..8ea8ded01ce 100644 --- a/hadoop-ozone/tools/pom.xml +++ b/hadoop-ozone/tools/pom.xml @@ -20,14 +20,18 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-tools - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Tools Apache Ozone Tools jar + + false + + org.apache.ozone @@ -169,6 +173,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> info.picocli picocli + + info.picocli + picocli-shell-jline3 + + + org.jline + jline + jakarta.xml.bind jakarta.xml.bind-api @@ -271,21 +283,24 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> maven-compiler-plugin - - org.apache.ozone - hdds-config - ${hdds.version} - org.kohsuke.metainf-services metainf-services ${metainf-services.version} + + info.picocli + picocli-codegen + ${picocli.version} + - org.apache.hadoop.hdds.conf.ConfigFileGenerator org.kohsuke.metainf_services.AnnotationProcessorImpl + picocli.codegen.aot.graalvm.processor.NativeImageConfigGeneratorProcessor + + -Aproject=${project.groupId}/${project.artifactId} + @@ -299,8 +314,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> Only selected annotation processors are enabled, see configuration of maven-compiler-plugin. 
- org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator + org.apache.hadoop.hdds.conf.Config + org.apache.hadoop.hdds.conf.ConfigGroup org.apache.hadoop.hdds.scm.metadata.Replicate + org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java index 46a311e3546..ef9be49abfb 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java @@ -18,10 +18,9 @@ package org.apache.hadoop.ozone.admin.nssummary; import org.apache.hadoop.fs.ozone.OzoneClientUtils; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.OzoneAdmin; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.server.http.HttpConfig; @@ -62,29 +61,15 @@ QuotaUsageSubCommand.class, FileSizeDistSubCommand.class }) -@MetaInfServices(SubcommandWithParent.class) -public class NSSummaryAdmin extends GenericCli implements SubcommandWithParent { +@MetaInfServices(AdminSubcommand.class) +public class NSSummaryAdmin implements AdminSubcommand { @CommandLine.ParentCommand private OzoneAdmin parent; - @CommandLine.Spec - private CommandLine.Model.CommandSpec spec; - public OzoneAdmin getParent() { return parent; } - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } - - @Override - public Class getParentType() { - return OzoneAdmin.class; - } - private boolean isObjectStoreBucket(OzoneBucket bucket, ObjectStore objectStore) { boolean enableFileSystemPaths = getOzoneConfig() .getBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/GetServiceRolesSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/GetServiceRolesSubcommand.java index 2a25dfbd103..2b23ad9f536 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/GetServiceRolesSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/GetServiceRolesSubcommand.java @@ -18,17 +18,19 @@ package org.apache.hadoop.ozone.admin.om; +import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.server.JsonUtils; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRoleInfo; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; +import org.apache.hadoop.ozone.utils.FormattingCLIUtils; import picocli.CommandLine; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -57,19 +59,40 @@ public class GetServiceRolesSubcommand implements Callable { description = "Format output as JSON") private boolean json; + @CommandLine.Option(names = { "--table" }, + defaultValue = 
"false", + description = "Format output as Table") + private boolean table; + private OzoneManagerProtocol ozoneManagerClient; + private static final String OM_ROLES_TITLE = "Ozone Manager Roles"; + + private static final List OM_ROLES_HEADER = Arrays.asList( + "Host Name", "Node ID", "Role"); + @Override public Void call() throws Exception { try { ozoneManagerClient = parent.createOmClient(omServiceId); if (json) { printOmServerRolesAsJson(ozoneManagerClient.getServiceList()); + } else if (table) { + FormattingCLIUtils formattingCLIUtils = new FormattingCLIUtils(OM_ROLES_TITLE) + .addHeaders(OM_ROLES_HEADER); + List serviceList = ozoneManagerClient.getServiceList(); + for (ServiceInfo serviceInfo : serviceList) { + OMRoleInfo omRoleInfo = serviceInfo.getOmRoleInfo(); + if (omRoleInfo != null && + serviceInfo.getNodeType() == HddsProtos.NodeType.OM) { + formattingCLIUtils.addLine(new String[]{serviceInfo.getHostname(), + omRoleInfo.getNodeId(), omRoleInfo.getServerRole()}); + } + } + System.out.println(formattingCLIUtils.render()); } else { printOmServerRoles(ozoneManagerClient.getServiceList()); } - } catch (OzoneClientException ex) { - System.out.printf("Error: %s", ex.getMessage()); } finally { if (ozoneManagerClient != null) { ozoneManagerClient.close(); @@ -110,4 +133,14 @@ private void printOmServerRolesAsJson(List serviceList) System.out.print( JsonUtils.toJsonStringWithDefaultPrettyPrinter(omServiceList)); } + + @VisibleForTesting + public void setOzoneManagerClient(OzoneManagerProtocol ozoneManagerClient) { + this.ozoneManagerClient = ozoneManagerClient; + } + + @VisibleForTesting + public void setParent(OMAdmin parent) { + this.parent = parent; + } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java index 3162c556354..9076ce9bf7d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java @@ -17,10 +17,9 @@ */ package org.apache.hadoop.ozone.admin.om; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.OzoneAdmin; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; @@ -38,8 +37,6 @@ import org.apache.ratis.protocol.ClientId; import org.kohsuke.MetaInfServices; import picocli.CommandLine; -import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.Spec; import java.util.Collection; @@ -63,25 +60,16 @@ TransferOmLeaderSubCommand.class, FetchKeySubCommand.class }) -@MetaInfServices(SubcommandWithParent.class) -public class OMAdmin extends GenericCli implements SubcommandWithParent { +@MetaInfServices(AdminSubcommand.class) +public class OMAdmin implements AdminSubcommand { @CommandLine.ParentCommand private OzoneAdmin parent; - @Spec - private CommandSpec spec; - public OzoneAdmin getParent() { return parent; } - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } - public ClientProtocol createClient(String omServiceId) throws Exception { OzoneConfiguration conf = parent.getOzoneConf(); if (OmUtils.isOmHAServiceId(conf, omServiceId)) { @@ -146,9 +134,4 @@ private Collection getConfiguredServiceIds() { 
conf.getTrimmedStringCollection(OZONE_OM_SERVICE_IDS_KEY); return omServiceIds; } - - @Override - public Class getParentType() { - return OzoneAdmin.class; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/UpdateRangerSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/UpdateRangerSubcommand.java index 4234ee29d12..d2d2f1bf044 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/UpdateRangerSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/UpdateRangerSubcommand.java @@ -20,7 +20,6 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import picocli.CommandLine; @@ -86,9 +85,6 @@ public Void call() throws Exception { System.err.println("Operation completed with errors. " + "Check OM log for details"); } - - } catch (OzoneClientException ex) { - System.err.printf("Error: %s", ex.getMessage()); } return null; } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/AbstractReconfigureSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/AbstractReconfigureSubCommand.java index 0a2666d30ee..b8ea45898d7 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/AbstractReconfigureSubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/AbstractReconfigureSubCommand.java @@ -28,6 +28,7 @@ /** * An abstract Class use to ReconfigureSubCommand. */ +@CommandLine.Command public abstract class AbstractReconfigureSubCommand implements Callable { @CommandLine.ParentCommand private ReconfigureCommands parent; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureCommands.java index fc171e52d8d..d14102c4e8a 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureCommands.java @@ -17,10 +17,10 @@ */ package org.apache.hadoop.ozone.admin.reconfig; +import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.OzoneAdmin; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; import org.apache.hadoop.hdds.scm.client.ScmClient; @@ -47,9 +47,8 @@ ReconfigureStatusSubcommand.class, ReconfigurePropertiesSubcommand.class }) -@MetaInfServices(SubcommandWithParent.class) -public class ReconfigureCommands implements Callable, - SubcommandWithParent { +@MetaInfServices(AdminSubcommand.class) +public class ReconfigureCommands implements Callable, AdminSubcommand { @CommandLine.ParentCommand private OzoneAdmin parent; @@ -87,11 +86,6 @@ public HddsProtos.NodeType getService() { return HddsProtos.NodeType.valueOf(service); } - @Override - public Class getParentType() { - return OzoneAdmin.class; - } - public boolean isBatchReconfigDatanodes() { return batchReconfigDatanodes; } diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java index 480133e59b4..da74083de3b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.admin.scm; import java.io.IOException; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -27,6 +28,7 @@ import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.server.JsonUtils; +import org.apache.hadoop.ozone.utils.FormattingCLIUtils; import picocli.CommandLine; import static java.lang.System.err; @@ -50,13 +52,44 @@ public class GetScmRatisRolesSubcommand extends ScmSubcommand { description = "Format output as JSON") private boolean json; + @CommandLine.Option(names = { "--table" }, + defaultValue = "false", + description = "Format output as Table") + private boolean table; + + private static final String SCM_ROLES_TITLE = "Storage Container Manager Roles"; + + private static final List RATIS_SCM_ROLES_HEADER = Arrays.asList( + "Host Name", "Ratis Port", "Role", "Node ID", "Host Address"); + + private static final List STANDALONE_SCM_ROLES_HEADER = Arrays.asList("Host Name", "Port"); + @Override - protected void execute(ScmClient scmClient) throws IOException { + public void execute(ScmClient scmClient) throws IOException { List ratisRoles = scmClient.getScmRatisRoles(); + boolean isRatisEnabled = scmClient.isScmRatisEnable(); if (json) { Map> scmRoles = parseScmRoles(ratisRoles); System.out.print( JsonUtils.toJsonStringWithDefaultPrettyPrinter(scmRoles)); + } else if (table) { + FormattingCLIUtils formattingCLIUtils = new FormattingCLIUtils(SCM_ROLES_TITLE); + + // Determine which header to use based on whether Ratis is enabled or not. + if (isRatisEnabled) { + formattingCLIUtils.addHeaders(RATIS_SCM_ROLES_HEADER); + } else { + formattingCLIUtils.addHeaders(STANDALONE_SCM_ROLES_HEADER); + } + + for (String role : ratisRoles) { + String[] roleItems = role.split(":"); + if (roleItems.length < 2) { + err.println("Invalid response received for ScmRatisRoles."); + } + formattingCLIUtils.addLine(roleItems); + } + System.out.println(formattingCLIUtils.render()); } else { for (String role: ratisRoles) { System.out.println(role); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java index 98eba154b25..996485b13fd 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java @@ -17,14 +17,11 @@ */ package org.apache.hadoop.ozone.admin.scm; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.OzoneAdmin; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.kohsuke.MetaInfServices; import picocli.CommandLine; -import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.Spec; /** * Subcommand for admin operations related to SCM. 
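Both roles subcommands above build their --table output through the same FormattingCLIUtils helper. A minimal usage sketch follows; the method names (addHeaders, addLine, render) are the ones exercised by this patch, while the host and node values are hypothetical:

    FormattingCLIUtils omRolesTable = new FormattingCLIUtils("Ozone Manager Roles")
        .addHeaders(Arrays.asList("Host Name", "Node ID", "Role"));
    omRolesTable.addLine(new String[] {"om1.example.com", "om1", "LEADER"});    // sample row
    omRolesTable.addLine(new String[] {"om2.example.com", "om2", "FOLLOWER"});  // sample row
    System.out.println(omRolesTable.render());

For the SCM variant the header set is chosen by scmClient.isScmRatisEnable(): five columns (Host Name, Ratis Port, Role, Node ID, Host Address) when Ratis is enabled, two (Host Name, Port) otherwise, and each role string returned by getScmRatisRoles() is split on ':' to form one table row.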
@@ -43,28 +40,13 @@ DecommissionScmSubcommand.class, RotateKeySubCommand.class }) -@MetaInfServices(SubcommandWithParent.class) -public class ScmAdmin extends GenericCli implements SubcommandWithParent { +@MetaInfServices(AdminSubcommand.class) +public class ScmAdmin implements AdminSubcommand { @CommandLine.ParentCommand private OzoneAdmin parent; - @Spec - private CommandSpec spec; - public OzoneAdmin getParent() { return parent; } - - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } - - @Override - public Class getParentType() { - return OzoneAdmin.class; - } - } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/CompactionLogDagPrinter.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/CompactionLogDagPrinter.java index 18c13d67cf2..175fc03e398 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/CompactionLogDagPrinter.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/CompactionLogDagPrinter.java @@ -18,9 +18,8 @@ package org.apache.hadoop.ozone.debug; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.DebugSubcommand; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.shell.Handler; import org.apache.hadoop.ozone.shell.OzoneAddress; import org.kohsuke.MetaInfServices; @@ -36,9 +35,9 @@ name = "print-log-dag", aliases = "pld", description = "Create an image of the current compaction log DAG in OM.") -@MetaInfServices(SubcommandWithParent.class) +@MetaInfServices(DebugSubcommand.class) public class CompactionLogDagPrinter extends Handler - implements SubcommandWithParent { + implements DebugSubcommand { @CommandLine.Option(names = {"-f", "--file-name-prefix"}, description = "Prefix to be use in image file name. 
(optional)") @@ -56,14 +55,9 @@ public class CompactionLogDagPrinter extends Handler defaultValue = "file_name") private String graphType; - @Override - public Class getParentType() { - return OzoneDebug.class; - } - @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { String message = client.getObjectStore() .printCompactionLogDag(fileNamePrefix, graphType); System.out.println(message); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java index a163cda2502..ca79aa41fa4 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java @@ -20,12 +20,16 @@ import java.nio.file.Path; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; import org.apache.hadoop.hdds.utils.db.DBDefinition; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.container.metadata.WitnessedContainerDBDefinition; import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaOneDBDefinition; import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition; import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaTwoDBDefinition; @@ -48,17 +52,15 @@ public final class DBDefinitionFactory { private DBDefinitionFactory() { } - private static HashMap dbMap; - - private static String dnDBSchemaVersion; + private static final AtomicReference DATANODE_DB_SCHEMA_VERSION = new AtomicReference<>(); + private static final Map DB_MAP; static { - dbMap = new HashMap<>(); - Arrays.asList( - new SCMDBDefinition(), - new OMDBDefinition(), - new ReconSCMDBDefinition() - ).forEach(dbDefinition -> dbMap.put(dbDefinition.getName(), dbDefinition)); + final Map map = new HashMap<>(); + Arrays.asList(SCMDBDefinition.get(), OMDBDefinition.get(), ReconSCMDBDefinition.get(), + WitnessedContainerDBDefinition.get()) + .forEach(dbDefinition -> map.put(dbDefinition.getName(), dbDefinition)); + DB_MAP = Collections.unmodifiableMap(map); } public static DBDefinition getDefinition(String dbName) { @@ -66,10 +68,8 @@ public static DBDefinition getDefinition(String dbName) { if (!dbName.equals(OM_DB_NAME) && dbName.startsWith(OM_DB_NAME)) { dbName = OM_DB_NAME; } - if (dbMap.containsKey(dbName)) { - return dbMap.get(dbName); - } - return getReconDBDefinition(dbName); + final DBDefinition definition = DB_MAP.get(dbName); + return definition != null ? 
definition : getReconDBDefinition(dbName); } public static DBDefinition getDefinition(Path dbPath, @@ -83,7 +83,7 @@ public static DBDefinition getDefinition(Path dbPath, } String dbName = fileName.toString(); if (dbName.endsWith(OzoneConsts.CONTAINER_DB_SUFFIX)) { - switch (dnDBSchemaVersion) { + switch (DATANODE_DB_SCHEMA_VERSION.get()) { case "V1": return new DatanodeSchemaOneDBDefinition( dbPath.toAbsolutePath().toString(), config); @@ -102,12 +102,12 @@ private static DBDefinition getReconDBDefinition(String dbName) { if (dbName.startsWith(RECON_CONTAINER_KEY_DB)) { return new ReconDBDefinition(dbName); } else if (dbName.startsWith(RECON_OM_SNAPSHOT_DB)) { - return new OMDBDefinition(); + return OMDBDefinition.get(); } return null; } public static void setDnDBSchemaVersion(String dnDBSchemaVersion) { - DBDefinitionFactory.dnDBSchemaVersion = dnDBSchemaVersion; + DATANODE_DB_SCHEMA_VERSION.set(dnDBSchemaVersion); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/FindMissingPadding.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/FindMissingPadding.java index 0c7ba187ce1..21b572fbc4c 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/FindMissingPadding.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/FindMissingPadding.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.ozone.debug; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.DebugSubcommand; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; @@ -74,11 +74,8 @@ @CommandLine.Command(name = "find-missing-padding", aliases = { "fmp" }, description = "List all keys with any missing padding, optionally limited to a volume/bucket/key URI.") -@MetaInfServices(SubcommandWithParent.class) -public class FindMissingPadding extends Handler implements SubcommandWithParent { - - @CommandLine.ParentCommand - private OzoneDebug parent; +@MetaInfServices(DebugSubcommand.class) +public class FindMissingPadding extends Handler implements DebugSubcommand { @CommandLine.Mixin private ScmOption scmOption; @@ -100,11 +97,6 @@ protected OzoneAddress getAddress() throws OzoneClientException { return new OzoneAddress(uri); } - @Override - public Class getParentType() { - return OzoneDebug.class; - } - @Override protected void execute(OzoneClient ozoneClient, OzoneAddress address) throws IOException { findCandidateKeys(ozoneClient, address); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/LeaseRecoverer.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/LeaseRecoverer.java index a8891404e05..9c3865ae241 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/LeaseRecoverer.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/LeaseRecoverer.java @@ -24,7 +24,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.LeaseRecoverable; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.DebugSubcommand; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.kohsuke.MetaInfServices; @@ -40,11 +40,8 @@ customSynopsis = "ozone debug recover --path=", description = "recover the lease of a specified file. 
Make sure to specify " + "file system scheme if ofs:// is not the default.") -@MetaInfServices(SubcommandWithParent.class) -public class LeaseRecoverer implements Callable, SubcommandWithParent { - - @CommandLine.ParentCommand - private OzoneDebug parent; +@MetaInfServices(DebugSubcommand.class) +public class LeaseRecoverer implements Callable, DebugSubcommand { @Spec private CommandSpec spec; @@ -62,11 +59,6 @@ public void setPath(String dbPath) { this.path = dbPath; } - @Override - public Class getParentType() { - return OzoneDebug.class; - } - @Override public Void call() throws Exception { OzoneConfiguration configuration = new OzoneConfiguration(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/OzoneDebug.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/OzoneDebug.java index 3d6cf570934..164d07f96b4 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/OzoneDebug.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/OzoneDebug.java @@ -18,11 +18,11 @@ package org.apache.hadoop.ozone.debug; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hdds.cli.DebugSubcommand; +import org.apache.hadoop.hdds.cli.ExtensibleParentCommand; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import picocli.CommandLine; /** @@ -32,35 +32,14 @@ description = "Developer tools for Ozone Debug operations", versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) -public class OzoneDebug extends GenericCli { +public class OzoneDebug extends GenericCli implements ExtensibleParentCommand { - private OzoneConfiguration ozoneConf; - - public OzoneDebug() { - super(OzoneDebug.class); - } - - @VisibleForTesting - public OzoneDebug(OzoneConfiguration configuration) { - super(OzoneDebug.class); - this.ozoneConf = configuration; - } - - public OzoneConfiguration getOzoneConf() { - if (ozoneConf == null) { - ozoneConf = createOzoneConfiguration(); - } - return ozoneConf; + public static void main(String[] argv) { + new OzoneDebug().run(argv); } - /** - * Main for the Ozone Debug shell Command handling. 
- * - * @param argv - System Args Strings[] - * @throws Exception - */ - public static void main(String[] argv) throws Exception { - - new OzoneDebug().run(argv); + @Override + public Class subcommandType() { + return DebugSubcommand.class; } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java index fabc7f456ae..cdda3e5e0f9 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java @@ -26,7 +26,7 @@ import java.util.concurrent.Callable; import java.nio.file.Path; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.DebugSubcommand; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.MetadataKeyFilters; @@ -53,8 +53,8 @@ @CommandLine.Command( name = "prefix", description = "Parse prefix contents") -@MetaInfServices(SubcommandWithParent.class) -public class PrefixParser implements Callable, SubcommandWithParent { +@MetaInfServices(DebugSubcommand.class) +public class PrefixParser implements Callable, DebugSubcommand { /** * Types to represent the level or path component type. @@ -101,11 +101,6 @@ public void setDbPath(String dbPath) { this.dbPath = dbPath; } - @Override - public Class getParentType() { - return OzoneDebug.class; - } - @Override public Void call() throws Exception { parse(volume, bucket, dbPath, filePath); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java index 48ed7c74ae7..c88245a571b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java @@ -17,14 +17,13 @@ package org.apache.hadoop.ozone.debug; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.DebugSubcommand; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; @@ -61,8 +60,8 @@ @CommandLine.Command(name = "read-replicas", description = "Reads every replica for all the blocks associated with a " + "given key.") -@MetaInfServices(SubcommandWithParent.class) -public class ReadReplicas extends KeyHandler implements SubcommandWithParent { +@MetaInfServices(DebugSubcommand.class) +public class ReadReplicas extends KeyHandler implements DebugSubcommand { @CommandLine.Option(names = {"--outputDir", "-o", "--output-dir"}, description = "Destination where the directory will be created" + @@ -83,14 +82,9 @@ public class ReadReplicas extends KeyHandler implements SubcommandWithParent { private static final String JSON_PROPERTY_REPLICA_UUID = "uuid"; private static final String JSON_PROPERTY_REPLICA_EXCEPTION = "exception"; - @Override - public Class getParentType() { - return OzoneDebug.class; - } - @Override protected void execute(OzoneClient 
client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { address.ensureKeyAddress(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/VersionDebug.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/VersionDebug.java new file mode 100644 index 00000000000..54b2e4c9986 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/VersionDebug.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.debug; + +import com.google.common.collect.ImmutableSortedMap; +import org.apache.hadoop.hdds.ComponentVersion; +import org.apache.hadoop.hdds.DatanodeVersion; +import org.apache.hadoop.hdds.cli.DebugSubcommand; +import org.apache.hadoop.hdds.server.JsonUtils; +import org.apache.hadoop.ozone.ClientVersion; +import org.apache.hadoop.ozone.OzoneManagerVersion; +import org.apache.hadoop.ozone.util.OzoneVersionInfo; +import org.kohsuke.MetaInfServices; +import picocli.CommandLine; + +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.Callable; + +/** Show internal component version information as JSON. */ +@CommandLine.Command( + name = "version", + description = "Show internal version of Ozone components, as defined in the artifacts where this command is " + + "executed. It does not communicate with any Ozone services. Run the same command on different nodes to " + + "get a cross-component view of versions. The goal of this command is to help quickly get a glance of the " + + "latest features supported by Ozone on the current node." 
+) +@MetaInfServices(DebugSubcommand.class) +public class VersionDebug implements Callable, DebugSubcommand { + + @Override + public Void call() throws IOException { + System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(ImmutableSortedMap.of( + "ozone", ImmutableSortedMap.of( + "revision", OzoneVersionInfo.OZONE_VERSION_INFO.getRevision(), + "url", OzoneVersionInfo.OZONE_VERSION_INFO.getUrl(), + "version", OzoneVersionInfo.OZONE_VERSION_INFO.getVersion() + ), + "components", ImmutableSortedMap.of( + "client", asMap(ClientVersion.CURRENT), + "datanode", asMap(DatanodeVersion.CURRENT), + "om", asMap(OzoneManagerVersion.CURRENT) + ) + ))); + return null; + } + + private static & ComponentVersion> Map asMap(T version) { + return ImmutableSortedMap.of( + "componentVersion", ImmutableSortedMap.of( + "name", version.name(), + "protoValue", version.toProtoValue() + ) + ); + } + +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkDataNodeDetails.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkDataNodeDetails.java similarity index 96% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkDataNodeDetails.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkDataNodeDetails.java index 6019e5806dd..cf6b7d7a11d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkDataNodeDetails.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkDataNodeDetails.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.debug.chunk; /** * Class that gives datanode details on which the chunk is present. */ diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkDetails.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkDetails.java similarity index 96% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkDetails.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkDetails.java index 278c2bf055c..4e2b5314a06 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkDetails.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkDetails.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.debug.chunk; /** * Class that gives chunkDetails. diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkKeyHandler.java similarity index 96% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkKeyHandler.java index b5b2364007f..6944c380493 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkKeyHandler.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.debug.chunk; import java.io.File; import java.io.IOException; @@ -27,7 +27,7 @@ import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.DebugSubcommand; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -41,8 +41,8 @@ import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; +import org.apache.hadoop.ozone.debug.OzoneDebug; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -61,9 +61,9 @@ @Command(name = "chunkinfo", description = "returns chunk location" + " information about an existing key") -@MetaInfServices(SubcommandWithParent.class) +@MetaInfServices(DebugSubcommand.class) public class ChunkKeyHandler extends KeyHandler implements - SubcommandWithParent { + DebugSubcommand { @CommandLine.ParentCommand private OzoneDebug parent; @@ -74,7 +74,7 @@ private String getChunkLocationPath(String containerLocation) { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { try (ContainerOperationClient containerOperationClient = new ContainerOperationClient(parent.getOzoneConf()); XceiverClientManager xceiverClientManager = containerOperationClient.getXceiverClientManager()) { OzoneManagerProtocol ozoneManagerClient = client.getObjectStore().getClientProxy().getOzoneManagerClient(); @@ -201,10 +201,4 @@ private boolean isECParityBlock(Pipeline pipeline, DatanodeDetails dn) { return pipeline.getReplicaIndex(dn) > ((ECReplicationConfig) pipeline.getReplicationConfig()).getData(); } - - @Override - public Class getParentType() { - return OzoneDebug.class; - } - } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkType.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkType.java similarity index 95% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkType.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkType.java index 610eab54d6f..3af7f810402 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkType.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkType.java @@ -14,7 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.debug.chunk; /** * The type of chunks of an Erasure Coded key. 
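The debug tool conversions above (print-log-dag, chunkinfo, find-missing-padding, lease recovery, prefix, read-replicas, version) all follow one pattern: drop getParentType(), implement the DebugSubcommand marker interface, and register through @MetaInfServices, while OzoneDebug now advertises that marker type via ExtensibleParentCommand.subcommandType(). A minimal sketch of a new subcommand under this scheme; the class name and body are hypothetical, and the META-INF/services based discovery is assumed from the annotations this patch uses:

    @CommandLine.Command(name = "example",
        description = "Hypothetical debug subcommand")
    @MetaInfServices(DebugSubcommand.class)
    public class ExampleDebug implements Callable<Void>, DebugSubcommand {

      @Override
      public Void call() {
        // Picked up under 'ozone debug' without editing OzoneDebug itself.
        System.out.println("hello from a discovered debug subcommand");
        return null;
      }
    }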
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ContainerChunkInfo.java similarity index 98% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ContainerChunkInfo.java index 130c1bca0fc..1c5fc090b0e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ContainerChunkInfo.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.debug.chunk; import com.fasterxml.jackson.annotation.JsonInclude; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/package-info.java new file mode 100644 index 00000000000..d81f2276a65 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Command to debug chunk information. 
+ */ +package org.apache.hadoop.ozone.debug.chunk; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java index a0aba2a1b15..fae1189d689 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java @@ -21,7 +21,7 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.DebugSubcommand; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; @@ -82,8 +82,8 @@ ExportSubcommand.class, InspectSubcommand.class }) -@MetaInfServices(SubcommandWithParent.class) -public class ContainerCommands implements Callable, SubcommandWithParent { +@MetaInfServices(DebugSubcommand.class) +public class ContainerCommands implements Callable, DebugSubcommand { private static final Logger LOG = LoggerFactory.getLogger(ContainerCommands.class); @@ -104,11 +104,6 @@ public Void call() throws Exception { return null; } - @Override - public Class getParentType() { - return OzoneDebug.class; - } - OzoneConfiguration getOzoneConf() { return parent.getOzoneConf(); } @@ -116,7 +111,7 @@ OzoneConfiguration getOzoneConf() { public void loadContainersFromVolumes() throws IOException { OzoneConfiguration conf = parent.getOzoneConf(); - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = new ContainerSet(null, 1000, true); ContainerMetrics metrics = ContainerMetrics.create(conf); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/DBScanner.java similarity index 74% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/DBScanner.java index 5e1207519ab..6fbbd1a3083 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/DBScanner.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.debug.ldb; import com.fasterxml.jackson.annotation.JsonAutoDetect; import com.fasterxml.jackson.annotation.JsonInclude; @@ -27,7 +27,6 @@ import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; @@ -44,8 +43,9 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition; +import org.apache.hadoop.ozone.debug.DBDefinitionFactory; +import org.apache.hadoop.ozone.debug.RocksDBUtils; import org.apache.hadoop.ozone.utils.Filter; -import org.kohsuke.MetaInfServices; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDBException; @@ -54,6 +54,7 @@ import picocli.CommandLine; import java.io.BufferedWriter; +import java.io.File; import java.io.IOException; import java.io.PrintWriter; import java.lang.reflect.Field; @@ -74,6 +75,8 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import static java.nio.charset.StandardCharsets.UTF_8; @@ -84,8 +87,7 @@ name = "scan", description = "Parse specified metadataTable" ) -@MetaInfServices(SubcommandWithParent.class) -public class DBScanner implements Callable, SubcommandWithParent { +public class DBScanner implements Callable { public static final Logger LOG = LoggerFactory.getLogger(DBScanner.class); private static final String SCHEMA_V3 = "V3"; @@ -104,7 +106,7 @@ public class DBScanner implements Callable, SubcommandWithParent { @CommandLine.Option(names = {"--with-keys"}, description = "Print a JSON object of key->value pairs (default)" + " instead of a JSON array of only values.", - defaultValue = "true") + defaultValue = "true", fallbackValue = "true") private boolean withKey; @CommandLine.Option(names = {"--length", "--limit", "-l"}, @@ -132,9 +134,11 @@ public class DBScanner implements Callable, SubcommandWithParent { @CommandLine.Option(names = {"--filter"}, description = "Comma-separated list of \"::\" where " + " is any valid field of the record, " + - " is (EQUALS,MAX or MIN) and " + - " is the value of the field. " + - "eg.) \"dataSize:equals:1000\" for showing records having the value 1000 for dataSize") + " is [EQUALS,LESSER, GREATER or REGEX]. (EQUALS compares the exact string, " + + "REGEX compares with a valid regular expression passed, and LESSER/GREATER works with numeric values), " + + " is the value of the field. \n" + + "eg.) 
\"dataSize:equals:1000\" for showing records having the value 1000 for dataSize, \n" + + " \"keyName:regex:^key.*$\" for showing records having keyName that matches the given regex.") private String filter; @CommandLine.Option(names = {"--dnSchema", "--dn-schema", "-d"}, @@ -168,6 +172,14 @@ public class DBScanner implements Callable, SubcommandWithParent { defaultValue = "10") private int threadCount; + @CommandLine.Option(names = {"--max-records-per-file"}, + description = "The number of records to print per file.", + defaultValue = "0") + private long recordsPerFile; + + private int fileSuffix = 0; + private long globalCount = 0; + private static final String KEY_SEPARATOR_SCHEMA_V3 = new OzoneConfiguration().getObject(DatanodeConfiguration.class) .getContainerSchemaV3KeySeparator(); @@ -176,7 +188,8 @@ public class DBScanner implements Callable, SubcommandWithParent { @Override public Void call() throws Exception { - + fileSuffix = 0; + globalCount = 0; List cfDescList = RocksDBUtils.getColumnFamilyDescriptors(parent.getDbPath()); final List cfHandleList = new ArrayList<>(); @@ -236,11 +249,29 @@ private boolean displayTable(ManagedRocksIterator iterator, return displayTable(iterator, dbColumnFamilyDef, out(), schemaV3); } + // If there are no parent directories, create them + File file = new File(fileName); + File parentFile = file.getParentFile(); + if (!parentFile.exists()) { + boolean flg = parentFile.mkdirs(); + if (!flg) { + throw new IOException("An exception occurred while creating " + + "the directory. Directorys: " + parentFile.getAbsolutePath()); + } + } + // Write to file output - try (PrintWriter out = new PrintWriter(new BufferedWriter( - new PrintWriter(fileName, UTF_8.name())))) { - return displayTable(iterator, dbColumnFamilyDef, out, schemaV3); + while (iterator.get().isValid() && withinLimit(globalCount)) { + String fileNameTarget = recordsPerFile > 0 ? fileName + "." + fileSuffix++ : + fileName; + try (PrintWriter out = new PrintWriter(new BufferedWriter( + new PrintWriter(fileNameTarget, UTF_8.name())))) { + if (!displayTable(iterator, dbColumnFamilyDef, out, schemaV3)) { + return false; + } + } } + return true; } private boolean displayTable(ManagedRocksIterator iterator, @@ -261,7 +292,7 @@ private boolean displayTable(ManagedRocksIterator iterator, logWriter.start(); processRecords(iterator, dbColumnFamilyDef, logWriter, threadPool, schemaV3); - } catch (InterruptedException e) { + } catch (InterruptedException | IOException e) { exception = true; Thread.currentThread().interrupt(); } finally { @@ -277,7 +308,7 @@ private boolean displayTable(ManagedRocksIterator iterator, private void processRecords(ManagedRocksIterator iterator, DBColumnFamilyDefinition dbColumnFamilyDef, LogWriter logWriter, ExecutorService threadPool, - boolean schemaV3) throws InterruptedException { + boolean schemaV3) throws InterruptedException, IOException { if (startKey != null) { iterator.get().seek(getValueObject(dbColumnFamilyDef, startKey)); } @@ -289,33 +320,64 @@ private void processRecords(ManagedRocksIterator iterator, long count = 0; List> futures = new ArrayList<>(); boolean reachedEnd = false; - while (withinLimit(count) && iterator.get().isValid() && !exception && !reachedEnd) { + + Map fieldsFilterSplitMap = new HashMap<>(); + if (filter != null) { + for (String field : filter.split(",")) { + String[] fieldValue = field.split(":"); + if (fieldValue.length != 3) { + err().println("Error: Invalid format for filter \"" + field + + "\". Usage: ::. 
Ignoring filter passed"); + } else { + Filter filterValue = new Filter(fieldValue[1], fieldValue[2]); + if (filterValue.getOperator() == null) { + err().println("Error: Invalid operator for filter \"" + filterValue + + "\". can be one of [EQUALS,LESSER,GREATER]. Ignoring filter passed"); + } else { + String[] subfields = fieldValue[0].split("\\."); + getFilterSplit(Arrays.asList(subfields), fieldsFilterSplitMap, filterValue); + } + } + } + } + + while (withinLimit(globalCount) && iterator.get().isValid() && !exception && !reachedEnd) { // if invalid endKey is given, it is ignored if (null != endKey && Arrays.equals(iterator.get().key(), getValueObject(dbColumnFamilyDef, endKey))) { reachedEnd = true; } - batch.add(new ByteArrayKeyValue( - iterator.get().key(), iterator.get().value())); - iterator.get().next(); - count++; - if (batch.size() >= batchSize) { - while (logWriter.getInflightLogCount() > threadCount * 10L - && !exception) { - // Prevents too many unfinished Tasks from - // consuming too much memory. - Thread.sleep(100); + + Object o = dbColumnFamilyDef.getValueCodec().fromPersistedFormat(iterator.get().value()); + if (filter == null || + checkFilteredObject(o, dbColumnFamilyDef.getValueType(), fieldsFilterSplitMap)) { + // the record passes the filter + batch.add(new ByteArrayKeyValue( + iterator.get().key(), iterator.get().value())); + globalCount++; + count++; + if (batch.size() >= batchSize) { + while (logWriter.getInflightLogCount() > threadCount * 10L + && !exception) { + // Prevents too many unfinished Tasks from + // consuming too much memory. + Thread.sleep(100); + } + Future future = threadPool.submit( + new Task(dbColumnFamilyDef, batch, logWriter, sequenceId, + withKey, schemaV3, fieldsFilter)); + futures.add(future); + batch = new ArrayList<>(batchSize); + sequenceId++; } - Future future = threadPool.submit( - new Task(dbColumnFamilyDef, batch, logWriter, sequenceId, - withKey, schemaV3, fieldsFilter, filter)); - futures.add(future); - batch = new ArrayList<>(batchSize); - sequenceId++; + } + iterator.get().next(); + if ((recordsPerFile > 0) && (count >= recordsPerFile)) { + break; } } if (!batch.isEmpty()) { Future future = threadPool.submit(new Task(dbColumnFamilyDef, - batch, logWriter, sequenceId, withKey, schemaV3, fieldsFilter, filter)); + batch, logWriter, sequenceId, withKey, schemaV3, fieldsFilter)); futures.add(future); } @@ -328,6 +390,158 @@ private void processRecords(ManagedRocksIterator iterator, } } + private void getFilterSplit(List fields, Map fieldMap, Filter leafValue) throws IOException { + int len = fields.size(); + if (len == 1) { + Filter currentValue = fieldMap.get(fields.get(0)); + if (currentValue != null) { + err().println("Cannot pass multiple values for the same field and " + + "cannot have filter for both parent and child"); + throw new IOException("Invalid filter passed"); + } + fieldMap.put(fields.get(0), leafValue); + } else { + Filter fieldMapGet = fieldMap.computeIfAbsent(fields.get(0), k -> new Filter()); + if (fieldMapGet.getValue() != null) { + err().println("Cannot pass multiple values for the same field and " + + "cannot have filter for both parent and child"); + throw new IOException("Invalid filter passed"); + } + Map nextLevel = fieldMapGet.getNextLevel(); + if (nextLevel == null) { + fieldMapGet.setNextLevel(new HashMap<>()); + } + getFilterSplit(fields.subList(1, len), fieldMapGet.getNextLevel(), leafValue); + } + } + + private boolean checkFilteredObject(Object obj, Class clazz, Map fieldsSplitMap) { + for (Map.Entry 
field : fieldsSplitMap.entrySet()) { + try { + Field valueClassField = getRequiredFieldFromAllFields(clazz, field.getKey()); + Object valueObject = valueClassField.get(obj); + Filter fieldValue = field.getValue(); + + if (valueObject == null) { + // there is no such field in the record. This filter will be ignored for the current record. + continue; + } + if (fieldValue == null) { + err().println("Malformed filter. Check input"); + throw new IOException("Invalid filter passed"); + } else if (fieldValue.getNextLevel() == null) { + // reached the end of fields hierarchy, check if they match the filter + try { + switch (fieldValue.getOperator()) { + case EQUALS: + if (!String.valueOf(valueObject).equals(fieldValue.getValue())) { + return false; + } + break; + case GREATER: + if (Double.parseDouble(String.valueOf(valueObject)) + < Double.parseDouble(String.valueOf(fieldValue.getValue()))) { + return false; + } + break; + case LESSER: + if (Double.parseDouble(String.valueOf(valueObject)) + > Double.parseDouble(String.valueOf(fieldValue.getValue()))) { + return false; + } + break; + case REGEX: + Pattern p = Pattern.compile(String.valueOf(fieldValue.getValue())); + Matcher m = p.matcher(String.valueOf(valueObject)); + if (!m.find()) { + return false; + } + break; + default: + err().println("Only EQUALS/LESSER/GREATER/REGEX operator is supported currently."); + throw new IOException("Invalid filter passed"); + } + } catch (NumberFormatException ex) { + err().println("LESSER or GREATER operation can be performed only on numeric values."); + throw new IOException("Invalid filter passed"); + } + } else { + Map subfields = fieldValue.getNextLevel(); + if (Collection.class.isAssignableFrom(valueObject.getClass())) { + if (!checkFilteredObjectCollection((Collection) valueObject, subfields)) { + return false; + } + } else if (Map.class.isAssignableFrom(valueObject.getClass())) { + Map valueObjectMap = (Map) valueObject; + boolean flag = false; + for (Map.Entry ob : valueObjectMap.entrySet()) { + boolean subflag; + if (Collection.class.isAssignableFrom(ob.getValue().getClass())) { + subflag = checkFilteredObjectCollection((Collection)ob.getValue(), subfields); + } else { + subflag = checkFilteredObject(ob.getValue(), ob.getValue().getClass(), subfields); + } + if (subflag) { + // atleast one item in the map/list of the record has matched the filter, + // so record passes the filter. 
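To make the new filter semantics concrete: a --filter argument is a comma-separated list of <field>:<operator>:<value> triples, dotted field names descend into nested values, equals and regex compare the stringified field value, and lesser/greater parse both sides as doubles and are inclusive. The following standalone sketch mirrors that comparison logic; the class and method names are hypothetical and it is not code from this patch.

import java.util.regex.Pattern;

public final class FilterCheckSketch {
  enum Op { EQUALS, LESSER, GREATER, REGEX }

  // Mirrors the operator handling added to DBScanner: string compare for EQUALS,
  // inclusive numeric compare for LESSER/GREATER, Matcher.find() for REGEX.
  static boolean matches(Object fieldValue, Op op, String expected) {
    String actual = String.valueOf(fieldValue);
    switch (op) {
      case EQUALS:  return actual.equals(expected);
      case LESSER:  return Double.parseDouble(actual) <= Double.parseDouble(expected);
      case GREATER: return Double.parseDouble(actual) >= Double.parseDouble(expected);
      case REGEX:   return Pattern.compile(expected).matcher(actual).find();
      default:      throw new IllegalArgumentException("Unsupported operator: " + op);
    }
  }

  public static void main(String[] args) {
    System.out.println(matches(1000L, Op.EQUALS, "1000"));      // "dataSize:equals:1000"
    System.out.println(matches("key-42", Op.REGEX, "^key.*$")); // "keyName:regex:^key.*$"
    System.out.println(matches(1000L, Op.LESSER, "2048"));      // numeric and inclusive
  }
}

A record is emitted only if every triple matches; for collection- or map-valued fields it is enough that a single element matches, as the checkFilteredObjectCollection loop above shows.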
+ flag = true; + break; + } + } + if (!flag) { + // none of the items in the map/list passed the filter => record doesn't pass the filter + return false; + } + } else { + if (!checkFilteredObject(valueObject, valueClassField.getType(), subfields)) { + return false; + } + } + } + } catch (NoSuchFieldException ex) { + err().println("ERROR: no such field: " + field); + exception = true; + return false; + } catch (IllegalAccessException e) { + err().println("ERROR: Cannot get field \"" + field + "\" from record."); + exception = true; + return false; + } catch (Exception ex) { + err().println("ERROR: field: " + field + ", ex: " + ex); + exception = true; + return false; + } + } + return true; + } + + private boolean checkFilteredObjectCollection(Collection valueObject, Map fields) + throws NoSuchFieldException, IllegalAccessException, IOException { + for (Object ob : valueObject) { + if (checkFilteredObject(ob, ob.getClass(), fields)) { + return true; + } + } + return false; + } + + static Field getRequiredFieldFromAllFields(Class clazz, String fieldName) throws NoSuchFieldException { + List classFieldList = ValueSchema.getAllFields(clazz); + Field classField = null; + for (Field f : classFieldList) { + if (f.getName().equals(fieldName)) { + classField = f; + break; + } + } + if (classField == null) { + err().println("Error: Invalid field \"" + fieldName + "\" passed for filter"); + throw new NoSuchFieldException(); + } + classField.setAccessible(true); + return classField; + } + private boolean withinLimit(long i) { return limit == -1L || i < limit; } @@ -429,11 +643,6 @@ private String removeTrailingSlashIfNeeded(String dbPath) { return dbPath; } - @Override - public Class getParentType() { - return RDBParser.class; - } - /** * Utility for centralized JSON serialization using Jackson. 
*/ @@ -482,12 +691,11 @@ private static class Task implements Callable { private final boolean withKey; private final boolean schemaV3; private String valueFields; - private String valueFilter; @SuppressWarnings("checkstyle:parameternumber") Task(DBColumnFamilyDefinition dbColumnFamilyDefinition, ArrayList batch, LogWriter logWriter, - long sequenceId, boolean withKey, boolean schemaV3, String valueFields, String filter) { + long sequenceId, boolean withKey, boolean schemaV3, String valueFields) { this.dbColumnFamilyDefinition = dbColumnFamilyDefinition; this.batch = batch; this.logWriter = logWriter; @@ -495,7 +703,6 @@ private static class Task implements Callable { this.withKey = withKey; this.schemaV3 = schemaV3; this.valueFields = valueFields; - this.valueFilter = filter; } Map getFieldSplit(List fields, Map fieldMap) { @@ -516,31 +723,6 @@ Map getFieldSplit(List fields, Map field return fieldMap; } - void getFilterSplit(List fields, Map fieldMap, Filter leafValue) throws IOException { - int len = fields.size(); - if (len == 1) { - Filter currentValue = fieldMap.get(fields.get(0)); - if (currentValue != null) { - err().println("Cannot pass multiple values for the same field and " + - "cannot have filter for both parent and child"); - throw new IOException("Invalid filter passed"); - } - fieldMap.put(fields.get(0), leafValue); - } else { - Filter fieldMapGet = fieldMap.computeIfAbsent(fields.get(0), k -> new Filter()); - if (fieldMapGet.getValue() != null) { - err().println("Cannot pass multiple values for the same field and " + - "cannot have filter for both parent and child"); - throw new IOException("Invalid filter passed"); - } - Map nextLevel = fieldMapGet.getNextLevel(); - if (nextLevel == null) { - fieldMapGet.setNextLevel(new HashMap<>()); - } - getFilterSplit(fields.subList(1, len), fieldMapGet.getNextLevel(), leafValue); - } - } - @Override public Void call() { try { @@ -554,26 +736,6 @@ public Void call() { } } - Map fieldsFilterSplitMap = new HashMap<>(); - if (valueFilter != null) { - for (String field : valueFilter.split(",")) { - String[] fieldValue = field.split(":"); - if (fieldValue.length != 3) { - err().println("Error: Invalid format for filter \"" + field - + "\". Usage: ::. Ignoring filter passed"); - } else { - Filter filter = new Filter(fieldValue[1], fieldValue[2]); - if (filter.getOperator() == null) { - err().println("Error: Invalid format for filter \"" + filter - + "\". can be one of [EQUALS,MIN,MAX]. 
Ignoring filter passed"); - } else { - String[] subfields = fieldValue[0].split("\\."); - getFilterSplit(Arrays.asList(subfields), fieldsFilterSplitMap, filter); - } - } - } - } - for (ByteArrayKeyValue byteArrayKeyValue : batch) { StringBuilder sb = new StringBuilder(); if (!(sequenceId == FIRST_SEQUENCE_ID && results.isEmpty())) { @@ -609,11 +771,6 @@ public Void call() { Object o = dbColumnFamilyDefinition.getValueCodec() .fromPersistedFormat(byteArrayKeyValue.getValue()); - if (valueFilter != null && - !checkFilteredObject(o, dbColumnFamilyDefinition.getValueType(), fieldsFilterSplitMap)) { - // the record doesn't pass the filter - continue; - } if (valueFields != null) { Map filteredValue = new HashMap<>(); filteredValue.putAll(getFieldsFilteredObject(o, dbColumnFamilyDefinition.getValueType(), fieldsSplitMap)); @@ -632,91 +789,6 @@ public Void call() { return null; } - boolean checkFilteredObject(Object obj, Class clazz, Map fieldsSplitMap) - throws IOException { - for (Map.Entry field : fieldsSplitMap.entrySet()) { - try { - Field valueClassField = getRequiredFieldFromAllFields(clazz, field.getKey()); - Object valueObject = valueClassField.get(obj); - Filter fieldValue = field.getValue(); - - if (valueObject == null) { - // there is no such field in the record. This filter will be ignored for the current record. - continue; - } - if (fieldValue == null) { - err().println("Malformed filter. Check input"); - throw new IOException("Invalid filter passed"); - } else if (fieldValue.getNextLevel() == null) { - // reached the end of fields hierarchy, check if they match the filter - // Currently, only equals operation is supported - if (Filter.FilterOperator.EQUALS.equals(fieldValue.getOperator()) && - !String.valueOf(valueObject).equals(fieldValue.getValue())) { - return false; - } else if (!Filter.FilterOperator.EQUALS.equals(fieldValue.getOperator())) { - err().println("Only EQUALS operator is supported currently."); - throw new IOException("Invalid filter passed"); - } - } else { - Map subfields = fieldValue.getNextLevel(); - if (Collection.class.isAssignableFrom(valueObject.getClass())) { - if (!checkFilteredObjectCollection((Collection) valueObject, subfields)) { - return false; - } - } else if (Map.class.isAssignableFrom(valueObject.getClass())) { - Map valueObjectMap = (Map) valueObject; - boolean flag = false; - for (Map.Entry ob : valueObjectMap.entrySet()) { - boolean subflag; - if (Collection.class.isAssignableFrom(ob.getValue().getClass())) { - subflag = checkFilteredObjectCollection((Collection)ob.getValue(), subfields); - } else { - subflag = checkFilteredObject(ob.getValue(), ob.getValue().getClass(), subfields); - } - if (subflag) { - // atleast one item in the map/list of the record has matched the filter, - // so record passes the filter. 
- flag = true; - break; - } - } - if (!flag) { - // none of the items in the map/list passed the filter => record doesn't pass the filter - return false; - } - } else { - if (!checkFilteredObject(valueObject, valueClassField.getType(), subfields)) { - return false; - } - } - } - } catch (NoSuchFieldException ex) { - err().println("ERROR: no such field: " + field); - exception = true; - return false; - } catch (IllegalAccessException e) { - err().println("ERROR: Cannot get field from object: " + field); - exception = true; - return false; - } catch (Exception ex) { - err().println("ERROR: field: " + field + ", ex: " + ex); - exception = true; - return false; - } - } - return true; - } - - boolean checkFilteredObjectCollection(Collection valueObject, Map fields) - throws NoSuchFieldException, IllegalAccessException, IOException { - for (Object ob : valueObject) { - if (checkFilteredObject(ob, ob.getClass(), fields)) { - return true; - } - } - return false; - } - Map getFieldsFilteredObject(Object obj, Class clazz, Map fieldsSplitMap) { Map valueMap = new HashMap<>(); for (Map.Entry field : fieldsSplitMap.entrySet()) { @@ -768,22 +840,6 @@ List getFieldsFilteredObjectCollection(Collection valueObject, Map classFieldList = ValueSchema.getAllFields(clazz); - Field classField = null; - for (Field f : classFieldList) { - if (f.getName().equals(fieldName)) { - classField = f; - break; - } - } - if (classField == null) { - throw new NoSuchFieldException(); - } - classField.setAccessible(true); - return classField; - } } private static class ByteArrayKeyValue { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DropTable.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/DropTable.java similarity index 90% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DropTable.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/DropTable.java index 745712850b9..5326adc0e2b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DropTable.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/DropTable.java @@ -16,10 +16,10 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.debug.ldb; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; +import org.apache.hadoop.ozone.debug.RocksDBUtils; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; import picocli.CommandLine; @@ -37,7 +37,7 @@ name = "drop_column_family", description = "drop column family in db." 
) -public class DropTable implements Callable, SubcommandWithParent { +public class DropTable implements Callable { @CommandLine.Option(names = {"--column-family", "--column_family"}, description = "Table name") @@ -73,9 +73,4 @@ public Void call() throws Exception { } return null; } - - @Override - public Class getParentType() { - return RDBParser.class; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ListTables.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/ListTables.java similarity index 82% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ListTables.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/ListTables.java index 494f42e5877..d115a44da8b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ListTables.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/ListTables.java @@ -16,16 +16,13 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.debug.ldb; import java.nio.charset.StandardCharsets; import java.util.List; import java.util.concurrent.Callable; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; - import org.apache.hadoop.hdds.utils.db.RocksDatabase; -import org.kohsuke.MetaInfServices; import picocli.CommandLine; /** @@ -36,8 +33,7 @@ aliases = "ls", description = "list all column families in db." ) -@MetaInfServices(SubcommandWithParent.class) -public class ListTables implements Callable, SubcommandWithParent { +public class ListTables implements Callable { @CommandLine.ParentCommand private RDBParser parent; @@ -51,9 +47,4 @@ public Void call() throws Exception { } return null; } - - @Override - public Class getParentType() { - return RDBParser.class; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RDBParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/RDBParser.java similarity index 81% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RDBParser.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/RDBParser.java index f133386ab13..4e945c7c418 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RDBParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/RDBParser.java @@ -16,12 +16,12 @@ * limitations under the License. 
*/ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.debug.ldb; import java.util.concurrent.Callable; import org.apache.hadoop.hdds.cli.GenericCli; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.DebugSubcommand; import org.kohsuke.MetaInfServices; import picocli.CommandLine; @@ -33,9 +33,15 @@ */ @CommandLine.Command( name = "ldb", + subcommands = { + DBScanner.class, + DropTable.class, + ListTables.class, + ValueSchema.class, + }, description = "Parse rocksdb file content") -@MetaInfServices(SubcommandWithParent.class) -public class RDBParser implements Callable, SubcommandWithParent { +@MetaInfServices(DebugSubcommand.class) +public class RDBParser implements Callable, DebugSubcommand { @Spec private CommandSpec spec; @@ -53,11 +59,6 @@ public void setDbPath(String dbPath) { this.dbPath = dbPath; } - @Override - public Class getParentType() { - return OzoneDebug.class; - } - @Override public Void call() throws Exception { GenericCli.missingSubcommand(spec); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ValueSchema.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/ValueSchema.java similarity index 94% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ValueSchema.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/ValueSchema.java index b06be2aff53..4b8eb3b3208 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ValueSchema.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/ValueSchema.java @@ -16,15 +16,14 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.debug.ldb; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; import org.apache.hadoop.hdds.utils.db.DBDefinition; import org.apache.hadoop.ozone.OzoneConsts; -import org.kohsuke.MetaInfServices; +import org.apache.hadoop.ozone.debug.DBDefinitionFactory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine; @@ -52,8 +51,7 @@ name = "value-schema", description = "Schema of value in metadataTable" ) -@MetaInfServices(SubcommandWithParent.class) -public class ValueSchema implements Callable, SubcommandWithParent { +public class ValueSchema implements Callable { @CommandLine.ParentCommand private RDBParser parent; @@ -172,11 +170,6 @@ private static PrintWriter out() { return spec.commandLine().getOut(); } - @Override - public Class getParentType() { - return RDBParser.class; - } - private static String removeTrailingSlashIfNeeded(String dbPath) { if (dbPath.endsWith(OzoneConsts.OZONE_URI_DELIMITER)) { dbPath = dbPath.substring(0, dbPath.length() - 1); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/package-info.java new file mode 100644 index 00000000000..d69d92e6f1b --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * RDB debug related commands. + */ +package org.apache.hadoop.ozone.debug.ldb; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/BaseLogParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/BaseLogParser.java similarity index 97% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/BaseLogParser.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/BaseLogParser.java index a3ea9f6a1f6..4d2fb4b23c2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/BaseLogParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/BaseLogParser.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.segmentparser; +package org.apache.hadoop.ozone.debug.segmentparser; import com.google.common.annotations.VisibleForTesting; import org.apache.ratis.proto.RaftProtos; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/DatanodeRatisLogParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/DatanodeRatisLogParser.java similarity index 97% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/DatanodeRatisLogParser.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/DatanodeRatisLogParser.java index 9f35e8b3c31..3f0f70c281b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/DatanodeRatisLogParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/DatanodeRatisLogParser.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.ozone.segmentparser; +package org.apache.hadoop.ozone.debug.segmentparser; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.ozone.container.common.transport.server diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/GenericRatisLogParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/GenericRatisLogParser.java similarity index 96% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/GenericRatisLogParser.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/GenericRatisLogParser.java index d989527c341..8bd0d0ff5a8 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/GenericRatisLogParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/GenericRatisLogParser.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.segmentparser; +package org.apache.hadoop.ozone.debug.segmentparser; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import picocli.CommandLine; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/OMRatisLogParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/OMRatisLogParser.java similarity index 96% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/OMRatisLogParser.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/OMRatisLogParser.java index 1ce8b63dab1..eb6214e6471 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/OMRatisLogParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/OMRatisLogParser.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.segmentparser; +package org.apache.hadoop.ozone.debug.segmentparser; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/RatisLogParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/RatisLogParser.java similarity index 73% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/RatisLogParser.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/RatisLogParser.java index d41ee2dec16..1fad2b607c1 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/RatisLogParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/RatisLogParser.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.ozone.segmentparser; +package org.apache.hadoop.ozone.debug.segmentparser; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; -import org.apache.hadoop.ozone.debug.OzoneDebug; +import org.apache.hadoop.hdds.cli.DebugSubcommand; import org.kohsuke.MetaInfServices; import picocli.CommandLine; @@ -39,15 +37,6 @@ }, versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) -@MetaInfServices(SubcommandWithParent.class) -public class RatisLogParser extends GenericCli implements SubcommandWithParent { - - public static void main(String[] args) { - new RatisLogParser().run(args); - } - - @Override - public Class getParentType() { - return OzoneDebug.class; - } +@MetaInfServices(DebugSubcommand.class) +public class RatisLogParser implements DebugSubcommand { } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/SCMRatisLogParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/SCMRatisLogParser.java similarity index 96% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/SCMRatisLogParser.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/SCMRatisLogParser.java index db0cd8bd14e..dfdbdd2d847 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/SCMRatisLogParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/SCMRatisLogParser.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.segmentparser; +package org.apache.hadoop.ozone.debug.segmentparser; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.ha.SCMRatisRequest; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/package-info.java similarity index 94% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/package-info.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/package-info.java index 727bb8aa4e0..d5c1027bfe4 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/segmentparser/package-info.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/segmentparser/package-info.java @@ -15,8 +15,8 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.segmentparser; /** * Command line utility for dump ratis log files. 
*/ +package org.apache.hadoop.ozone.debug.segmentparser; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java index 0c525457aac..f229eb43bc6 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java @@ -35,6 +35,8 @@ import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; +import org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStore; +import org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStoreImpl; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; import org.apache.hadoop.ozone.container.replication.ContainerImporter; import org.apache.hadoop.ozone.container.replication.ContainerReplicator; @@ -83,11 +85,22 @@ public class ClosedContainerReplicator extends BaseFreonGenerator implements private ContainerReplicator replicator; private Timer timer; + private WitnessedContainerMetadataStore witnessedContainerMetadataStore; private List replicationTasks; @Override public Void call() throws Exception { + try { + return replicate(); + } finally { + if (witnessedContainerMetadataStore != null) { + witnessedContainerMetadataStore.close(); + } + } + } + + public Void replicate() throws Exception { OzoneConfiguration conf = createOzoneConfiguration(); @@ -102,7 +115,7 @@ public Void call() throws Exception { new ContainerOperationClient(conf); final List containerInfos = - containerOperationClient.listContainer(0L, 1_000_000); + containerOperationClient.listContainer(0L, 1_000_000).getContainerInfoList(); //logic same as the download+import on the destination datanode initializeReplicationSupervisor(conf, containerInfos.size() * 2); @@ -174,8 +187,10 @@ private void initializeReplicationSupervisor( if (fakeDatanodeUuid.isEmpty()) { fakeDatanodeUuid = UUID.randomUUID().toString(); } - - ContainerSet containerSet = new ContainerSet(1000); + WitnessedContainerMetadataStore referenceCountedDS = + WitnessedContainerMetadataStoreImpl.get(conf); + this.witnessedContainerMetadataStore = referenceCountedDS; + ContainerSet containerSet = new ContainerSet(referenceCountedDS.getContainerIdsTable(), 1000); ContainerMetrics metrics = ContainerMetrics.create(conf); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java index 2b178ac0aec..a86b4789fef 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java @@ -463,13 +463,12 @@ private DatanodeDetails randomDatanodeDetails(ConfigurationSource config) details.setCurrentVersion(DatanodeVersion.CURRENT_VERSION); details.setHostName(HddsUtils.getHostName(config)); details.setIpAddress(randomIp()); - details.setPort(DatanodeDetails.Port.Name.STANDALONE, 0); - details.setPort(DatanodeDetails.Port.Name.RATIS, 0); - details.setPort(DatanodeDetails.Port.Name.REST, 0); + details.setStandalonePort(0); + details.setRatisPort(0); + details.setRestPort(0); 
details.setVersion(HDDS_VERSION_INFO.getVersion()); details.setSetupTime(Time.now()); details.setRevision(HDDS_VERSION_INFO.getRevision()); - details.setBuildDate(HDDS_VERSION_INFO.getDate()); details.setCurrentVersion(DatanodeVersion.CURRENT_VERSION); return details; } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java index d0c9a33b330..ccae53f345b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java @@ -75,7 +75,8 @@ DatanodeSimulator.class, OmMetadataGenerator.class, DNRPCLoadGenerator.class, - HsyncGenerator.class + HsyncGenerator.class, + OzoneClientCreator.class, }, versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) @@ -83,10 +84,6 @@ public class Freon extends GenericCli { public static final Logger LOG = LoggerFactory.getLogger(Freon.class); - public Freon() { - super(Freon.class); - } - @Option(names = "--server", description = "Enable internal http server to provide metric " + "and profile endpoint") diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HsyncGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HsyncGenerator.java index 8de2ee032d0..687030ab325 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HsyncGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HsyncGenerator.java @@ -16,17 +16,13 @@ */ package org.apache.hadoop.ozone.freon; -import java.util.concurrent.Callable; -import java.util.concurrent.atomic.AtomicInteger; - +import com.codahale.metrics.Timer; import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.conf.OzoneConfiguration; - -import com.codahale.metrics.Timer; import org.apache.hadoop.ozone.util.PayloadUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -34,17 +30,25 @@ import picocli.CommandLine.Command; import picocli.CommandLine.Option; +import java.io.IOException; +import java.net.URI; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.Callable; +import java.util.concurrent.atomic.AtomicInteger; + /** * Data generator tool test hsync/write synchronization performance. + * This tool simulates the way HBase writes transaction logs (WAL) to a file in Ozone: + * - Transactions are written to the file's OutputStream by a single thread, each transaction is numbered by an + * increasing counter. Every transaction can be serialized to the OutputStream via multiple write calls. + * - Multiple threads checks and sync (hsync) the OutputStream to make it persistent. 
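The javadoc above summarizes the rewritten generator; a standalone sketch of the same coordination pattern may make it easier to follow: a single writer thread appends numbered transactions and publishes their numbers on a queue, while several sync threads take numbers, persist once, and record the highest number already covered so later takes can skip transactions an earlier sync has made durable. All names below are hypothetical, and a plain OutputStream.flush() stands in for FSDataOutputStream.hsync().

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public final class WalSyncSketch {
  private static final BlockingQueue<Integer> WRITTEN = new ArrayBlockingQueue<>(10_000);
  private static final AtomicInteger LAST_SYNCED = new AtomicInteger(-1);
  private static final OutputStream OUT = new ByteArrayOutputStream();

  public static void main(String[] args) {
    new Thread(WalSyncSketch::writeTransactions, "txn-writer").start();
    for (int i = 0; i < 4; i++) {
      new Thread(WalSyncSketch::syncLoop, "syncer-" + i).start();
    }
  }

  // Single writer: serialize each transaction, then publish its number.
  private static void writeTransactions() {
    try {
      for (int txn = 0; txn < 1_000; txn++) {
        OUT.write(txn);
        WRITTEN.put(txn);
      }
    } catch (IOException | InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }

  // Many syncers: one flush covers everything written so far, so skip
  // transaction numbers that an earlier flush already made durable.
  private static void syncLoop() {
    try {
      Integer txn;
      while ((txn = WRITTEN.poll(1, TimeUnit.SECONDS)) != null) {
        int lastSynced = LAST_SYNCED.get();
        if (txn > lastSynced) {
          OUT.flush();
          LAST_SYNCED.compareAndSet(lastSynced, txn);
        }
      }
    } catch (IOException | InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }
}

Because one hsync covers every byte written before it, a single call can acknowledge several queued transactions at once, which is the batching effect this benchmark is meant to exercise.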
* * Example usage: * - * To generate 1000 hsync calls with 10 threads on a single file: - * ozone freon hsync-generator -t 10 --bytes-per-write=1024 -n 1000 - * - * To generate 1000 hsync calls with 10 threads on 3 files simultaneously: + * To simulate hlog that generates 1M hsync calls with 5 threads: * - * ozone freon hsync-generator -t 10 --bytes-per-write=1024 --number-of-files=3 -n 1000 + * ozone freon hsync-generator -t 5 --writes-per-transaction=32 --bytes-per-write=8 -n 1000000 * */ @Command(name = "hg", @@ -53,32 +57,38 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) -public class HsyncGenerator extends HadoopNestedDirGenerator implements Callable { +public class HsyncGenerator extends BaseFreonGenerator implements Callable { private static final Logger LOG = LoggerFactory.getLogger(HsyncGenerator.class); @CommandLine.ParentCommand private Freon freon; + @Option(names = {"--path"}, + description = "Hadoop FS file system path. Use full path.", + defaultValue = "o3fs://bucket1.vol1") + private String rootPath; + @Option(names = {"--bytes-per-write"}, description = "Size of each write", - defaultValue = "1024") + defaultValue = "8") private int writeSize; - @Option(names = {"--number-of-files"}, - description = "Number of files to run test.", - defaultValue = "1") - private int numberOfFiles; + @Option(names = {"--writes-per-transaction"}, + description = "Size of each write", + defaultValue = "32") + private int writesPerTransaction; private Timer timer; private OzoneConfiguration configuration; - private FSDataOutputStream[] outputStreams; - private Path[] files; - private AtomicInteger[] callsPerFile; + private FSDataOutputStream outputStream; + private byte[] data; + private final BlockingQueue writtenTransactions = new ArrayBlockingQueue<>(10_000); + private final AtomicInteger lastSyncedTransaction = new AtomicInteger(); public HsyncGenerator() { } - private byte[] data; + @VisibleForTesting HsyncGenerator(OzoneConfiguration ozoneConfiguration) { @@ -87,55 +97,75 @@ public HsyncGenerator() { @Override public Void call() throws Exception { - super.init(); + init(); if (configuration == null) { configuration = freon.createOzoneConfiguration(); } + URI uri = URI.create(rootPath); - outputStreams = new FSDataOutputStream[numberOfFiles]; - files = new Path[numberOfFiles]; - callsPerFile = new AtomicInteger[numberOfFiles]; - FileSystem fileSystem = getFileSystem(); - for (int i = 0; i < numberOfFiles; i++) { - Path file = new Path(getRootPath() + "/" + generateObjectName(i)); - fileSystem.mkdirs(file.getParent()); - outputStreams[i] = fileSystem.create(file); - files[i] = file; - callsPerFile[i] = new AtomicInteger(); - - LOG.info("Created file for testing: {}", file); - } + FileSystem fileSystem = FileSystem.get(uri, configuration); + Path file = new Path(rootPath + "/" + generateObjectName(0)); + fileSystem.mkdirs(file.getParent()); + outputStream = fileSystem.create(file); + + LOG.info("Created file for testing: {}", file); timer = getMetrics().timer("hsync-generator"); data = PayloadUtils.generatePayload(writeSize); + startTransactionWriter(); + try { runTests(this::sendHsync); } finally { - for (FSDataOutputStream outputStream : outputStreams) { - outputStream.close(); - } + outputStream.close(); + fileSystem.close(); } - StringBuilder distributionReport = new StringBuilder(); - for (int i = 0; i < numberOfFiles; i++) { - distributionReport.append("\t").append(files[i]).append(": ").append(callsPerFile[i]).append("\n"); 
- } + return null; + } - LOG.info("Hsync generator finished, calls distribution: \n {}", distributionReport); + private void startTransactionWriter() { + Thread transactionWriter = new Thread(this::generateTransactions); + transactionWriter.setDaemon(true); + transactionWriter.start(); + } - return null; + private void generateTransactions() { + int transaction = 0; + while (true) { + for (int i = 0; i < writesPerTransaction; i++) { + try { + if (writeSize > 1) { + outputStream.write(data); + } else { + outputStream.write(i); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + try { + writtenTransactions.put(transaction++); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } } private void sendHsync(long counter) throws Exception { timer.time(() -> { - int i = ((int) counter) % numberOfFiles; - FSDataOutputStream outputStream = outputStreams[i]; - outputStream.write(data); - outputStream.hsync(); - callsPerFile[i].incrementAndGet(); - return null; + while (true) { + int transaction = writtenTransactions.take(); + int lastSynced = lastSyncedTransaction.get(); + if (transaction > lastSynced) { + outputStream.hsync(); + lastSyncedTransaction.compareAndSet(lastSynced, transaction); + return null; + } + } }); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java index 9c98817185e..d5fbdc75f19 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java @@ -109,8 +109,7 @@ private void createKey(long counter) throws Exception { .setKeyName(generateObjectName(counter)) .setReplicationConfig(replicationConfig) .setLocationInfoList(new ArrayList<>()) - .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroupNames(), - ALL, ALL)) + .setAcls(OzoneAclUtil.getAclList(ugi, ALL, ALL)) .setOwnerName(ownerName) .build(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java index 24060b0bac8..4c277f07422 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java @@ -272,8 +272,7 @@ private OmKeyArgs.Builder createKeyArgsBuilder() { .setVolumeName(volumeName) .setReplicationConfig(replicationConfig) .setLocationInfoList(new ArrayList<>()) - .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroupNames(), - ALL, ALL)); + .setAcls(OzoneAclUtil.getAclList(ugi, ALL, ALL)); } private String getPath(long counter) { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientCreator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientCreator.java new file mode 100644 index 00000000000..2fc4cb48eac --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientCreator.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.freon; + +import com.codahale.metrics.Timer; +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import picocli.CommandLine; + +import java.util.concurrent.Callable; + +/** + * Creates and closes Ozone clients. + */ +@CommandLine.Command(name = "occ", + aliases = "ozone-client-creator", + description = "Create and close Ozone clients without doing anything useful", + versionProvider = HddsVersionProvider.class, + mixinStandardHelpOptions = true, + showDefaultValues = true) +public class OzoneClientCreator extends BaseFreonGenerator implements Callable { + + @CommandLine.Option(names = "--om-service-id", + description = "OM Service ID" + ) + private String omServiceID; + + private Timer timer; + private OzoneConfiguration conf; + + @Override + public Void call() { + init(); + conf = createOzoneConfiguration(); + timer = getMetrics().timer("client-create"); + runTests(this::createClient); + return null; + } + + private void createClient(long step) { + timer.time(this::createClientSafely); + } + + private void createClientSafely() { + try { + createOzoneClient(omServiceID, conf).close(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java index d62ad1d79c1..6dc0efae0d2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java @@ -867,12 +867,9 @@ private static DatanodeDetails createRandomDatanodeDetails() { RANDOM.nextInt(256) + "." + RANDOM.nextInt(256) + "." + RANDOM .nextInt(256) + "." 
+ RANDOM.nextInt(256); - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); + DatanodeDetails.Port containerPort = DatanodeDetails.newStandalonePort(0); + DatanodeDetails.Port ratisPort = DatanodeDetails.newRatisPort(0); + DatanodeDetails.Port restPort = DatanodeDetails.newRestPort(0); DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); builder.setUuid(uuid).setHostName("localhost") .setIpAddress(ipAddress) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorScm.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorScm.java index 66656d315d8..a15caab7d6b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorScm.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorScm.java @@ -59,7 +59,7 @@ public Void call() throws Exception { ConfigurationSource config = createOzoneConfiguration(); - scmDb = DBStoreBuilder.createDBStore(config, new SCMDBDefinition()); + scmDb = DBStoreBuilder.createDBStore(config, SCMDBDefinition.get()); containerStore = CONTAINERS.getTable(scmDb); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java index 16644522808..b1ed206f975 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java @@ -18,10 +18,10 @@ package org.apache.hadoop.ozone.repair; -import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hdds.cli.ExtensibleParentCommand; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.cli.RepairSubcommand; import picocli.CommandLine; import java.nio.charset.StandardCharsets; @@ -34,39 +34,13 @@ description = "Operational tool to repair Ozone", versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) -public class OzoneRepair extends GenericCli { +public class OzoneRepair extends GenericCli implements ExtensibleParentCommand { public static final String WARNING_SYS_USER_MESSAGE = "ATTENTION: Running as user %s. Make sure this is the same user used to run the Ozone process." + " Are you sure you want to continue (y/N)? "; - - private OzoneConfiguration ozoneConf; - - public OzoneRepair() { - super(OzoneRepair.class); - } - - @VisibleForTesting - public OzoneRepair(OzoneConfiguration configuration) { - super(OzoneRepair.class); - this.ozoneConf = configuration; - } - - public OzoneConfiguration getOzoneConf() { - if (ozoneConf == null) { - ozoneConf = createOzoneConfiguration(); - } - return ozoneConf; - } - - /** - * Main for the Ozone Repair shell Command handling. 
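For reference, the new occ (ozone-client-creator) generator registered in Freon above can presumably be invoked like the other generators; -t and -n are the usual Freon thread and iteration options already used in the hsync-generator examples earlier in this diff, and the service id is only a placeholder:

ozone freon occ -t 10 -n 1000 --om-service-id=<om-service-id>

Each iteration just opens an OzoneClient and closes it, so the client-create timer isolates connection setup and teardown cost.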
- * - * @param argv - System Args Strings[] - * @throws Exception - */ - public static void main(String[] argv) throws Exception { + public static void main(String[] argv) { new OzoneRepair().run(argv); } @@ -91,4 +65,8 @@ public String getConsoleReadLineWithFormat(String currentUser) { return (new Scanner(System.in, StandardCharsets.UTF_8.name())).nextLine().trim(); } + @Override + public Class subcommandType() { + return RepairSubcommand.class; + } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RecoverSCMCertificate.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RecoverSCMCertificate.java index aca41844a18..e6462aa3f85 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RecoverSCMCertificate.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RecoverSCMCertificate.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.ozone.repair; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.RepairSubcommand; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.authority.CAType; @@ -67,8 +67,8 @@ @CommandLine.Command( name = "cert-recover", description = "Recover Deleted SCM Certificate from RocksDB") -@MetaInfServices(SubcommandWithParent.class) -public class RecoverSCMCertificate implements Callable, SubcommandWithParent { +@MetaInfServices(RepairSubcommand.class) +public class RecoverSCMCertificate implements Callable, RepairSubcommand { @CommandLine.Option(names = {"--db"}, required = true, @@ -81,11 +81,6 @@ public class RecoverSCMCertificate implements Callable, SubcommandWithPare @CommandLine.Spec private CommandLine.Model.CommandSpec spec; - @Override - public Class getParentType() { - return OzoneRepair.class; - } - private PrintWriter err() { return spec.commandLine().getErr(); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RDBRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/RDBRepair.java similarity index 82% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RDBRepair.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/RDBRepair.java index 0f36934ec14..01ad705b201 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RDBRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/RDBRepair.java @@ -16,10 +16,10 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.repair; +package org.apache.hadoop.ozone.repair.ldb; import org.apache.hadoop.hdds.cli.GenericCli; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.RepairSubcommand; import org.kohsuke.MetaInfServices; import picocli.CommandLine; @@ -29,9 +29,13 @@ * Ozone Repair CLI for RocksDB. 
*/ @CommandLine.Command(name = "ldb", + subcommands = { + SnapshotRepair.class, + TransactionInfoRepair.class, + }, description = "Operational tool to repair RocksDB table.") -@MetaInfServices(SubcommandWithParent.class) -public class RDBRepair implements Callable, SubcommandWithParent { +@MetaInfServices(RepairSubcommand.class) +public class RDBRepair implements Callable, RepairSubcommand { @CommandLine.Spec private CommandLine.Model.CommandSpec spec; @@ -50,9 +54,4 @@ public Void call() { GenericCli.missingSubcommand(spec); return null; } - - @Override - public Class getParentType() { - return OzoneRepair.class; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/SnapshotRepair.java similarity index 94% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotRepair.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/SnapshotRepair.java index d07fc13be8a..45c10f5668b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/SnapshotRepair.java @@ -16,18 +16,15 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.repair.om; +package org.apache.hadoop.ozone.repair.ldb; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.StringCodec; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator; import org.apache.hadoop.ozone.debug.RocksDBUtils; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.hadoop.ozone.repair.RDBRepair; import org.apache.hadoop.ozone.shell.bucket.BucketUri; -import org.kohsuke.MetaInfServices; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDBException; @@ -55,8 +52,7 @@ name = "snapshot", description = "CLI to update global and path previous snapshot for a snapshot in case snapshot chain is corrupted." ) -@MetaInfServices(SubcommandWithParent.class) -public class SnapshotRepair implements Callable, SubcommandWithParent { +public class SnapshotRepair implements Callable { protected static final Logger LOG = LoggerFactory.getLogger(SnapshotRepair.class); @@ -178,9 +174,4 @@ private Set getSnapshotIdSet(ManagedRocksDB db, ColumnFamilyHandle snapsho } return snapshotIdSet; } - - @Override - public Class getParentType() { - return RDBRepair.class; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/TransactionInfoRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/TransactionInfoRepair.java similarity index 93% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/TransactionInfoRepair.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/TransactionInfoRepair.java index f2a63317378..277a2788247 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/TransactionInfoRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/TransactionInfoRepair.java @@ -19,16 +19,14 @@ * permissions and * limitations under the License. 
*/ -package org.apache.hadoop.ozone.repair; +package org.apache.hadoop.ozone.repair.ldb; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.StringCodec; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.ozone.debug.RocksDBUtils; -import org.kohsuke.MetaInfServices; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDBException; @@ -42,7 +40,6 @@ import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.TRANSACTION_INFO_TABLE; - /** * Tool to update the highest term-index in transactionInfoTable. */ @@ -52,9 +49,7 @@ mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class ) -@MetaInfServices(SubcommandWithParent.class) -public class TransactionInfoRepair - implements Callable, SubcommandWithParent { +public class TransactionInfoRepair implements Callable { @CommandLine.Spec private static CommandLine.Model.CommandSpec spec; @@ -127,9 +122,4 @@ protected RDBRepair getParent() { return parent; } - @Override - public Class getParentType() { - return RDBRepair.class; - } - } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/package-info.java new file mode 100644 index 00000000000..388d4b7dcea --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * RDB related repair tools. + */ +package org.apache.hadoop.ozone.repair.ldb; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java new file mode 100644 index 00000000000..5a217e9f2de --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.repair.om; + +import picocli.CommandLine; + +import java.util.concurrent.Callable; + +/** + * CLI for identifying and repairing a disconnected FSO tree in the OM DB. + */ +@CommandLine.Command( + name = "fso-tree", + description = "Identify and repair a disconnected FSO tree by marking unreferenced entries for deletion. " + + "OM should be stopped while this tool is run." +) +public class FSORepairCLI implements Callable { + + @CommandLine.Option(names = {"--db"}, + required = true, + description = "Path to OM RocksDB") + private String dbPath; + + @CommandLine.Option(names = {"-r", "--repair"}, + defaultValue = "false", + description = "Run in repair mode to move unreferenced files and directories to deleted tables.") + private boolean repair; + + @CommandLine.Option(names = {"-v", "--volume"}, + description = "Filter by volume name. Add '/' before the volume name.") + private String volume; + + @CommandLine.Option(names = {"-b", "--bucket"}, + description = "Filter by bucket name") + private String bucket; + + @CommandLine.Option(names = {"--verbose"}, + description = "Verbose output. Show all intermediate steps and deleted keys info.") + private boolean verbose; + + @Override + public Void call() throws Exception { + if (repair) { + System.out.println("FSO Repair Tool is running in repair mode"); + } else { + System.out.println("FSO Repair Tool is running in debug mode"); + } + try { + FSORepairTool + repairTool = new FSORepairTool(dbPath, repair, volume, bucket, verbose); + repairTool.run(); + } catch (Exception ex) { + throw new IllegalArgumentException("FSO repair failed: " + ex.getMessage()); + } + + if (verbose) { + System.out.println("FSO repair finished."); + } + + return null; + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java new file mode 100644 index 00000000000..7e0fb23f5aa --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java @@ -0,0 +1,710 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.ozone.repair.om; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.helpers.WithObjectID; +import org.apache.hadoop.ozone.om.request.file.OMFileRequest; +import org.apache.ratis.util.Preconditions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Objects; +import java.util.Stack; + +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; + +/** + * Base Tool to identify and repair disconnected FSO trees across all buckets. + * This tool logs information about reachable, unreachable and unreferenced files and directories in debug mode + * and moves these unreferenced files and directories to the deleted tables in repair mode. + + * If deletes are still in progress (the deleted directory table is not empty), the tool + * reports that the tree is unreachable, even though pending deletes would fix the issue. + * If not, the tool reports them as unreferenced and deletes them in repair mode. + + * Before using the tool, make sure all OMs are stopped, and that all Ratis logs have been flushed to the OM DB. + * This can be done using `ozone admin prepare` before running the tool, and `ozone admin + * cancelprepare` when done. + + * The tool will run a DFS from each bucket, and save all reachable directories as keys in a new temporary RocksDB + * instance called "reachable.db" in the same directory as om.db. + * It will then scan the entire file and directory tables for each bucket to see if each object's parent is in the + * reachable table of reachable.db. The reachable table will be dropped and recreated for each bucket. + * The tool is idempotent. reachable.db will not be deleted automatically when the tool finishes, + * in case users want to manually inspect it. It can be safely deleted once the tool finishes. 
+ */ +public class FSORepairTool { + public static final Logger LOG = LoggerFactory.getLogger(FSORepairTool.class); + + private final String omDBPath; + private final DBStore store; + private final Table volumeTable; + private final Table bucketTable; + private final Table directoryTable; + private final Table fileTable; + private final Table deletedDirectoryTable; + private final Table deletedTable; + private final Table snapshotInfoTable; + private final String volumeFilter; + private final String bucketFilter; + private static final String REACHABLE_TABLE = "reachable"; + private DBStore reachableDB; + private final ReportStatistics reachableStats; + private final ReportStatistics unreachableStats; + private final ReportStatistics unreferencedStats; + private final boolean repair; + private final boolean verbose; + + public FSORepairTool(String dbPath, boolean repair, String volume, String bucket, boolean verbose) + throws IOException { + this(getStoreFromPath(dbPath), dbPath, repair, volume, bucket, verbose); + } + + /** + * Allows passing RocksDB instance from a MiniOzoneCluster directly to this class for testing. + */ + public FSORepairTool(DBStore dbStore, String dbPath, boolean repair, String volume, String bucket, boolean verbose) + throws IOException { + this.reachableStats = new ReportStatistics(0, 0, 0); + this.unreachableStats = new ReportStatistics(0, 0, 0); + this.unreferencedStats = new ReportStatistics(0, 0, 0); + + this.store = dbStore; + this.omDBPath = dbPath; + this.repair = repair; + this.volumeFilter = volume; + this.bucketFilter = bucket; + this.verbose = verbose; + volumeTable = store.getTable(OmMetadataManagerImpl.VOLUME_TABLE, + String.class, + OmVolumeArgs.class); + bucketTable = store.getTable(OmMetadataManagerImpl.BUCKET_TABLE, + String.class, + OmBucketInfo.class); + directoryTable = store.getTable(OmMetadataManagerImpl.DIRECTORY_TABLE, + String.class, + OmDirectoryInfo.class); + fileTable = store.getTable(OmMetadataManagerImpl.FILE_TABLE, + String.class, + OmKeyInfo.class); + deletedDirectoryTable = store.getTable(OmMetadataManagerImpl.DELETED_DIR_TABLE, + String.class, + OmKeyInfo.class); + deletedTable = store.getTable(OmMetadataManagerImpl.DELETED_TABLE, + String.class, + RepeatedOmKeyInfo.class); + snapshotInfoTable = store.getTable(OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE, + String.class, + SnapshotInfo.class); + } + + protected static DBStore getStoreFromPath(String dbPath) throws IOException { + File omDBFile = new File(dbPath); + if (!omDBFile.exists() || !omDBFile.isDirectory()) { + throw new IOException(String.format("Specified OM DB instance %s does " + + "not exist or is not a RocksDB directory.", dbPath)); + } + // Load RocksDB and tables needed. 
+ return OmMetadataManagerImpl.loadDB(new OzoneConfiguration(), new File(dbPath).getParentFile(), -1); + } + + public FSORepairTool.Report run() throws Exception { + try { + if (bucketFilter != null && volumeFilter == null) { + System.out.println("--bucket flag cannot be used without specifying --volume."); + return null; + } + + if (volumeFilter != null) { + OmVolumeArgs volumeArgs = volumeTable.getIfExist(volumeFilter); + if (volumeArgs == null) { + System.out.println("Volume '" + volumeFilter + "' does not exist."); + return null; + } + } + + // Iterate all volumes or a specific volume if specified + try (TableIterator> + volumeIterator = volumeTable.iterator()) { + try { + openReachableDB(); + } catch (IOException e) { + System.out.println("Failed to open reachable database: " + e.getMessage()); + throw e; + } + while (volumeIterator.hasNext()) { + Table.KeyValue volumeEntry = volumeIterator.next(); + String volumeKey = volumeEntry.getKey(); + + if (volumeFilter != null && !volumeFilter.equals(volumeKey)) { + continue; + } + + System.out.println("Processing volume: " + volumeKey); + + if (bucketFilter != null) { + OmBucketInfo bucketInfo = bucketTable.getIfExist(volumeKey + "/" + bucketFilter); + if (bucketInfo == null) { + //Bucket does not exist in the volume + System.out.println("Bucket '" + bucketFilter + "' does not exist in volume '" + volumeKey + "'."); + return null; + } + + if (bucketInfo.getBucketLayout() != BucketLayout.FILE_SYSTEM_OPTIMIZED) { + System.out.println("Skipping non-FSO bucket " + bucketFilter); + continue; + } + + processBucket(volumeEntry.getValue(), bucketInfo); + } else { + + // Iterate all buckets in the volume. + try (TableIterator> + bucketIterator = bucketTable.iterator()) { + bucketIterator.seek(volumeKey); + while (bucketIterator.hasNext()) { + Table.KeyValue bucketEntry = bucketIterator.next(); + String bucketKey = bucketEntry.getKey(); + OmBucketInfo bucketInfo = bucketEntry.getValue(); + + if (bucketInfo.getBucketLayout() != BucketLayout.FILE_SYSTEM_OPTIMIZED) { + System.out.println("Skipping non-FSO bucket " + bucketKey); + continue; + } + + // Stop this loop once we have seen all buckets in the current + // volume. + if (!bucketKey.startsWith(volumeKey)) { + break; + } + + processBucket(volumeEntry.getValue(), bucketInfo); + } + } + } + } + } + } catch (IOException e) { + System.out.println("An error occurred while processing" + e.getMessage()); + throw e; + } finally { + closeReachableDB(); + store.close(); + } + + return buildReportAndLog(); + } + + private boolean checkIfSnapshotExistsForBucket(String volumeName, String bucketName) throws IOException { + if (snapshotInfoTable == null) { + return false; + } + + try (TableIterator> iterator = + snapshotInfoTable.iterator()) { + while (iterator.hasNext()) { + SnapshotInfo snapshotInfo = iterator.next().getValue(); + String snapshotPath = (volumeName + "/" + bucketName).replaceFirst("^/", ""); + if (snapshotInfo.getSnapshotPath().equals(snapshotPath)) { + return true; + } + } + } + return false; + } + + private void processBucket(OmVolumeArgs volume, OmBucketInfo bucketInfo) throws IOException { + System.out.println("Processing bucket: " + volume.getVolume() + "/" + bucketInfo.getBucketName()); + if (checkIfSnapshotExistsForBucket(volume.getVolume(), bucketInfo.getBucketName())) { + if (!repair) { + System.out.println( + "Snapshot detected in bucket '" + volume.getVolume() + "/" + bucketInfo.getBucketName() + "'. 
"); + } else { + System.out.println( + "Skipping repair for bucket '" + volume.getVolume() + "/" + bucketInfo.getBucketName() + "' " + + "due to snapshot presence."); + return; + } + } + markReachableObjectsInBucket(volume, bucketInfo); + handleUnreachableAndUnreferencedObjects(volume, bucketInfo); + } + + private Report buildReportAndLog() { + Report report = new Report.Builder() + .setReachable(reachableStats) + .setUnreachable(unreachableStats) + .setUnreferenced(unreferencedStats) + .build(); + + System.out.println("\n" + report); + return report; + } + + private void markReachableObjectsInBucket(OmVolumeArgs volume, OmBucketInfo bucket) throws IOException { + // Only put directories in the stack. + // Directory keys should have the form /volumeID/bucketID/parentID/name. + Stack dirKeyStack = new Stack<>(); + + // Since the tool uses parent directories to check for reachability, add + // a reachable entry for the bucket as well. + addReachableEntry(volume, bucket, bucket); + // Initialize the stack with all immediate child directories of the + // bucket, and mark them all as reachable. + Collection childDirs = getChildDirectoriesAndMarkAsReachable(volume, bucket, bucket); + dirKeyStack.addAll(childDirs); + + while (!dirKeyStack.isEmpty()) { + // Get one directory and process its immediate children. + String currentDirKey = dirKeyStack.pop(); + OmDirectoryInfo currentDir = directoryTable.get(currentDirKey); + if (currentDir == null) { + System.out.println("Directory key" + currentDirKey + "to be processed was not found in the directory table."); + continue; + } + + // TODO revisit this for a more memory efficient implementation, + // possibly making better use of RocksDB iterators. + childDirs = getChildDirectoriesAndMarkAsReachable(volume, bucket, currentDir); + dirKeyStack.addAll(childDirs); + } + } + + private boolean isDirectoryInDeletedDirTable(String dirKey) throws IOException { + return deletedDirectoryTable.isExist(dirKey); + } + + private boolean isFileKeyInDeletedTable(String fileKey) throws IOException { + return deletedTable.isExist(fileKey); + } + + private void handleUnreachableAndUnreferencedObjects(OmVolumeArgs volume, OmBucketInfo bucket) throws IOException { + // Check for unreachable and unreferenced directories in the bucket. + String bucketPrefix = OM_KEY_PREFIX + + volume.getObjectID() + + OM_KEY_PREFIX + + bucket.getObjectID(); + + try (TableIterator> dirIterator = + directoryTable.iterator()) { + dirIterator.seek(bucketPrefix); + while (dirIterator.hasNext()) { + Table.KeyValue dirEntry = dirIterator.next(); + String dirKey = dirEntry.getKey(); + + // Only search directories in this bucket. 
+ if (!dirKey.startsWith(bucketPrefix)) { + break; + } + + if (!isReachable(dirKey)) { + if (!isDirectoryInDeletedDirTable(dirKey)) { + System.out.println("Found unreferenced directory: " + dirKey); + unreferencedStats.addDir(); + + if (!repair) { + if (verbose) { + System.out.println("Marking unreferenced directory " + dirKey + " for deletion."); + } + } else { + System.out.println("Deleting unreferenced directory " + dirKey); + OmDirectoryInfo dirInfo = dirEntry.getValue(); + markDirectoryForDeletion(volume.getVolume(), bucket.getBucketName(), dirKey, dirInfo); + } + } else { + unreachableStats.addDir(); + } + } + } + } + + // Check for unreachable and unreferenced files + try (TableIterator> + fileIterator = fileTable.iterator()) { + fileIterator.seek(bucketPrefix); + while (fileIterator.hasNext()) { + Table.KeyValue fileEntry = fileIterator.next(); + String fileKey = fileEntry.getKey(); + // Only search files in this bucket. + if (!fileKey.startsWith(bucketPrefix)) { + break; + } + + OmKeyInfo fileInfo = fileEntry.getValue(); + if (!isReachable(fileKey)) { + if (!isFileKeyInDeletedTable(fileKey)) { + System.out.println("Found unreferenced file: " + fileKey); + unreferencedStats.addFile(fileInfo.getDataSize()); + + if (!repair) { + if (verbose) { + System.out.println("Marking unreferenced file " + fileKey + " for deletion." + fileKey); + } + } else { + System.out.println("Deleting unreferenced file " + fileKey); + markFileForDeletion(fileKey, fileInfo); + } + } else { + unreachableStats.addFile(fileInfo.getDataSize()); + } + } else { + // NOTE: We are deserializing the proto of every reachable file + // just to log it's size. If we don't need this information we could + // save time by skipping this step. + reachableStats.addFile(fileInfo.getDataSize()); + } + } + } + } + + protected void markFileForDeletion(String fileKey, OmKeyInfo fileInfo) throws IOException { + try (BatchOperation batch = store.initBatchOperation()) { + fileTable.deleteWithBatch(batch, fileKey); + + RepeatedOmKeyInfo originalRepeatedKeyInfo = deletedTable.get(fileKey); + RepeatedOmKeyInfo updatedRepeatedOmKeyInfo = OmUtils.prepareKeyForDelete( + fileInfo, fileInfo.getUpdateID(), true); + // NOTE: The FSO code seems to write the open key entry with the whole + // path, using the object's names instead of their ID. This would only + // be possible when the file is deleted explicitly, and not part of a + // directory delete. It is also not possible here if the file's parent + // is gone. The name of the key does not matter so just use IDs. + deletedTable.putWithBatch(batch, fileKey, updatedRepeatedOmKeyInfo); + if (verbose) { + System.out.println("Added entry " + fileKey + " to open key table: " + updatedRepeatedOmKeyInfo); + } + store.commitBatchOperation(batch); + } + } + + protected void markDirectoryForDeletion(String volumeName, String bucketName, + String dirKeyName, OmDirectoryInfo dirInfo) throws IOException { + try (BatchOperation batch = store.initBatchOperation()) { + directoryTable.deleteWithBatch(batch, dirKeyName); + // HDDS-7592: Make directory entries in deleted dir table unique. + String deleteDirKeyName = dirKeyName + OM_KEY_PREFIX + dirInfo.getObjectID(); + + // Convert the directory to OmKeyInfo for deletion. 
+ OmKeyInfo dirAsKeyInfo = OMFileRequest.getOmKeyInfo(volumeName, bucketName, dirInfo, dirInfo.getName()); + deletedDirectoryTable.putWithBatch(batch, deleteDirKeyName, dirAsKeyInfo); + + store.commitBatchOperation(batch); + } + } + + private Collection getChildDirectoriesAndMarkAsReachable(OmVolumeArgs volume, OmBucketInfo bucket, + WithObjectID currentDir) throws IOException { + + Collection childDirs = new ArrayList<>(); + + try (TableIterator> + dirIterator = directoryTable.iterator()) { + String dirPrefix = buildReachableKey(volume, bucket, currentDir); + // Start searching the directory table at the current directory's + // prefix to get its immediate children. + dirIterator.seek(dirPrefix); + while (dirIterator.hasNext()) { + Table.KeyValue childDirEntry = dirIterator.next(); + String childDirKey = childDirEntry.getKey(); + // Stop processing once we have seen all immediate children of this + // directory. + if (!childDirKey.startsWith(dirPrefix)) { + break; + } + // This directory was reached by search. + addReachableEntry(volume, bucket, childDirEntry.getValue()); + childDirs.add(childDirKey); + reachableStats.addDir(); + } + } + + return childDirs; + } + + /** + * Add the specified object to the reachable table, indicating it is part + * of the connected FSO tree. + */ + private void addReachableEntry(OmVolumeArgs volume, OmBucketInfo bucket, WithObjectID object) throws IOException { + String reachableKey = buildReachableKey(volume, bucket, object); + // No value is needed for this table. + reachableDB.getTable(REACHABLE_TABLE, String.class, byte[].class).put(reachableKey, new byte[]{}); + } + + /** + * Build an entry in the reachable table for the current object, which + * could be a bucket, file or directory. + */ + private static String buildReachableKey(OmVolumeArgs volume, OmBucketInfo bucket, WithObjectID object) { + return OM_KEY_PREFIX + + volume.getObjectID() + + OM_KEY_PREFIX + + bucket.getObjectID() + + OM_KEY_PREFIX + + object.getObjectID(); + } + + /** + * + * @param fileOrDirKey The key of a file or directory in RocksDB. + * @return true if the entry's parent is in the reachable table. + */ + protected boolean isReachable(String fileOrDirKey) throws IOException { + String reachableParentKey = buildReachableParentKey(fileOrDirKey); + + return reachableDB.getTable(REACHABLE_TABLE, String.class, byte[].class).get(reachableParentKey) != null; + } + + /** + * Build an entry in the reachable table for the current object's parent + * object. The object could be a file or directory. + */ + private static String buildReachableParentKey(String fileOrDirKey) { + String[] keyParts = fileOrDirKey.split(OM_KEY_PREFIX); + // Should be /volID/bucketID/parentID/name + // The first part will be blank since key begins with a slash. + Preconditions.assertTrue(keyParts.length >= 4); + String volumeID = keyParts[1]; + String bucketID = keyParts[2]; + String parentID = keyParts[3]; + + return OM_KEY_PREFIX + + volumeID + + OM_KEY_PREFIX + + bucketID + + OM_KEY_PREFIX + + parentID; + } + + private void openReachableDB() throws IOException { + File reachableDBFile = new File(new File(omDBPath).getParentFile(), "reachable.db"); + System.out.println("Creating database of reachable directories at " + reachableDBFile); + // Delete the DB from the last run if it exists. 
+ if (reachableDBFile.exists()) { + FileUtils.deleteDirectory(reachableDBFile); + } + + ConfigurationSource conf = new OzoneConfiguration(); + reachableDB = DBStoreBuilder.newBuilder(conf) + .setName("reachable.db") + .setPath(reachableDBFile.getParentFile().toPath()) + .addTable(REACHABLE_TABLE) + .build(); + } + + private void closeReachableDB() throws IOException { + if (reachableDB != null) { + reachableDB.close(); + } + File reachableDBFile = new File(new File(omDBPath).getParentFile(), "reachable.db"); + if (reachableDBFile.exists()) { + FileUtils.deleteDirectory(reachableDBFile); + } + } + + /** + * Define a Report to be created. + */ + public static class Report { + private final ReportStatistics reachable; + private final ReportStatistics unreachable; + private final ReportStatistics unreferenced; + + /** + * Builds one report that is the aggregate of multiple others. + */ + public Report(FSORepairTool.Report... reports) { + reachable = new ReportStatistics(); + unreachable = new ReportStatistics(); + unreferenced = new ReportStatistics(); + + for (FSORepairTool.Report report : reports) { + reachable.add(report.reachable); + unreachable.add(report.unreachable); + unreferenced.add(report.unreferenced); + } + } + + private Report(FSORepairTool.Report.Builder builder) { + this.reachable = builder.reachable; + this.unreachable = builder.unreachable; + this.unreferenced = builder.unreferenced; + } + + public ReportStatistics getReachable() { + return reachable; + } + + public ReportStatistics getUnreachable() { + return unreachable; + } + + public ReportStatistics getUnreferenced() { + return unreferenced; + } + + public String toString() { + return "Reachable:" + reachable + "\nUnreachable:" + unreachable + "\nUnreferenced:" + unreferenced; + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + FSORepairTool.Report report = (FSORepairTool.Report) other; + + // Useful for testing. + System.out.println("Comparing reports\nExpect:\n" + this + "\nActual:\n" + report); + + return reachable.equals(report.reachable) && unreachable.equals(report.unreachable) && + unreferenced.equals(report.unreferenced); + } + + @Override + public int hashCode() { + return Objects.hash(reachable, unreachable, unreferenced); + } + + /** + * Builder class for a Report. + */ + public static final class Builder { + private ReportStatistics reachable = new ReportStatistics(); + private ReportStatistics unreachable = new ReportStatistics(); + private ReportStatistics unreferenced = new ReportStatistics(); + + public Builder() { + } + + public Builder setReachable(ReportStatistics reachable) { + this.reachable = reachable; + return this; + } + + public Builder setUnreachable(ReportStatistics unreachable) { + this.unreachable = unreachable; + return this; + } + + public Builder setUnreferenced(ReportStatistics unreferenced) { + this.unreferenced = unreferenced; + return this; + } + + public Report build() { + return new Report(this); + } + } + } + + /** + * Represents the statistics of reachable and unreachable data. + * This gives the count of dirs, files and bytes. 
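+ * Byte counts are accumulated from the data size recorded in each counted file's key info.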
+ */ + + public static class ReportStatistics { + private long dirs; + private long files; + private long bytes; + + public ReportStatistics() { } + + public ReportStatistics(long dirs, long files, long bytes) { + this.dirs = dirs; + this.files = files; + this.bytes = bytes; + } + + public void add(ReportStatistics other) { + this.dirs += other.dirs; + this.files += other.files; + this.bytes += other.bytes; + } + + public long getDirs() { + return dirs; + } + + public long getFiles() { + return files; + } + + public long getBytes() { + return bytes; + } + + @Override + public String toString() { + return "\n\tDirectories: " + dirs + + "\n\tFiles: " + files + + "\n\tBytes: " + bytes; + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + ReportStatistics stats = (ReportStatistics) other; + + return bytes == stats.bytes && files == stats.files && dirs == stats.dirs; + } + + @Override + public int hashCode() { + return Objects.hash(bytes, files, dirs); + } + + public void addDir() { + dirs++; + } + + public void addFile(long size) { + files++; + bytes += size; + } + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java new file mode 100644 index 00000000000..56d42d23f49 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.repair.om; + +import org.apache.hadoop.hdds.cli.GenericCli; +import org.apache.hadoop.hdds.cli.RepairSubcommand; +import org.kohsuke.MetaInfServices; +import picocli.CommandLine; + +import java.util.concurrent.Callable; + +/** + * Ozone Repair CLI for OM. 
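+ * Example invocation (assuming this command is registered under the top-level ozone repair CLI):
+ * {@code ozone repair om fso-tree --db /path/to/om.db [--repair]}.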
+ */ +@CommandLine.Command(name = "om", + subcommands = { + FSORepairCLI.class, + }, + description = "Operational tool to repair OM.") +@MetaInfServices(RepairSubcommand.class) +public class OMRepair implements Callable, RepairSubcommand { + + @CommandLine.Spec + private CommandLine.Model.CommandSpec spec; + + @Override + public Void call() { + GenericCli.missingSubcommand(spec); + return null; + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaRepair.java index 5f21b739c81..6ead713e148 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaRepair.java @@ -22,7 +22,7 @@ import java.util.Collection; import java.util.concurrent.Callable; import org.apache.hadoop.hdds.cli.GenericCli; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.cli.RepairSubcommand; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; @@ -45,9 +45,13 @@ * Ozone Repair CLI for quota. */ @CommandLine.Command(name = "quota", + subcommands = { + QuotaStatus.class, + QuotaTrigger.class, + }, description = "Operational tool to repair quota in OM DB.") -@MetaInfServices(SubcommandWithParent.class) -public class QuotaRepair implements Callable, SubcommandWithParent { +@MetaInfServices(RepairSubcommand.class) +public class QuotaRepair implements Callable, RepairSubcommand { @CommandLine.Spec private CommandLine.Model.CommandSpec spec; @@ -113,9 +117,4 @@ public UserGroupInformation getUser() throws IOException { protected OzoneRepair getParent() { return parent; } - - @Override - public Class getParentType() { - return OzoneRepair.class; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaStatus.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaStatus.java index a78d248e055..820ac6f8eaf 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaStatus.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaStatus.java @@ -23,12 +23,9 @@ import java.util.concurrent.Callable; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.kohsuke.MetaInfServices; import picocli.CommandLine; - /** * Tool to get status of last triggered quota repair. 
*/ @@ -38,8 +35,7 @@ mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class ) -@MetaInfServices(SubcommandWithParent.class) -public class QuotaStatus implements Callable, SubcommandWithParent { +public class QuotaStatus implements Callable { @CommandLine.Spec private static CommandLine.Model.CommandSpec spec; @@ -72,9 +68,4 @@ public Void call() throws Exception { protected QuotaRepair getParent() { return parent; } - - @Override - public Class getParentType() { - return QuotaRepair.class; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaTrigger.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaTrigger.java index 19ad92340c0..04d78f05dc6 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaTrigger.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaTrigger.java @@ -27,9 +27,7 @@ import java.util.concurrent.Callable; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.kohsuke.MetaInfServices; import picocli.CommandLine; /** @@ -41,8 +39,7 @@ mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class ) -@MetaInfServices(SubcommandWithParent.class) -public class QuotaTrigger implements Callable, SubcommandWithParent { +public class QuotaTrigger implements Callable { @CommandLine.Spec private static CommandLine.Model.CommandSpec spec; @@ -92,9 +89,4 @@ protected QuotaRepair getParent() { return parent; } - @Override - public Class getParentType() { - return QuotaRepair.class; - } - } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/package-info.java index 9a433b24397..40c0abcb916 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/package-info.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/package-info.java @@ -17,6 +17,6 @@ */ /** - * Ozone Repair tools. + * Ozone Quota Repair tools. */ package org.apache.hadoop.ozone.repair.quota; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Handler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Handler.java index ae5edf5b1f7..d1755a68806 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Handler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Handler.java @@ -69,7 +69,7 @@ protected OzoneAddress getAddress() throws OzoneClientException { } protected abstract void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException; + throws IOException; /** * Checks whether the current command should be executed or not. @@ -102,7 +102,7 @@ public Void call() throws Exception { } protected OzoneClient createClient(OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { return address.createClient(conf); } @@ -111,7 +111,7 @@ protected boolean securityEnabled() { if (!enabled) { err().printf("Error: '%s' operation works only when security is " + "enabled. 
To enable security set ozone.security.enabled to " + - "true.%n", spec.qualifiedName()); + "true.%n", spec.qualifiedName().trim()); } return enabled; } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java index f2fa1a8c4f3..ae5b5ad566e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java @@ -123,7 +123,7 @@ protected OzoneClient createRpcClientFromServiceId( } public OzoneClient createClient(MutableConfigurationSource conf) - throws IOException, OzoneClientException { + throws IOException { OzoneClient client; String scheme = ozoneURI.getScheme(); if (ozoneURI.getScheme() == null || scheme.isEmpty()) { @@ -185,13 +185,12 @@ public OzoneClient createClient(MutableConfigurationSource conf) * @param omServiceID * @return OzoneClient * @throws IOException - * @throws OzoneClientException */ public OzoneClient createClientForS3Commands( OzoneConfiguration conf, String omServiceID ) - throws IOException, OzoneClientException { + throws IOException { Collection serviceIds = conf. getTrimmedStringCollection(OZONE_OM_SERVICE_IDS_KEY); if (omServiceID != null) { @@ -227,8 +226,7 @@ public OzoneClient createClientForS3Commands( * @param uri - UriString * @return URI */ - protected URI parseURI(String uri) - throws OzoneClientException { + protected URI parseURI(String uri) throws OzoneClientException { if ((uri == null) || uri.isEmpty()) { throw new OzoneClientException( "Ozone URI is needed to execute this command."); @@ -422,7 +420,7 @@ public void ensureVolumeAddress() throws OzoneClientException { } } - public void ensureRootAddress() throws OzoneClientException { + public void ensureRootAddress() throws OzoneClientException { if (keyName.length() != 0 || bucketName.length() != 0 || volumeName.length() != 0) { throw new OzoneClientException( diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneRatis.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneRatis.java index 5bc98268064..20f6f683cbf 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneRatis.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneRatis.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.shell; +import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.ratis.shell.cli.sh.RatisShell; @@ -30,17 +31,8 @@ description = "Shell for running Ratis commands", versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) -public class OzoneRatis extends Shell { +public class OzoneRatis extends GenericCli { - public OzoneRatis() { - super(OzoneRatis.class); - } - - /** - * Main for the OzoneRatis Command handling. 
- * - * @param argv - System Args Strings[] - */ public static void main(String[] argv) throws Exception { new OzoneRatis().run(argv); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneShell.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneShell.java index 04b2c706f7e..925e3bc13ec 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneShell.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneShell.java @@ -19,6 +19,13 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.tracing.TracingUtil; +import org.apache.hadoop.ozone.shell.bucket.BucketCommands; +import org.apache.hadoop.ozone.shell.keys.KeyCommands; +import org.apache.hadoop.ozone.shell.prefix.PrefixCommands; +import org.apache.hadoop.ozone.shell.snapshot.SnapshotCommands; +import org.apache.hadoop.ozone.shell.tenant.TenantUserCommands; +import org.apache.hadoop.ozone.shell.token.TokenCommands; +import org.apache.hadoop.ozone.shell.volume.VolumeCommands; import picocli.CommandLine.Command; @@ -27,14 +34,19 @@ */ @Command(name = "ozone sh", description = "Shell for Ozone object store", + subcommands = { + BucketCommands.class, + KeyCommands.class, + PrefixCommands.class, + SnapshotCommands.class, + TenantUserCommands.class, + TokenCommands.class, + VolumeCommands.class, + }, versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) public class OzoneShell extends Shell { - public OzoneShell() { - super(OzoneShell.class); - } - /** * Main for the ozShell Command handling. * diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/REPL.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/REPL.java new file mode 100644 index 00000000000..14848846348 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/REPL.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.ozone.shell; + +import org.jline.console.SystemRegistry; +import org.jline.console.impl.SystemRegistryImpl; +import org.jline.reader.EndOfFileException; +import org.jline.reader.LineReader; +import org.jline.reader.LineReaderBuilder; +import org.jline.reader.MaskingCallback; +import org.jline.reader.Parser; +import org.jline.reader.UserInterruptException; +import org.jline.reader.impl.DefaultParser; +import org.jline.terminal.Terminal; +import org.jline.terminal.TerminalBuilder; +import org.jline.widget.TailTipWidgets; +import org.jline.widget.TailTipWidgets.TipType; +import picocli.CommandLine; +import picocli.shell.jline3.PicocliCommands; +import picocli.shell.jline3.PicocliCommands.PicocliCommandsFactory; + +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.function.Supplier; + +/** + * Interactive shell for Ozone commands. + * (REPL = Read-Eval-Print Loop) + */ +class REPL { + + REPL(Shell shell, CommandLine cmd, PicocliCommandsFactory factory) { + Parser parser = new DefaultParser(); + Supplier workDir = () -> Paths.get(System.getProperty("user.dir")); + TerminalBuilder terminalBuilder = TerminalBuilder.builder() + .dumb(true); + try (Terminal terminal = terminalBuilder.build()) { + factory.setTerminal(terminal); + + PicocliCommands picocliCommands = new PicocliCommands(cmd); + picocliCommands.name(shell.name()); + SystemRegistry registry = new SystemRegistryImpl(parser, terminal, workDir, null); + registry.setCommandRegistries(picocliCommands); + registry.register("help", picocliCommands); + + LineReader reader = LineReaderBuilder.builder() + .terminal(terminal) + .completer(registry.completer()) + .parser(parser) + .variable(LineReader.LIST_MAX, 50) + .build(); + + TailTipWidgets widgets = new TailTipWidgets(reader, registry::commandDescription, 5, TipType.COMPLETER); + widgets.enable(); + + String prompt = shell.prompt() + "> "; + + while (true) { + try { + registry.cleanUp(); + String line = reader.readLine(prompt, null, (MaskingCallback) null, null); + registry.execute(line); + } catch (UserInterruptException ignored) { + // ignore + } catch (EndOfFileException e) { + return; + } catch (Exception e) { + registry.trace(e); + } + } + } catch (Exception e) { + shell.printError(e); + } + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Shell.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Shell.java index 97e160651bb..3291ce87b08 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Shell.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Shell.java @@ -20,6 +20,8 @@ import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.ozone.om.exceptions.OMException; +import picocli.CommandLine; +import picocli.shell.jline3.PicocliCommands.PicocliCommandsFactory; /** * Ozone user interface commands. @@ -27,6 +29,7 @@ * This class uses dispatch method to make calls * to appropriate handlers that execute the ozone functions. 
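+ * An optional interactive (REPL) mode can be requested with the --interactive flag.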
*/ +@CommandLine.Command public abstract class Shell extends GenericCli { public static final String OZONE_URI_DESCRIPTION = @@ -46,15 +49,48 @@ public abstract class Shell extends GenericCli { "Any unspecified information will be identified from\n" + "the config files.\n"; + private String name; + + @CommandLine.Spec + private CommandLine.Model.CommandSpec spec; + + @CommandLine.Option(names = { "--interactive" }, description = "Run in interactive mode") + private boolean interactive; + public Shell() { + super(new PicocliCommandsFactory()); + } + + public String name() { + return name; + } + + // override if custom prompt is needed + public String prompt() { + return name(); } - public Shell(Class type) { - super(type); + @Override + public void run(String[] argv) { + name = spec.name(); + + try { + // parse args to check if interactive mode is requested + getCmd().parseArgs(argv); + } catch (Exception ignored) { + // failure will be reported by regular, non-interactive run + } + + if (interactive) { + spec.name(""); // use short name (e.g. "token get" instead of "ozone sh token get") + new REPL(this, getCmd(), (PicocliCommandsFactory) getCmd().getFactory()); + } else { + super.run(argv); + } } @Override - protected void printError(Throwable errorArg) { + public void printError(Throwable errorArg) { OMException omException = null; if (errorArg instanceof OMException) { @@ -77,4 +113,3 @@ protected void printError(Throwable errorArg) { } } } - diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java index 80e26e04451..8a92de696a7 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java @@ -23,12 +23,9 @@ import org.apache.hadoop.hdds.cli.GenericParentCommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.shell.OzoneShell; import org.apache.hadoop.ozone.shell.Shell; -import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.ParentCommand; @@ -55,9 +52,7 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -@MetaInfServices(SubcommandWithParent.class) -public class BucketCommands implements GenericParentCommand, Callable, - SubcommandWithParent { +public class BucketCommands implements GenericParentCommand, Callable { @ParentCommand private Shell shell; @@ -77,9 +72,4 @@ public boolean isVerbose() { public OzoneConfiguration createOzoneConfiguration() { return shell.createOzoneConfiguration(); } - - @Override - public Class getParentType() { - return OzoneShell.class; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/InfoBucketHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/InfoBucketHandler.java index 39044db797a..9d9bc3dd6e6 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/InfoBucketHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/InfoBucketHandler.java @@ -53,7 +53,7 @@ public void execute(OzoneClient client, OzoneAddress address) */ public static class LinkBucket { private String 
volumeName; - private String bucketName; + private String name; private String sourceVolume; private String sourceBucket; private Instant creationTime; @@ -63,7 +63,7 @@ public static class LinkBucket { LinkBucket(OzoneBucket ozoneBucket) { this.volumeName = ozoneBucket.getVolumeName(); - this.bucketName = ozoneBucket.getName(); + this.name = ozoneBucket.getName(); this.sourceVolume = ozoneBucket.getSourceVolume(); this.sourceBucket = ozoneBucket.getSourceBucket(); this.creationTime = ozoneBucket.getCreationTime(); @@ -76,8 +76,8 @@ public String getVolumeName() { return volumeName; } - public String getBucketName() { - return bucketName; + public String getName() { + return name; } public String getSourceVolume() { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetEncryptionKey.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetEncryptionKey.java index 86a50e9df3c..3df65165fa8 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetEncryptionKey.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetEncryptionKey.java @@ -19,7 +19,6 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.shell.OzoneAddress; import picocli.CommandLine; @@ -70,7 +69,7 @@ public class SetEncryptionKey extends BucketHandler { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { String volumeName = address.getVolumeName(); String bucketName = address.getBucketName(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetReplicationConfigHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetReplicationConfigHandler.java index 45d66fd1c3d..258a73aa93b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetReplicationConfigHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetReplicationConfigHandler.java @@ -21,7 +21,6 @@ import org.apache.hadoop.ozone.OzoneIllegalArgumentException; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.shell.OzoneAddress; import org.apache.hadoop.ozone.shell.ShellReplicationOptions; import picocli.CommandLine; @@ -40,7 +39,7 @@ public class SetReplicationConfigHandler extends BucketHandler { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { ReplicationConfig replicationConfig = replication.fromParams(getConf()) .orElseThrow(() -> new OzoneIllegalArgumentException( "Replication type and config must be specified.")); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/UpdateBucketHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/UpdateBucketHandler.java index 7ba62a5ce1c..e36fbce63e8 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/UpdateBucketHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/UpdateBucketHandler.java @@ -19,7 +19,6 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; 
-import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.shell.OzoneAddress; import picocli.CommandLine.Command; @@ -40,7 +39,7 @@ public class UpdateBucketHandler extends BucketHandler { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { String volumeName = address.getVolumeName(); String bucketName = address.getBucketName(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/CatKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/CatKeyHandler.java index 328c56f82e3..41a3e142a8d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/CatKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/CatKeyHandler.java @@ -21,7 +21,6 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.shell.OzoneAddress; import picocli.CommandLine.Command; @@ -38,7 +37,7 @@ public class CatKeyHandler extends KeyHandler { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { String volumeName = address.getVolumeName(); String bucketName = address.getBucketName(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/ChecksumKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/ChecksumKeyHandler.java index b8acf783d6f..6bb25dc2ad8 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/ChecksumKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/ChecksumKeyHandler.java @@ -22,7 +22,6 @@ import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; @@ -49,7 +48,7 @@ public class ChecksumKeyHandler extends KeyHandler { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { ChecksumInfo checksumInfo = new ChecksumInfo(address, client, mode); printObjectAsJson(checksumInfo); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/CopyKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/CopyKeyHandler.java index 81d2dbcae6a..a304dada153 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/CopyKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/CopyKeyHandler.java @@ -29,7 +29,6 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.shell.OzoneAddress; @@ -64,7 +63,7 @@ public class CopyKeyHandler extends BucketHandler { @Override 
protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { String volumeName = address.getVolumeName(); String bucketName = address.getBucketName(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java index 4c795f1e82b..a67343976e7 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java @@ -22,7 +22,6 @@ import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -55,7 +54,7 @@ public class DeleteKeyHandler extends KeyHandler { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { String volumeName = address.getVolumeName(); String bucketName = address.getBucketName(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/GetKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/GetKeyHandler.java index 501a64238f0..c01f93da9c7 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/GetKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/GetKeyHandler.java @@ -61,7 +61,7 @@ public class GetKeyHandler extends KeyHandler { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { String volumeName = address.getVolumeName(); String bucketName = address.getBucketName(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java index bbef5841439..f4ac9e1fe8f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java @@ -23,12 +23,9 @@ import org.apache.hadoop.hdds.cli.GenericParentCommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.shell.OzoneShell; import org.apache.hadoop.ozone.shell.Shell; -import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.ParentCommand; @@ -55,9 +52,8 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -@MetaInfServices(SubcommandWithParent.class) public class KeyCommands - implements GenericParentCommand, Callable, SubcommandWithParent { + implements GenericParentCommand, Callable { @ParentCommand private Shell shell; @@ -77,9 +73,4 @@ public boolean isVerbose() { public OzoneConfiguration createOzoneConfiguration() { return shell.createOzoneConfiguration(); } - - @Override - public Class getParentType() { - return OzoneShell.class; - } } diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/ListKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/ListKeyHandler.java index 00652b58a95..c96e5e2b59e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/ListKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/ListKeyHandler.java @@ -24,7 +24,6 @@ import com.google.common.base.Strings; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneKey; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.shell.ListOptions; @@ -47,7 +46,7 @@ public class ListKeyHandler extends VolumeBucketHandler { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { if (!Strings.isNullOrEmpty(address.getBucketName())) { listKeysInsideBucket(client, address); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java index 35095dd7ff2..30543f79074 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java @@ -37,7 +37,6 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; @@ -75,7 +74,7 @@ public class PutKeyHandler extends KeyHandler { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { String volumeName = address.getVolumeName(); String bucketName = address.getBucketName(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/RenameKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/RenameKeyHandler.java index f71ac094faf..e48f0804967 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/RenameKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/RenameKeyHandler.java @@ -19,7 +19,6 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.shell.OzoneAddress; import org.apache.hadoop.ozone.shell.bucket.BucketHandler; @@ -46,7 +45,7 @@ public class RenameKeyHandler extends BucketHandler { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { String volumeName = address.getVolumeName(); String bucketName = address.getBucketName(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/RewriteKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/RewriteKeyHandler.java index 35e8da5f381..3dead7a979f 100644 --- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/RewriteKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/RewriteKeyHandler.java @@ -21,7 +21,6 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.shell.MandatoryReplicationOptions; @@ -45,7 +44,7 @@ public class RewriteKeyHandler extends KeyHandler { private MandatoryReplicationOptions replication; @Override - protected void execute(OzoneClient client, OzoneAddress address) throws IOException, OzoneClientException { + protected void execute(OzoneClient client, OzoneAddress address) throws IOException { String volumeName = address.getVolumeName(); String bucketName = address.getBucketName(); String keyName = address.getKeyName(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/prefix/PrefixCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/prefix/PrefixCommands.java index e2d703bbf21..f058c4214d2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/prefix/PrefixCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/prefix/PrefixCommands.java @@ -23,12 +23,9 @@ import org.apache.hadoop.hdds.cli.GenericParentCommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.shell.OzoneShell; import org.apache.hadoop.ozone.shell.Shell; -import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.ParentCommand; @@ -45,9 +42,7 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -@MetaInfServices(SubcommandWithParent.class) -public class PrefixCommands implements GenericParentCommand, Callable, - SubcommandWithParent { +public class PrefixCommands implements GenericParentCommand, Callable { @ParentCommand private Shell shell; @@ -67,9 +62,4 @@ public boolean isVerbose() { public OzoneConfiguration createOzoneConfiguration() { return shell.createOzoneConfiguration(); } - - @Override - public Class getParentType() { - return OzoneShell.class; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/s3/S3Handler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/s3/S3Handler.java index 91cde308cb2..62b36230fcd 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/s3/S3Handler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/s3/S3Handler.java @@ -49,7 +49,7 @@ protected OzoneAddress getAddress() throws OzoneClientException { @Override protected OzoneClient createClient(OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { return address.createClientForS3Commands(getConf(), omServiceID); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/RenameSnapshotHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/RenameSnapshotHandler.java index 63b61b1ec66..f7569cac92e 100644 --- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/RenameSnapshotHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/RenameSnapshotHandler.java @@ -20,7 +20,6 @@ import java.io.IOException; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.shell.Handler; import org.apache.hadoop.ozone.shell.OzoneAddress; import org.apache.hadoop.ozone.shell.bucket.BucketUri; @@ -50,7 +49,7 @@ protected OzoneAddress getAddress() { } @Override - protected void execute(OzoneClient client, OzoneAddress address) throws IOException, OzoneClientException { + protected void execute(OzoneClient client, OzoneAddress address) throws IOException { String volumeName = snapshotPath.getValue().getVolumeName(); String bucketName = snapshotPath.getValue().getBucketName(); OmUtils.validateSnapshotName(snapshotNewName); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java index 25a3c1c66fe..e4ae7f5ad7a 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java @@ -23,12 +23,9 @@ import org.apache.hadoop.hdds.cli.GenericParentCommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.shell.OzoneShell; import org.apache.hadoop.ozone.shell.Shell; -import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.ParentCommand; @@ -48,9 +45,7 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -@MetaInfServices(SubcommandWithParent.class) -public class SnapshotCommands implements GenericParentCommand, Callable, - SubcommandWithParent { +public class SnapshotCommands implements GenericParentCommand, Callable { @ParentCommand private Shell shell; @@ -70,9 +65,4 @@ public boolean isVerbose() { public OzoneConfiguration createOzoneConfiguration() { return shell.createOzoneConfiguration(); } - - @Override - public Class getParentType() { - return OzoneShell.class; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantHandler.java index a76ab7420af..8800d22e61e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantHandler.java @@ -47,7 +47,7 @@ protected OzoneAddress getAddress() throws OzoneClientException { @Override protected OzoneClient createClient(OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { return address.createClientForS3Commands(getConf(), omServiceID); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantUserCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantUserCommands.java index 86a051fb76e..baff85d0bf2 100644 --- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantUserCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantUserCommands.java @@ -20,11 +20,8 @@ import org.apache.hadoop.hdds.cli.GenericParentCommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.shell.OzoneShell; import org.apache.hadoop.ozone.shell.Shell; -import org.kohsuke.MetaInfServices; import picocli.CommandLine; import java.util.concurrent.Callable; @@ -46,9 +43,8 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -@MetaInfServices(SubcommandWithParent.class) public class TenantUserCommands implements - GenericParentCommand, Callable, SubcommandWithParent { + GenericParentCommand, Callable { @CommandLine.ParentCommand private Shell shell; @@ -68,9 +64,4 @@ public boolean isVerbose() { public OzoneConfiguration createOzoneConfiguration() { return shell.createOzoneConfiguration(); } - - @Override - public Class getParentType() { - return OzoneShell.class; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/CancelTokenHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/CancelTokenHandler.java index 73c9264a807..f76f88b6655 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/CancelTokenHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/CancelTokenHandler.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.shell.token; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.shell.OzoneAddress; import picocli.CommandLine.Command; @@ -34,7 +33,7 @@ public class CancelTokenHandler extends TokenHandler { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { client.getObjectStore().cancelDelegationToken(getToken()); out().printf("Token canceled successfully.%n"); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/GetTokenHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/GetTokenHandler.java index dadba506ae4..133e983dd1c 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/GetTokenHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/GetTokenHandler.java @@ -65,7 +65,7 @@ protected boolean isApplicable() { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { Token token = client.getObjectStore() .getDelegationToken(new Text(renewer.getValue())); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/RenewTokenHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/RenewTokenHandler.java index 581093050ae..8b578b0f172 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/RenewTokenHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/RenewTokenHandler.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.shell.token; import org.apache.hadoop.ozone.client.OzoneClient; -import 
org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.shell.OzoneAddress; import picocli.CommandLine.Command; @@ -35,7 +34,7 @@ public class RenewTokenHandler extends TokenHandler { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException { + throws IOException { long expiryTime = client.getObjectStore().renewDelegationToken(getToken()); out().printf("Token renewed successfully, expiry time: %s.%n", Instant.ofEpochMilli(expiryTime)); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/TokenCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/TokenCommands.java index 5b449c6cc54..3223b5b49ed 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/TokenCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/TokenCommands.java @@ -23,12 +23,9 @@ import org.apache.hadoop.hdds.cli.GenericParentCommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.shell.OzoneShell; import org.apache.hadoop.ozone.shell.Shell; -import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.ParentCommand; @@ -45,9 +42,8 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -@MetaInfServices(SubcommandWithParent.class) public class TokenCommands - implements GenericParentCommand, Callable, SubcommandWithParent { + implements GenericParentCommand, Callable { @ParentCommand private Shell shell; @@ -67,9 +63,4 @@ public boolean isVerbose() { public OzoneConfiguration createOzoneConfiguration() { return shell.createOzoneConfiguration(); } - - @Override - public Class getParentType() { - return OzoneShell.class; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeCommands.java index 8c52f0ada95..1cf88552030 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeCommands.java @@ -23,12 +23,9 @@ import org.apache.hadoop.hdds.cli.GenericParentCommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.shell.OzoneShell; import org.apache.hadoop.ozone.shell.Shell; -import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.ParentCommand; @@ -53,9 +50,7 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -@MetaInfServices(SubcommandWithParent.class) -public class VolumeCommands implements GenericParentCommand, Callable, - SubcommandWithParent { +public class VolumeCommands implements GenericParentCommand, Callable { @ParentCommand private Shell shell; @@ -75,9 +70,4 @@ public boolean isVerbose() { public OzoneConfiguration createOzoneConfiguration() { return shell.createOzoneConfiguration(); } - - @Override - public Class getParentType() { - return OzoneShell.class; - } } diff --git 
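The command-group classes above (PrefixCommands, SnapshotCommands, TenantUserCommands, TokenCommands, VolumeCommands) lose @MetaInfServices(SubcommandWithParent.class) and getParentType(), so they are no longer attached to the shell through ServiceLoader discovery; presumably the parent command now lists its subcommands explicitly in its picocli annotation. A hedged sketch of that explicit wiring, with an invented parent class and an illustrative subcommand list:

```java
// Illustrative only: explicit picocli registration replacing ServiceLoader
// discovery. The actual parent command and its subcommand list in Ozone may
// differ; the classes below are used purely as examples.
import java.util.concurrent.Callable;

import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.ozone.shell.snapshot.SnapshotCommands;
import org.apache.hadoop.ozone.shell.token.TokenCommands;
import org.apache.hadoop.ozone.shell.volume.VolumeCommands;
import picocli.CommandLine.Command;

@Command(name = "example-sh",
    description = "Sketch of a parent command with explicit subcommand wiring",
    versionProvider = HddsVersionProvider.class,
    mixinStandardHelpOptions = true,
    subcommands = {
        VolumeCommands.class,    // previously discovered via @MetaInfServices
        SnapshotCommands.class,
        TokenCommands.class
    })
public class ExampleShell implements Callable<Void> {
  @Override
  public Void call() {
    // The parent command itself just delegates; picocli prints help
    // when no subcommand is supplied.
    return null;
  }
}
```

Explicit registration keeps the command tree visible in one place instead of relying on a runtime classpath scan.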
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/utils/Filter.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/utils/Filter.java index 129e1a6158d..bc861bffafe 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/utils/Filter.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/utils/Filter.java @@ -82,10 +82,12 @@ public void setNextLevel(Map nextLevel) { public FilterOperator getFilterOperator(String op) { if (op.equalsIgnoreCase("equals")) { return FilterOperator.EQUALS; - } else if (op.equalsIgnoreCase("max")) { - return FilterOperator.MAX; - } else if (op.equalsIgnoreCase("min")) { - return FilterOperator.MIN; + } else if (op.equalsIgnoreCase("GREATER")) { + return FilterOperator.GREATER; + } else if (op.equalsIgnoreCase("LESSER")) { + return FilterOperator.LESSER; + } else if (op.equalsIgnoreCase("REGEX")) { + return FilterOperator.REGEX; } else { return null; } @@ -101,7 +103,8 @@ public String toString() { */ public enum FilterOperator { EQUALS, - MAX, - MIN; + LESSER, + GREATER, + REGEX; } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/utils/FormattingCLIUtils.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/utils/FormattingCLIUtils.java new file mode 100644 index 00000000000..050f1b06e7a --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/utils/FormattingCLIUtils.java @@ -0,0 +1,291 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.utils; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * We define this class to output information in a tabular format, + * making the printed information easier to read. + * + * For example, in OM output: + * If it's in HA mode: + * + * +------------------------------------------------------+ + * | Ozone Manager Roles | + * +---------------------------------+---------+----------+ + * | Host Name | Node ID | Role | + * +---------------------------------+---------+----------+ + * | bigdata-ozone-online32 | om32 | FOLLOWER | + * | bigdata-ozone-online30 | om30 | FOLLOWER | + * | bigdata-ozone-online31 | om31 | LEADER | + * +---------------------------------+---------+----------+ + */ +public final class FormattingCLIUtils { + /** Table title. */ + private String title; + /** Last processed row type. */ + private TableRowType lastTableRowType; + /** StringBuilder object used to concatenate strings. */ + private StringBuilder join; + /** An ordered Map that holds each row of data. */ + private List tableRows; + /** Maps the maximum length of each column. */ + private Map maxColMap; + + /** + * Contains the title constructor. 
+ * @param title titleName + */ + public FormattingCLIUtils(String title) { + this.init(); + this.title = title; + } + + /** + * Initialize the data. + */ + private void init() { + this.join = new StringBuilder(); + this.tableRows = new ArrayList<>(); + this.maxColMap = new HashMap<>(); + } + + /** + * Adds elements from the collection to the header data in the table. + * @param headers Header data + * @return FormattingCLIUtils object + */ + public FormattingCLIUtils addHeaders(List headers) { + return this.appendRows(TableRowType.HEADER, headers.toArray()); + } + + /** + * Adds a row of normal data to the table. + * @param objects Common row data + * @return FormattingCLIUtils object + */ + public FormattingCLIUtils addLine(Object[] objects) { + return this.appendRows(TableRowType.LINE, objects); + } + + /** + * Adds the middle row of data to the table. + * @param tableRowType TableRowType + * @param objects Table row data + * @return FormattingCLIUtils object + */ + private FormattingCLIUtils appendRows(TableRowType tableRowType, Object[] objects) { + if (objects != null && objects.length > 0) { + int len = objects.length; + if (this.maxColMap.size() > len) { + throw new IllegalArgumentException("The number of columns that inserted a row " + + "of data into the table is different from the number of previous columns, check!"); + } + List lines = new ArrayList<>(); + for (int i = 0; i < len; i++) { + Object o = objects[i]; + String value = o == null ? "null" : o.toString(); + lines.add(value); + Integer maxColSize = this.maxColMap.get(i); + if (maxColSize == null) { + this.maxColMap.put(i, value.length()); + continue; + } + if (value.length() > maxColSize) { + this.maxColMap.put(i, value.length()); + } + } + this.tableRows.add(new TableRow(tableRowType, lines)); + } + return this; + } + + /** + * Builds the string for the row of the table title. + */ + private void buildTitle() { + if (this.title != null) { + int maxTitleSize = 0; + for (Integer maxColSize : this.maxColMap.values()) { + maxTitleSize += maxColSize; + } + maxTitleSize += 3 * (this.maxColMap.size() - 1); + if (this.title.length() > maxTitleSize) { + this.title = this.title.substring(0, maxTitleSize); + } + this.join.append("+"); + for (int i = 0; i < maxTitleSize + 2; i++) { + this.join.append("-"); + } + this.join.append("+\n") + .append("|") + .append(StrUtils.center(this.title, maxTitleSize + 2, ' ')) + .append("|\n"); + this.lastTableRowType = TableRowType.TITLE; + } + } + + /** + * Build the table, first build the title, and then walk through each row of data to build. + */ + private void buildTable() { + this.buildTitle(); + for (int i = 0, len = this.tableRows.size(); i < len; i++) { + List data = this.tableRows.get(i).data; + switch (this.tableRows.get(i).tableRowType) { + case HEADER: + if (this.lastTableRowType != TableRowType.HEADER) { + this.buildRowBorder(data); + } + this.buildRowLine(data); + this.buildRowBorder(data); + break; + case LINE: + this.buildRowLine(data); + if (i == len - 1) { + this.buildRowBorder(data); + } + break; + default: + break; + } + } + } + + /** + * Method to build a border row. + * @param data dataLine + */ + private void buildRowBorder(List data) { + this.join.append("+"); + for (int i = 0, len = data.size(); i < len; i++) { + for (int j = 0; j < this.maxColMap.get(i) + 2; j++) { + this.join.append("-"); + } + this.join.append("+"); + } + this.join.append("\n"); + } + + /** + * A way to build rows of data. 
+ * @param data dataLine + */ + private void buildRowLine(List data) { + this.join.append("|"); + for (int i = 0, len = data.size(); i < len; i++) { + this.join.append(StrUtils.center(data.get(i), this.maxColMap.get(i) + 2, ' ')) + .append("|"); + } + this.join.append("\n"); + } + + /** + * Renders the built table as a string. + * @return ASCII string of the table + */ + public String render() { + this.buildTable(); + return this.join.toString(); + } + + /** + * Entity class holding the type and data of a single table row. + */ + private static class TableRow { + private TableRowType tableRowType; + private List data; + TableRow(TableRowType tableRowType, List data) { + this.tableRowType = tableRowType; + this.data = data; + } + } + + /** + * An enumeration class that distinguishes between table headers and normal table data. + */ + private enum TableRowType { + TITLE, HEADER, LINE + } + + /** + * String utility class. + */ + private static final class StrUtils { + /** + * Centers a string within the given width. + * @param str Character string + * @param size Total size + * @param padChar Fill character + * @return String result + */ + private static String center(String str, int size, char padChar) { + if (str != null && size > 0) { + int strLen = str.length(); + int pads = size - strLen; + if (pads > 0) { + str = leftPad(str, strLen + pads / 2, padChar); + str = rightPad(str, size, padChar); + } + } + return str; + } + + /** + * Left-pads the given string to the given size. + * @param str String + * @param size totalSize + * @param padChar Fill character + * @return String result + */ + private static String leftPad(final String str, int size, char padChar) { + int pads = size - str.length(); + return pads <= 0 ? str : repeat(padChar, pads).concat(str); + } + + /** + * Right-pads the given string to the given size. + * @param str String + * @param size totalSize + * @param padChar Fill character + * @return String result + */ + private static String rightPad(final String str, int size, char padChar) { + int pads = size - str.length(); + return pads <= 0 ? str : str.concat(repeat(padChar, pads)); + } + + /** + * Repeats a character to build a string. 
+ * @param ch String + * @param repeat Number of repeats + * @return String + */ + private static String repeat(char ch, int repeat) { + char[] buf = new char[repeat]; + for (int i = repeat - 1; i >= 0; i--) { + buf[i] = ch; + } + return new String(buf); + } + } +} diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java index 5e259012934..5f0be7859d4 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.debug; +import java.nio.file.Path; import java.nio.file.Paths; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -43,16 +44,13 @@ public class TestDBDefinitionFactory { @Test public void testGetDefinition() { - DBDefinition definition = - DBDefinitionFactory.getDefinition(new OMDBDefinition().getName()); + DBDefinition definition = DBDefinitionFactory.getDefinition(OMDBDefinition.get().getName()); assertInstanceOf(OMDBDefinition.class, definition); - definition = DBDefinitionFactory.getDefinition( - new SCMDBDefinition().getName()); + definition = DBDefinitionFactory.getDefinition(SCMDBDefinition.get().getName()); assertInstanceOf(SCMDBDefinition.class, definition); - definition = DBDefinitionFactory.getDefinition( - new ReconSCMDBDefinition().getName()); + definition = DBDefinitionFactory.getDefinition(ReconSCMDBDefinition.get().getName()); assertInstanceOf(ReconSCMDBDefinition.class, definition); definition = DBDefinitionFactory.getDefinition( @@ -62,20 +60,19 @@ public void testGetDefinition() { definition = DBDefinitionFactory.getDefinition( RECON_CONTAINER_KEY_DB + "_1"); assertInstanceOf(ReconDBDefinition.class, definition); + DBDefinitionFactory.setDnDBSchemaVersion("V2"); - definition = - DBDefinitionFactory.getDefinition(Paths.get("/tmp/test-container.db"), - new OzoneConfiguration()); + final Path dbPath = Paths.get("/tmp/test-container.db"); + final OzoneConfiguration conf = new OzoneConfiguration(); + definition = DBDefinitionFactory.getDefinition(dbPath, conf); assertInstanceOf(DatanodeSchemaTwoDBDefinition.class, definition); + DBDefinitionFactory.setDnDBSchemaVersion("V1"); - definition = - DBDefinitionFactory.getDefinition(Paths.get("/tmp/test-container.db"), - new OzoneConfiguration()); + definition = DBDefinitionFactory.getDefinition(dbPath, conf); assertInstanceOf(DatanodeSchemaOneDBDefinition.class, definition); + DBDefinitionFactory.setDnDBSchemaVersion("V3"); - definition = - DBDefinitionFactory.getDefinition(Paths.get("/tmp/test-container.db"), - new OzoneConfiguration()); + definition = DBDefinitionFactory.getDefinition(dbPath, conf); assertInstanceOf(DatanodeSchemaThreeDBDefinition.class, definition); } } diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java index 5b580c81c0e..6d264456682 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java @@ -18,16 +18,13 @@ package org.apache.hadoop.ozone.genconf; -import org.apache.commons.io.FileUtils; import 
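The new FormattingCLIUtils above is a small builder: addHeaders(...) records the column names, addLine(...) appends rows while tracking per-column widths, and render() emits the bordered ASCII table; the GetScmRatisRolesSubcommand test later in this patch asserts on output of exactly this shape. A usage sketch with invented sample data:

```java
// Usage sketch for the new FormattingCLIUtils table builder; the host names,
// node IDs and roles below are made-up sample data.
import java.util.Arrays;

import org.apache.hadoop.ozone.utils.FormattingCLIUtils;

public final class FormattingCLIUtilsExample {
  public static void main(String[] args) {
    FormattingCLIUtils table = new FormattingCLIUtils("Ozone Manager Roles")
        .addHeaders(Arrays.<Object>asList("Host Name", "Node ID", "Role"));
    table.addLine(new Object[] {"om-host-1", "om1", "LEADER"});
    table.addLine(new Object[] {"om-host-2", "om2", "FOLLOWER"});
    // render() produces the bordered ASCII table shown in the class javadoc.
    System.out.println(table.render());
  }
}
```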
org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.ozone.test.GenericTestUtils; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertNotEquals; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; @@ -40,7 +37,6 @@ import java.io.ByteArrayOutputStream; import java.io.File; -import java.io.IOException; import java.io.PrintStream; import java.io.UnsupportedEncodingException; import java.net.URL; @@ -54,7 +50,7 @@ * Tests GenerateOzoneRequiredConfigurations. */ public class TestGenerateOzoneRequiredConfigurations { - private static File outputBaseDir; + private static GenerateOzoneRequiredConfigurations genconfTool; private static final Logger LOG = LoggerFactory.getLogger(TestGenerateOzoneRequiredConfigurations.class); @@ -72,8 +68,6 @@ public class TestGenerateOzoneRequiredConfigurations { */ @BeforeAll public static void init() throws Exception { - outputBaseDir = GenericTestUtils.getTestDir(); - FileUtils.forceMkdir(outputBaseDir); genconfTool = new GenerateOzoneRequiredConfigurations(); } @@ -94,14 +88,6 @@ public void reset() { System.setErr(OLD_ERR); } - /** - * Cleans up the output base directory. - */ - @AfterAll - public static void cleanup() throws IOException { - FileUtils.deleteDirectory(outputBaseDir); - } - private void execute(String[] args, String msg) throws UnsupportedEncodingException { List arguments = new ArrayList(Arrays.asList(args)); diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestTransactionInfoRepair.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/ldb/TestTransactionInfoRepair.java similarity index 99% rename from hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestTransactionInfoRepair.java rename to hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/ldb/TestTransactionInfoRepair.java index a581e1d29d6..8a768d0f696 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestTransactionInfoRepair.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/ldb/TestTransactionInfoRepair.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.repair; +package org.apache.hadoop.ozone.repair.ldb; import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.Codec; diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestGetScmRatisRolesSubcommand.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestGetScmRatisRolesSubcommand.java new file mode 100644 index 00000000000..346b448cc25 --- /dev/null +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestGetScmRatisRolesSubcommand.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.scm; + +import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.ozone.admin.scm.GetScmRatisRolesSubcommand; +import org.apache.ozone.test.GenericTestUtils; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import picocli.CommandLine; + +/** + * This unit test is used to verify whether the output of + * `TestGetScmRatisRolesSubcommand` meets the expected results. + */ +public class TestGetScmRatisRolesSubcommand { + + @Test + public void testGetScmHARatisRoles() throws Exception { + GetScmRatisRolesSubcommand cmd = new GetScmRatisRolesSubcommand(); + ScmClient client = mock(ScmClient.class); + CommandLine c = new CommandLine(cmd); + c.parseArgs("--table"); + + List result = new ArrayList<>(); + result.add("bigdata-ozone-online31:9894:FOLLOWER:61b1c8e5-da40-4567-8a17-96a0234ba14e:100.3.197.98"); + result.add("bigdata-ozone-online32:9894:LEADER:e428ca07-b2a3-4756-bf9b-a4abb033c7d1:100.3.192.89"); + result.add("bigdata-ozone-online30:9894:FOLLOWER:41f90734-b3ee-4284-ad96-40a286654952:100.3.196.51"); + + when(client.getScmRatisRoles()).thenAnswer(invocation -> result); + when(client.isScmRatisEnable()).thenAnswer(invocation -> true); + + try (GenericTestUtils.SystemOutCapturer capture = + new GenericTestUtils.SystemOutCapturer()) { + cmd.execute(client); + assertThat(capture.getOutput()).contains( + "bigdata-ozone-online31 | 9894 | FOLLOWER | 61b1c8e5-da40-4567-8a17-96a0234ba14e"); + assertThat(capture.getOutput()).contains( + "bigdata-ozone-online32 | 9894 | LEADER | e428ca07-b2a3-4756-bf9b-a4abb033c7d1"); + assertThat(capture.getOutput()).contains( + "bigdata-ozone-online30 | 9894 | FOLLOWER | 41f90734-b3ee-4284-ad96-40a286654952"); + } + } + + @Test + public void testGetScmStandAloneRoles() throws Exception { + + GetScmRatisRolesSubcommand cmd = new GetScmRatisRolesSubcommand(); + ScmClient client = mock(ScmClient.class); + CommandLine c = new CommandLine(cmd); + c.parseArgs("--table"); + + List result = new ArrayList<>(); + result.add("bigdata-ozone-online31:9894"); + + when(client.getScmRatisRoles()).thenAnswer(invocation -> result); + when(client.isScmRatisEnable()).thenAnswer(invocation -> false); + + try (GenericTestUtils.SystemOutCapturer capture = + new GenericTestUtils.SystemOutCapturer()) { + cmd.execute(client); + assertThat(capture.getOutput()).contains("| bigdata-ozone-online31 | 9894 |"); + } + } +} diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java index 620142c244b..7ebb449bff2 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java @@ -42,7 +42,7 @@ public class TestOzoneAddressClientCreation { @Test - 
public void implicitNonHA() throws OzoneClientException, IOException { + public void implicitNonHA() throws IOException { TestableOzoneAddress address = new TestableOzoneAddress("/vol1/bucket1/key1"); address.createClient(new InMemoryConfiguration()); @@ -51,7 +51,7 @@ public void implicitNonHA() throws OzoneClientException, IOException { @Test public void implicitHAOneServiceId() - throws OzoneClientException, IOException { + throws IOException { TestableOzoneAddress address = new TestableOzoneAddress("/vol1/bucket1/key1"); address.createClient( @@ -62,7 +62,7 @@ public void implicitHAOneServiceId() @Test public void implicitHaMultipleServiceId() - throws OzoneClientException, IOException { + throws IOException { TestableOzoneAddress address = new TestableOzoneAddress("/vol1/bucket1/key1"); assertThrows(OzoneClientException.class, () -> @@ -72,7 +72,7 @@ public void implicitHaMultipleServiceId() @Test public void implicitHaMultipleServiceIdWithDefaultServiceId() - throws OzoneClientException, IOException { + throws IOException { TestableOzoneAddress address = new TestableOzoneAddress("/vol1/bucket1/key1"); InMemoryConfiguration conf = new InMemoryConfiguration(OZONE_OM_SERVICE_IDS_KEY, @@ -86,7 +86,7 @@ public void implicitHaMultipleServiceIdWithDefaultServiceId() @Test public void implicitHaMultipleServiceIdWithDefaultServiceIdForS3() - throws OzoneClientException, IOException { + throws IOException { TestableOzoneAddress address = new TestableOzoneAddress("/vol1/bucket1/key1"); OzoneConfiguration conf = new OzoneConfiguration(); @@ -100,7 +100,7 @@ public void implicitHaMultipleServiceIdWithDefaultServiceIdForS3() @Test public void explicitHaMultipleServiceId() - throws OzoneClientException, IOException { + throws IOException { TestableOzoneAddress address = new TestableOzoneAddress("o3://service1/vol1/bucket1/key1"); address.createClient( @@ -111,7 +111,7 @@ public void explicitHaMultipleServiceId() } @Test - public void explicitNonHAHostPort() throws OzoneClientException, IOException { + public void explicitNonHAHostPort() throws IOException { TestableOzoneAddress address = new TestableOzoneAddress("o3://om:9862/vol1/bucket1/key1"); address.createClient(new InMemoryConfiguration()); @@ -122,7 +122,7 @@ public void explicitNonHAHostPort() throws OzoneClientException, IOException { @Test public void explicitHAHostPortWithServiceId() - throws OzoneClientException, IOException { + throws IOException { TestableOzoneAddress address = new TestableOzoneAddress("o3://om:9862/vol1/bucket1/key1"); address.createClient( @@ -134,7 +134,7 @@ public void explicitHAHostPortWithServiceId() @Test public void explicitAHostPortWithServiceIds() - throws OzoneClientException, IOException { + throws IOException { TestableOzoneAddress address = new TestableOzoneAddress("o3://om:9862/vol1/bucket1/key1"); address.createClient( @@ -146,7 +146,7 @@ public void explicitAHostPortWithServiceIds() } @Test - public void explicitNonHAHost() throws OzoneClientException, IOException { + public void explicitNonHAHost() throws IOException { TestableOzoneAddress address = new TestableOzoneAddress("o3://om/vol1/bucket1/key1"); address.createClient( @@ -156,7 +156,7 @@ public void explicitNonHAHost() throws OzoneClientException, IOException { } @Test - public void explicitHAHostPort() throws OzoneClientException, IOException { + public void explicitHAHostPort() throws IOException { TestableOzoneAddress address = new TestableOzoneAddress("o3://om:1234/vol1/bucket1/key1"); address.createClient(new InMemoryConfiguration()); @@ 
-166,7 +166,7 @@ public void explicitHAHostPort() throws OzoneClientException, IOException { } @Test - public void explicitWrongScheme() throws OzoneClientException, IOException { + public void explicitWrongScheme() throws IOException { TestableOzoneAddress address = new TestableOzoneAddress("ssh://host/vol1/bucket1/key1"); assertThrows(OzoneClientException.class, () -> diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/keys/TestChecksumKeyHandler.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/keys/TestChecksumKeyHandler.java index d4fa929614f..3b22573eb13 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/keys/TestChecksumKeyHandler.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/keys/TestChecksumKeyHandler.java @@ -24,7 +24,6 @@ import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; @@ -85,7 +84,7 @@ public void tearDown() { @Test public void testChecksumKeyHandler() - throws OzoneClientException, IOException { + throws IOException { OzoneAddress address = new OzoneAddress("o3://ozone1/volume/bucket/key"); long keySize = 1024L; diff --git a/pom.xml b/pom.xml index a35ac576e03..d59fb86ffd9 100644 --- a/pom.xml +++ b/pom.xml @@ -18,7 +18,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 4.0.0 org.apache.ozone ozone-main - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Main Apache Ozone Main pom @@ -44,9 +44,28 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs - ${distMgmtSnapshotsId} - ${distMgmtSnapshotsName} - ${distMgmtSnapshotsUrl} + apache.snapshots + https://repository.apache.org/snapshots + + false + never + + + false + never + + + + apache.snapshots.https + https://repository.apache.org/content/repositories/snapshots + + false + never + + + false + never + @@ -63,12 +82,15 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs + + 2023-01-01T00:00:00Z + 2.10.2 - 3.3.6 + 3.4.1 ${ozone.version} - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Indiana Dunes ${hdds.version} ${ozone.version} @@ -76,7 +98,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${hdds.version} - 3.1.1 + 3.1.2 1.0.6 @@ -95,7 +117,9 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs UTF-8 UTF-8 - 3.2.4 + true + + 3.2.7 bash false @@ -111,14 +135,14 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.9.4 1.8.0 - 1.17.0 + 1.17.1 3.2.2 1.27.1 2.11.0 - 1.5.6-4 + 1.5.6-8 1.4.0 - 2.16.1 - 3.14.0 + 2.18.0 + 3.17.0 1.2 1.1 3.6.1 @@ -136,10 +160,10 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.3.2 3.26.3 0.8.0.RELEASE - 1.78.1 + 1.79 10.14.2.0 3.0.2 - 2.8.0 + 2.9.1 3.2.6 0.8.12 3.30.2-GA @@ -147,7 +171,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 2.3.3 2.3.9 0.10.4 - 3.1.19 + 3.1.20 0.1.55 2.0 3.1.0 @@ -176,7 +200,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 2.0.16 - 2.24.0 + 2.24.2 3.4.4 1.2.25 @@ -193,11 +217,11 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 2.5.0 
- 3.7.1 - 1.1.1 + 3.23.4 + 1.3.0 3.1.12.2 - 2.1.9 + 3.6.1 4.12.0 4.2.2 2.6.1 @@ -214,11 +238,9 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 6.0.0 2.10.1 - 2.7.5 - 3.6.0 4.11.0 2.2 - 5.10.3 + 5.11.3 3.8.4 @@ -232,7 +254,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.58.0 7.7.3 - 3.46.1.3 + 3.47.1.0 3.1.9.Final @@ -257,7 +279,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 3.4.0 3.9.0 3.1.3 - 3.1.0 + 3.3.0 3.6.0 3.4.2 3.4.0 @@ -265,25 +287,25 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.6.1 1.7.0 3.5.0 - 3.10.0 + 3.11.1 3.7.1 0.16.1 3.1.3 3.6.0 - 3.7.1 + 3.8.1 4.2.2 - 0.45.0 - 3.4.1 - 2.4.0 + 0.45.1 + 3.5.0 + 2.5.0 1.0-beta-1 1.0-M1 - 3.4.0 - 3.20.0 + 3.6.0 + 3.21.0 3.1.0 9.3 1200 1.12.661 - 1.15.0 + 1.15.1 ${hadoop.version} @@ -294,10 +316,11 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${basedir}/target/classes 1.9.7 - 1.15.0 - 2.5.0 + 1.14 + 2.6.0 1.4.0 3.9.12 + 3.28.0 5.3.39 3.11.10 @@ -309,28 +332,27 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.2.1 3.9.9 - 1.1.10.6 - 1.2.0 + 1.1.10.7 9.40 - com.squareup.okio - okio - ${okio.version} - - - com.squareup.okhttp - okhttp - ${okhttp.version} + info.picocli + picocli + ${picocli.version} info.picocli - picocli + picocli-shell-jline3 ${picocli.version} + + org.jline + jline + ${jline.version} + org.apache.derby derby @@ -1030,7 +1052,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.apache.hadoop.thirdparty - hadoop-shaded-protobuf_3_7 + hadoop-shaded-protobuf_3_25 ${hadoop-thirdparty.version} @@ -1308,11 +1330,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs snappy-java ${snappy-java.version} - - org.apache.hadoop.thirdparty - hadoop-shaded-guava - ${hadoop-shaded-guava.version} - com.github.vlsi.mxgraph jgraphx @@ -1428,6 +1445,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${maven-javadoc-plugin.version} none + true @@ -1703,6 +1721,38 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs + + org.apache.maven.plugins + maven-dependency-plugin + + + add-classpath-descriptor + prepare-package + + build-classpath + + + ${project.build.outputDirectory}/${project.artifactId}.classpath + $HDDS_LIB_JARS_DIR + true + runtime + ${classpath.skip} + + + + copy-jars + prepare-package + + copy-dependencies + + + ${project.build.directory}/share/ozone/lib + runtime + ${classpath.skip} + + + + maven-clean-plugin @@ -1943,7 +1993,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs package - makeAggregateBom + makeBom @@ -2075,12 +2125,26 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs void true - true + true true true true + + + skip-frontend + + + skipRecon + + + + true + true + + + rocks-native-tests