diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bd0a12edd938..ace3a7e15b98 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -28,8 +28,10 @@ on: required: false env: FAIL_FAST: ${{ github.event_name == 'pull_request' }} + # Minimum required Java version for running Ozone is defined in pom.xml (javac.version). + TEST_JAVA_VERSION: 17 # JDK version used by CI build and tests; should match the JDK version in apache/ozone-runner image MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 - OZONE_WITH_COVERAGE: ${{ github.repository == 'apache/ozone' && github.event_name == 'push' }} + OZONE_WITH_COVERAGE: ${{ github.event_name == 'push' }} jobs: build-info: runs-on: ubuntu-20.04 @@ -102,10 +104,6 @@ jobs: runs-on: ubuntu-20.04 timeout-minutes: 60 if: needs.build-info.outputs.needs-build == 'true' - strategy: - matrix: - java: [ 8 ] - fail-fast: false steps: - name: Checkout project uses: actions/checkout@v4 @@ -136,11 +134,11 @@ jobs: name: ratis-jars path: | ~/.m2/repository/org/apache/ratis - - name: Setup java + - name: Setup java ${{ env.TEST_JAVA_VERSION }} uses: actions/setup-java@v4 with: distribution: 'temurin' - java-version: ${{ matrix.java }} + java-version: ${{ env.TEST_JAVA_VERSION }} - name: Run a full build run: hadoop-ozone/dev-support/checks/build.sh -Pdist -Psrc -Dmaven.javadoc.skip=true ${{ inputs.ratis_args }} env: @@ -155,6 +153,7 @@ jobs: retention-days: 1 - name: Store source tarball for compilation uses: actions/upload-artifact@v4 + if: needs.build-info.outputs.needs-compile == 'true' with: name: ozone-src path: hadoop-ozone/dist/target/ozone-*-src.tar.gz @@ -171,6 +170,8 @@ jobs: - build-info - build - basic + - dependency + - license timeout-minutes: 45 if: needs.build-info.outputs.needs-compile == 'true' strategy: @@ -179,7 +180,7 @@ jobs: include: - os: ubuntu-20.04 - java: 8 - os: macos-12 + os: macos-13 fail-fast: false runs-on: ${{ matrix.os }} steps: @@ -212,13 +213,13 @@ jobs: name: ratis-jars path: | ~/.m2/repository/org/apache/ratis - - name: Setup java + - name: Setup java ${{ matrix.java }} uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: ${{ matrix.java }} - name: Compile Ozone using Java ${{ matrix.java }} - run: hadoop-ozone/dev-support/checks/build.sh -Pdist -Dskip.npx -Dskip.installnpx -Djavac.version=${{ matrix.java }} ${{ inputs.ratis_args }} + run: hadoop-ozone/dev-support/checks/build.sh -Pdist -Dskip.npx -Dskip.installnpx -Dmaven.javadoc.failOnWarnings=${{ matrix.java != 8 }} -Djavac.version=${{ matrix.java }} ${{ inputs.ratis_args }} env: OZONE_WITH_COVERAGE: false DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} @@ -261,7 +262,7 @@ jobs: name: ratis-jars path: | ~/.m2/repository/org/apache/ratis - - name: Setup java + - name: Setup java 8 uses: actions/setup-java@v4 with: distribution: 'temurin' @@ -309,11 +310,11 @@ jobs: name: ratis-jars path: | ~/.m2/repository/org/apache/ratis - - name: Setup java + - name: Setup java ${{ env.TEST_JAVA_VERSION }} uses: actions/setup-java@v4 with: distribution: 'temurin' - java-version: 8 + java-version: ${{ env.TEST_JAVA_VERSION }} - name: Execute tests run: hadoop-ozone/dev-support/checks/${{ github.job }}.sh ${{ inputs.ratis_args }} continue-on-error: true @@ -388,6 +389,11 @@ jobs: name: ozone-repo path: | ~/.m2/repository/org/apache/ozone + - name: Setup java ${{ env.TEST_JAVA_VERSION }} + uses: actions/setup-java@v4 + with: + 
distribution: 'temurin' + java-version: ${{ env.TEST_JAVA_VERSION }} - name: Execute tests run: | hadoop-ozone/dev-support/checks/${{ github.job }}.sh @@ -402,11 +408,73 @@ jobs: name: ${{ github.job }} path: target/${{ github.job }} continue-on-error: true + repro: + needs: + - build-info + - build + runs-on: ubuntu-20.04 + timeout-minutes: 30 + steps: + - name: Checkout project + uses: actions/checkout@v4 + - name: Cache for maven dependencies + uses: actions/cache/restore@v4 + with: + path: | + ~/.m2/repository/*/*/* + !~/.m2/repository/org/apache/ozone + key: maven-repo-${{ hashFiles('**/pom.xml') }} + restore-keys: | + maven-repo- + - name: Download Ozone repo + id: download-ozone-repo + uses: actions/download-artifact@v4 + with: + name: ozone-repo + path: | + ~/.m2/repository/org/apache/ozone + - name: Download Ratis repo + if: ${{ inputs.ratis_args != '' }} + uses: actions/download-artifact@v4 + with: + name: ratis-jars + path: | + ~/.m2/repository/org/apache/ratis + - name: Setup java ${{ env.TEST_JAVA_VERSION }} + uses: actions/setup-java@v4 + with: + distribution: 'temurin' + java-version: ${{ env.TEST_JAVA_VERSION }} + - name: Execute tests + run: | + hadoop-ozone/dev-support/checks/${{ github.job }}.sh -Pdist -Psrc -Dmaven.javadoc.skip=true ${{ inputs.ratis_args }} + continue-on-error: true + - name: Summary of failures + run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt + if: ${{ !cancelled() }} + - name: Install diffoscope + run: | + sudo apt update -q + sudo apt install -y diffoscope + if: ${{ failure() }} + - name: Check artifact differences + run: | + hadoop-ozone/dev-support/checks/_diffoscope.sh + if: ${{ failure() }} + - name: Archive build results + uses: actions/upload-artifact@v4 + if: always() + with: + name: ${{ github.job }} + path: target/${{ github.job }} + continue-on-error: true acceptance: needs: - build-info - build - basic + - dependency + - license runs-on: ubuntu-20.04 timeout-minutes: 150 if: needs.build-info.outputs.needs-compose-tests == 'true' @@ -454,6 +522,8 @@ jobs: - build-info - build - basic + - dependency + - license runs-on: ubuntu-20.04 timeout-minutes: 60 if: needs.build-info.outputs.needs-kubernetes-tests == 'true' @@ -529,11 +599,11 @@ jobs: name: ratis-jars path: | ~/.m2/repository/org/apache/ratis - - name: Setup java + - name: Setup java ${{ env.TEST_JAVA_VERSION }} uses: actions/setup-java@v4 with: distribution: 'temurin' - java-version: 17 + java-version: ${{ env.TEST_JAVA_VERSION }} - name: Execute tests continue-on-error: true run: | @@ -546,7 +616,11 @@ jobs: env: DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} - name: Summary of failures - run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt + run: | + if [[ -s "target/${{ github.job }}/summary.md" ]]; then + cat target/${{ github.job }}/summary.md >> $GITHUB_STEP_SUMMARY + fi + hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt if: ${{ !cancelled() }} - name: Archive build results uses: actions/upload-artifact@v4 @@ -558,7 +632,7 @@ jobs: coverage: runs-on: ubuntu-20.04 timeout-minutes: 30 - if: github.repository == 'apache/ozone' && github.event_name == 'push' + if: github.event_name == 'push' needs: - build-info - acceptance @@ -587,15 +661,16 @@ jobs: run: | mkdir -p hadoop-ozone/dist/target tar xzvf target/artifacts/ozone-bin/ozone*.tar.gz -C hadoop-ozone/dist/target - - name: Calculate combined coverage - run: ./hadoop-ozone/dev-support/checks/coverage.sh - - name: Setup 
java 17 + - name: Setup java ${{ env.TEST_JAVA_VERSION }} uses: actions/setup-java@v4 with: distribution: 'temurin' - java-version: 17 + java-version: ${{ env.TEST_JAVA_VERSION }} + - name: Calculate combined coverage + run: ./hadoop-ozone/dev-support/checks/coverage.sh - name: Upload coverage to Sonar run: ./hadoop-ozone/dev-support/checks/sonar.sh + if: github.repository == 'apache/ozone' env: SONAR_TOKEN: ${{ secrets.SONARCLOUD_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.mvn/extensions.xml b/.mvn/extensions.xml index 20f1c034c584..2f7ddb6ad2a2 100644 --- a/.mvn/extensions.xml +++ b/.mvn/extensions.xml @@ -29,6 +29,6 @@ com.gradle common-custom-user-data-maven-extension - 2.0 + 2.0.1 diff --git a/dev-support/ci/selective_ci_checks.bats b/dev-support/ci/selective_ci_checks.bats index a95a981bdd3e..6edd38d68fe8 100644 --- a/dev-support/ci/selective_ci_checks.bats +++ b/dev-support/ci/selective_ci_checks.bats @@ -52,7 +52,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=false assert_output -p needs-compose-tests=true - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=false assert_output -p needs-kubernetes-tests=false } @@ -76,7 +76,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=false assert_output -p needs-compose-tests=true - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=false assert_output -p needs-kubernetes-tests=true } @@ -100,7 +100,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=false assert_output -p needs-compose-tests=true - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=false assert_output -p needs-kubernetes-tests=true } @@ -112,7 +112,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=false } @@ -136,7 +136,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=false } @@ -148,7 +148,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=false } @@ -160,7 +160,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=false } @@ -172,7 +172,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false - 
assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=false } @@ -184,7 +184,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=false assert_output -p needs-kubernetes-tests=false } @@ -196,7 +196,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=false assert_output -p needs-kubernetes-tests=false } @@ -208,7 +208,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=false assert_output -p needs-compose-tests=false - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=false assert_output -p needs-kubernetes-tests=true } @@ -232,7 +232,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=true } @@ -244,7 +244,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=true } @@ -256,7 +256,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=true } @@ -268,7 +268,7 @@ load bats-assert/load.bash assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true - assert_output -p needs-dependency-check=false + assert_output -p needs-dependency-check=true assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=true } @@ -429,3 +429,15 @@ load bats-assert/load.bash assert_output -p needs-integration-tests=false assert_output -p needs-kubernetes-tests=false } + +@test "properties file in resources" { + run dev-support/ci/selective_ci_checks.sh 71b8bdd8becf72d6f7d4e7986895504b8259b3e5 + + assert_output -p 'basic-checks=["rat","checkstyle","native"]' + assert_output -p needs-build=false + assert_output -p needs-compile=false + assert_output -p needs-compose-tests=false + assert_output -p needs-dependency-check=false + assert_output -p needs-integration-tests=true + assert_output -p needs-kubernetes-tests=false +} diff --git a/dev-support/ci/selective_ci_checks.sh b/dev-support/ci/selective_ci_checks.sh index e512b4a5d626..869d36fc6ccd 100755 --- a/dev-support/ci/selective_ci_checks.sh +++ b/dev-support/ci/selective_ci_checks.sh @@ -330,6 +330,7 @@ function check_needs_compile() { if [[ ${match_count} != "0" ]]; then 
compile_needed=true + dependency_check_needed=true fi start_end::group_end @@ -373,6 +374,7 @@ function check_needs_checkstyle() { "^hadoop-hdds/dev-support/checkstyle" "pom.xml" "src/..../java" + "src/..../resources/.*\.properties" ) local ignore_array=( "^hadoop-ozone/dist" @@ -519,6 +521,7 @@ function calculate_test_types_to_run() { echo "Looks like ${COUNT_CORE_OTHER_CHANGED_FILES} core files changed, running all tests." echo compose_tests_needed=true + dependency_check_needed=true integration_tests_needed=true kubernetes_tests_needed=true else @@ -526,12 +529,14 @@ function calculate_test_types_to_run() { echo if [[ ${COUNT_COMPOSE_CHANGED_FILES} != "0" ]] || [[ ${COUNT_ROBOT_CHANGED_FILES} != "0" ]]; then compose_tests_needed="true" + dependency_check_needed=true fi if [[ ${COUNT_INTEGRATION_CHANGED_FILES} != "0" ]]; then integration_tests_needed="true" fi if [[ ${COUNT_KUBERNETES_CHANGED_FILES} != "0" ]] || [[ ${COUNT_ROBOT_CHANGED_FILES} != "0" ]]; then kubernetes_tests_needed="true" + dependency_check_needed=true fi fi start_end::group_end @@ -589,6 +594,7 @@ get_count_robot_files get_count_misc_files check_needs_build +check_needs_dependency check_needs_compile # calculate basic checks to run @@ -596,7 +602,6 @@ BASIC_CHECKS="rat" check_needs_author check_needs_bats check_needs_checkstyle -check_needs_dependency check_needs_docs check_needs_findbugs check_needs_native diff --git a/dev-support/pom.xml b/dev-support/pom.xml index e11e3b32ee44..bc39cd3437e4 100644 --- a/dev-support/pom.xml +++ b/dev-support/pom.xml @@ -18,7 +18,7 @@ ozone-main org.apache.ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT 4.0.0 ozone-dev-support diff --git a/hadoop-hdds/annotations/pom.xml b/hadoop-hdds/annotations/pom.xml index 3bb148d5c255..0a9610870406 100644 --- a/hadoop-hdds/annotations/pom.xml +++ b/hadoop-hdds/annotations/pom.xml @@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-annotation-processing - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone annotation processing tools for validating custom annotations at compile time. diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml index bf728403cb41..333b960fc243 100644 --- a/hadoop-hdds/client/pom.xml +++ b/hadoop-hdds/client/pom.xml @@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-client - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Client Library Apache Ozone HDDS Client jar diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java index d5423d4ec0bb..48c77f2c8634 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java @@ -364,7 +364,6 @@ public void writeOnRetry(long len) throws IOException { * it is a no op. 
* @param bufferFull flag indicating whether bufferFull condition is hit or * its called as part flush/close - * @return minimum commit index replicated to all nodes * @throws IOException IOException in case watch gets timed out */ public void watchForCommit(boolean bufferFull) throws IOException { diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java index f792a678dad4..34fb728be950 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java @@ -274,16 +274,6 @@ protected BlockData getBlockDataUsingClient() throws IOException { blockID); } - DatanodeBlockID.Builder blkIDBuilder = - DatanodeBlockID.newBuilder().setContainerID(blockID.getContainerID()) - .setLocalID(blockID.getLocalID()) - .setBlockCommitSequenceId(blockID.getBlockCommitSequenceId()); - - int replicaIndex = pipeline.getReplicaIndex(pipeline.getClosestNode()); - if (replicaIndex > 0) { - blkIDBuilder.setReplicaIndex(replicaIndex); - } - GetBlockResponseProto response = ContainerProtocolCalls.getBlock( xceiverClient, VALIDATORS, blockID, tokenRef.get(), pipeline.getReplicaIndexes()); return response.getBlockData(); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java index 3e78abbf485a..e3f7f043a9ed 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java @@ -75,7 +75,6 @@ public synchronized int read(ByteBuffer byteBuffer) throws IOException { * readWithStrategy implementation, as it will never be called by the tests. * * @param strategy - * @return * @throws IOException */ protected abstract int readWithStrategy(ByteReaderStrategy strategy) throws diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java index d347dee85121..8287a5a78bb2 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java @@ -43,7 +43,6 @@ public interface BlockInputStreamFactory { * @param blockInfo The blockInfo representing the block. * @param pipeline The pipeline to be used for reading the block * @param token The block Access Token - * @param verifyChecksum Whether to verify checksums or not. * @param xceiverFactory Factory to create the xceiver in the client * @param refreshFunction Function to refresh the block location if needed * @return BlockExtendedInputStream of the correct type. 
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java index 8a87234a7707..d9cadc948a61 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java @@ -71,7 +71,6 @@ public BlockInputStreamFactoryImpl(ByteBufferPool byteBufferPool, * @param blockInfo The blockInfo representing the block. * @param pipeline The pipeline to be used for reading the block * @param token The block Access Token - * @param verifyChecksum Whether to verify checksums or not. * @param xceiverFactory Factory to create the xceiver in the client * @param refreshFunction Function to refresh the pipeline if needed * @return BlockExtendedInputStream of the correct type. diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java index 6342de2c3381..83abb937b03e 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java @@ -152,7 +152,6 @@ protected int calculateExpectedDataBlocks(ECReplicationConfig rConfig) { * Using the current position, returns the index of the blockStream we should * be reading from. This is the index in the internal array holding the * stream reference. The block group index will be one greater than this. - * @return */ protected int currentStreamIndex() { return (int)((position / ecChunkSize) % repConfig.getData()); @@ -206,7 +205,6 @@ protected BlockExtendedInputStream getOrOpenStream(int locationIndex) throws IOE * to the replicaIndex given based on the EC pipeline fetched from SCM. * @param replicaIndex * @param refreshFunc - * @return */ protected Function ecPipelineRefreshFunction( int replicaIndex, Function refreshFunc) { @@ -241,7 +239,6 @@ protected Function ecPipelineRefreshFunction( * potentially partial last stripe. Note that the internal block index is * numbered starting from 1. * @param index - Index number of the internal block, starting from 1 - * @return */ protected long internalBlockLength(int index) { long lastStripe = blockInfo.getLength() % stripeSize; @@ -344,7 +341,6 @@ protected boolean shouldRetryFailedRead(int failedIndex) { * strategy buffer. This call may read from several internal BlockInputStreams * if there is sufficient space in the buffer. * @param strategy - * @return * @throws IOException */ @Override @@ -409,7 +405,6 @@ protected void seekStreamIfNecessary(BlockExtendedInputStream stream, * group length. 
* @param stream Stream to read from * @param strategy The ReaderStrategy to read data into - * @return * @throws IOException */ private int readFromStream(BlockExtendedInputStream stream, diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java index 66e7a31337a6..aca3cfed465f 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java @@ -45,7 +45,6 @@ public interface ECBlockInputStreamFactory { * know are bad and should not be used. * @param repConfig The replication Config * @param blockInfo The blockInfo representing the block. - * @param verifyChecksum Whether to verify checksums or not. * @param xceiverFactory Factory to create the xceiver in the client * @param refreshFunction Function to refresh the block location if needed * @return BlockExtendedInputStream of the correct type. diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java index 01d0b0a7b7e8..41c46aad379c 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java @@ -68,7 +68,6 @@ private ECBlockInputStreamFactoryImpl(BlockInputStreamFactory streamFactory, * know are bad and should not be used. * @param repConfig The replication Config * @param blockInfo The blockInfo representing the block. - * @param verifyChecksum Whether to verify checksums or not. * @param xceiverFactory Factory to create the xceiver in the client * @param refreshFunction Function to refresh the pipeline if needed * @return BlockExtendedInputStream of the correct type. diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java index 31f94e0acad6..229cc3f3e36e 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java @@ -85,7 +85,7 @@ * Parity elements long. Missing or not needed elements should be set to null * in the array. The elements should be assigned to the array in EC index order. * - * Assuming we have n missing data locations, where n <= parity locations, the + * Assuming we have n missing data locations, where n {@literal <=} parity locations, the * ByteBuffers passed in from the client are either assigned to the decoder * input array, or they are assigned to the decoder output array, where * reconstructed data is written. 
The required number of parity buffers will be diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index 29cb513bb6fc..f2576f7cf08b 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-common - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Common Apache Ozone HDDS Common jar diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java index 794b972f1509..e3b90da794ba 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java @@ -650,7 +650,7 @@ public static File createDir(String dirPath) { * Utility string formatter method to display SCM roles. * * @param nodes - * @return + * @return String */ public static String format(List nodes) { StringBuilder sb = new StringBuilder(); @@ -680,7 +680,8 @@ public static int roundupMb(long bytes) { /** * Unwrap exception to check if it is some kind of access control problem - * ({@link AccessControlException} or {@link SecretManager.InvalidToken}) + * ({@link org.apache.hadoop.security.AccessControlException} or + * {@link org.apache.hadoop.security.token.SecretManager.InvalidToken}) * or a RpcException. */ public static Throwable getUnwrappedException(Exception ex) { @@ -877,4 +878,17 @@ public static HddsProtos.UUID toProtobuf(UUID uuid) { ? Thread.currentThread().getStackTrace() : null; } + + /** + * Logs a warning to report that the class is not closed properly. + */ + public static void reportLeak(Class clazz, String stackTrace, Logger log) { + String warning = String.format("%s is not closed properly", clazz.getSimpleName()); + if (stackTrace != null && log.isDebugEnabled()) { + String debugMessage = String.format("%nStackTrace for unclosed instance: %s", + stackTrace); + warning = warning.concat(debugMessage); + } + log.warn(warning); + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceStability.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceStability.java index 4251344139ab..6e9ee9467907 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceStability.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceStability.java @@ -27,9 +27,9 @@ * class or method not changing over time. Currently the stability can be * {@link Stable}, {@link Evolving} or {@link Unstable}.
* - *
<ul><li>All classes that are annotated with {@link Public} or
- * {@link LimitedPrivate} must have InterfaceStability annotation. </li>
- * <li>Classes that are {@link Private} are to be considered unstable unless
+ * <ul><li>All classes that are annotated with {@link InterfaceAudience.Public} or
+ * {@link InterfaceAudience.LimitedPrivate} must have InterfaceStability annotation. </li>
+ * <li>Classes that are {@link InterfaceAudience.Private} are to be considered unstable unless
 * a different InterfaceStability annotation states otherwise.</li>
 * <li>Incompatible changes must not be made to classes marked as stable.</li>
 * </ul>
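As an aside, the HddsUtils hunk above introduces a small leak-reporting helper, reportLeak(Class, String, Logger), which warns that an instance "is not closed properly" and includes the recorded creation stack trace only when debug logging is enabled. A minimal sketch of how a caller might use it follows; the TrackedBuffer class, its fields and its onDiscarded() hook are hypothetical names for illustration, and only HddsUtils.reportLeak itself comes from the patch.

import java.util.Arrays;

import org.apache.hadoop.hdds.HddsUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Hypothetical resource type used only to illustrate the new helper. */
class TrackedBuffer implements AutoCloseable {
  private static final Logger LOG = LoggerFactory.getLogger(TrackedBuffer.class);

  // Capture the creation stack trace only when debug logging is on,
  // since reportLeak() includes it in the warning only at debug level.
  private final String creationTrace = LOG.isDebugEnabled()
      ? Arrays.toString(Thread.currentThread().getStackTrace())
      : null;

  private volatile boolean closed;

  // Invoked by whatever leak-detection hook notices this object was discarded unclosed.
  void onDiscarded() {
    if (!closed) {
      // Logs "TrackedBuffer is not closed properly"; the captured trace is appended
      // only when debug logging is enabled.
      HddsUtils.reportLeak(TrackedBuffer.class, creationTrace, LOG);
    }
  }

  @Override
  public void close() {
    closed = true;
  }
}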
    diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java index c176ad1464ec..20755a6e0ec5 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java @@ -97,7 +97,6 @@ public static JsonNode getBeansJsonNode(String metricsJson) throws IOException { * Returns the number of decommissioning nodes. * * @param jsonNode - * @return */ public static int getNumDecomNodes(JsonNode jsonNode) { int numDecomNodes; @@ -118,7 +117,6 @@ public static int getNumDecomNodes(JsonNode jsonNode) { * @param numDecomNodes * @param countsMap * @param errMsg - * @return * @throws IOException */ @Nullable diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java index 01f508d257c1..12efcc9aa202 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java @@ -74,7 +74,8 @@ public class DatanodeDetails extends NodeImpl implements private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(ExtendedDatanodeDetailsProto.getDefaultInstance()), DatanodeDetails::getFromProtoBuf, - DatanodeDetails::getExtendedProtoBufMessage); + DatanodeDetails::getExtendedProtoBufMessage, + DatanodeDetails.class); public static Codec getCodec() { return CODEC; @@ -93,7 +94,6 @@ public static Codec getCodec() { private String version; private long setupTime; private String revision; - private String buildDate; private volatile HddsProtos.NodeOperationalState persistedOpState; private volatile long persistedOpStateExpiryEpochSec; private int initialVersion; @@ -111,7 +111,6 @@ private DatanodeDetails(Builder b) { version = b.version; setupTime = b.setupTime; revision = b.revision; - buildDate = b.buildDate; persistedOpState = b.persistedOpState; persistedOpStateExpiryEpochSec = b.persistedOpStateExpiryEpochSec; initialVersion = b.initialVersion; @@ -140,7 +139,6 @@ public DatanodeDetails(DatanodeDetails datanodeDetails) { this.version = datanodeDetails.version; this.setupTime = datanodeDetails.setupTime; this.revision = datanodeDetails.revision; - this.buildDate = datanodeDetails.buildDate; this.persistedOpState = datanodeDetails.getPersistedOpState(); this.persistedOpStateExpiryEpochSec = datanodeDetails.getPersistedOpStateExpiryEpochSec(); @@ -432,9 +430,6 @@ public static DatanodeDetails getFromProtoBuf( if (extendedDetailsProto.hasRevision()) { builder.setRevision(extendedDetailsProto.getRevision()); } - if (extendedDetailsProto.hasBuildDate()) { - builder.setBuildDate(extendedDetailsProto.getBuildDate()); - } return builder.build(); } @@ -526,9 +521,6 @@ public ExtendedDatanodeDetailsProto getExtendedProtoBufMessage() { if (!Strings.isNullOrEmpty(getRevision())) { extendedBuilder.setRevision(getRevision()); } - if (!Strings.isNullOrEmpty(getBuildDate())) { - extendedBuilder.setBuildDate(getBuildDate()); - } return extendedBuilder.build(); } @@ -621,7 +613,6 @@ public static final class Builder { private String version; private long setupTime; private String revision; - private String buildDate; private HddsProtos.NodeOperationalState persistedOpState; private long persistedOpStateExpiryEpochSec = 0; private 
int initialVersion; @@ -653,7 +644,6 @@ public Builder setDatanodeDetails(DatanodeDetails details) { this.version = details.getVersion(); this.setupTime = details.getSetupTime(); this.revision = details.getRevision(); - this.buildDate = details.getBuildDate(); this.persistedOpState = details.getPersistedOpState(); this.persistedOpStateExpiryEpochSec = details.getPersistedOpStateExpiryEpochSec(); @@ -800,18 +790,6 @@ public Builder setRevision(String rev) { return this; } - /** - * Sets the DataNode build date. - * - * @param date the build date of DataNode. - * - * @return DatanodeDetails.Builder - */ - public Builder setBuildDate(String date) { - this.buildDate = date; - return this; - } - /** * Sets the DataNode setup time. * @@ -1053,24 +1031,6 @@ public void setRevision(String rev) { this.revision = rev; } - /** - * Returns the DataNode build date. - * - * @return DataNode build date - */ - public String getBuildDate() { - return buildDate; - } - - /** - * Set DataNode build date. - * - * @param date DataNode build date - */ - public void setBuildDate(String date) { - this.buildDate = date; - } - @Override public HddsProtos.NetworkNode toProtobuf( int clientVersion) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java index 3ed9f4e58e12..eb6142ea67d0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java @@ -20,11 +20,13 @@ /** * This class contains constants for Recon related configuration keys used in - * SCM & Datanode. + * SCM and Datanode. */ public final class ReconConfigKeys { /** + * This class contains constants for Recon related configuration keys used in + * SCM and Datanode. * Never constructed. */ private ReconConfigKeys() { @@ -71,7 +73,7 @@ private ReconConfigKeys() { * Recon administrator users delimited by a comma. * This is the list of users who can access admin only information from recon. * Users defined in - * {@link org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS} + * {@link org.apache.hadoop.ozone.OzoneConfigKeys#OZONE_ADMINISTRATORS} * will always be able to access all recon information regardless of this * setting. 
*/ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java index 3ef9317ced0c..dd78faf68279 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java @@ -206,8 +206,7 @@ public int getScmDefaultLayoutVersionOnInit() { * required for SCMSecurityProtocol where the KerberosInfo references * the old configuration with * the annotation shown below:- - * @KerberosInfo(serverPrincipal = ScmConfigKeys - * .HDDS_SCM_KERBEROS_PRINCIPAL_KEY) + * {@code @KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)} */ public static class ConfigStrings { public static final String HDDS_SCM_KERBEROS_PRINCIPAL_KEY = diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index db789783c7c0..c4b42acec435 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -131,6 +131,11 @@ public final class ScmConfigKeys { "hdds.ratis.snapshot.threshold"; public static final long HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000; + public static final String OZONE_SCM_CONTAINER_LIST_MAX_COUNT = + "ozone.scm.container.list.max.count"; + + public static final int OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT = 4096; + // TODO : this is copied from OzoneConsts, may need to move to a better place public static final String OZONE_SCM_CHUNK_SIZE_KEY = "ozone.scm.chunk.size"; // 4 MB by default diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java index 19c39698dec7..aeb894564b5a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java @@ -30,6 +30,7 @@ public final class ScmInfo { private final String clusterId; private final String scmId; private final List peerRoles; + private final boolean scmRatisEnabled; /** * Builder for ScmInfo. @@ -38,6 +39,7 @@ public static class Builder { private String clusterId; private String scmId; private final List peerRoles; + private boolean scmRatisEnabled; public Builder() { peerRoles = new ArrayList<>(); @@ -73,15 +75,28 @@ public Builder setRatisPeerRoles(List roles) { return this; } + /** + * Set whether SCM enables Ratis. + * + * @param ratisEnabled If it is true, it means that the Ratis mode is turned on. + * If it is false, it means that the Ratis mode is not turned on. 
+ * @return Builder for scmInfo + */ + public Builder setScmRatisEnabled(boolean ratisEnabled) { + scmRatisEnabled = ratisEnabled; + return this; + } + public ScmInfo build() { - return new ScmInfo(clusterId, scmId, peerRoles); + return new ScmInfo(clusterId, scmId, peerRoles, scmRatisEnabled); } } - private ScmInfo(String clusterId, String scmId, List peerRoles) { + private ScmInfo(String clusterId, String scmId, List peerRoles, boolean ratisEnabled) { this.clusterId = clusterId; this.scmId = scmId; this.peerRoles = Collections.unmodifiableList(peerRoles); + this.scmRatisEnabled = ratisEnabled; } /** @@ -107,4 +122,8 @@ public String getScmId() { public List getRatisPeerRoles() { return peerRoles; } + + public boolean getScmRatisEnabled() { + return scmRatisEnabled; + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java index 34b2680b301b..8662cac80904 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfoResponseProto; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.scm.container.ContainerReplicaInfo; import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; @@ -122,10 +123,11 @@ void deleteContainer(long containerId, Pipeline pipeline, boolean force) * @param startContainerID start containerID. * @param count count must be {@literal >} 0. * - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ - List listContainer(long startContainerID, + ContainerListResult listContainer(long startContainerID, int count) throws IOException; /** @@ -135,10 +137,11 @@ List listContainer(long startContainerID, * @param count count must be {@literal >} 0. * @param state Container of this state will be returned. * @param replicationConfig container replication Config. - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ - List listContainer(long startContainerID, int count, + ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig replicationConfig) @@ -392,11 +395,19 @@ StartContainerBalancerResponseProto startContainerBalancer( */ List getScmRatisRoles() throws IOException; + /** + * Get the current SCM mode. + * + * @return `true` indicates that it is in RATIS mode, + * while `false` indicates that it is in STANDALONE mode. + * @throws IOException an I/O exception of some sort has occurred. + */ + boolean isScmRatisEnable() throws IOException; + /** * Force generates new secret keys (rotate). 
* * @param force boolean flag that forcefully rotates the key on demand - * @return * @throws IOException */ boolean rotateSecretKeys(boolean force) throws IOException; @@ -414,7 +425,7 @@ StartContainerBalancerResponseProto startContainerBalancer( * considered to be failed if it has been sent more than MAX_RETRY limit * and its count is reset to -1. * - * @param count Maximum num of returned transactions, if < 0. return all. + * @param count Maximum num of returned transactions, if {@literal < 0}. return all. * @param startTxId The least transaction id to start with. * @return a list of failed deleted block transactions. * @throws IOException diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java index 88522f2f9f45..90f690da5a14 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java @@ -37,7 +37,7 @@ public final class ContainerID implements Comparable { private static final Codec CODEC = new DelegatedCodec<>( LongCodec.get(), ContainerID::valueOf, c -> c.id, - DelegatedCodec.CopyType.SHALLOW); + ContainerID.class, DelegatedCodec.CopyType.SHALLOW); public static final ContainerID MIN = ContainerID.valueOf(0); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java index 6bf2d5500c88..90eb8b47de1d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java @@ -46,7 +46,8 @@ public final class ContainerInfo implements Comparable { private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(HddsProtos.ContainerInfoProto.getDefaultInstance()), ContainerInfo::fromProtobuf, - ContainerInfo::getProtobuf); + ContainerInfo::getProtobuf, + ContainerInfo.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerListResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerListResult.java new file mode 100644 index 000000000000..9e8d5738db86 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerListResult.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.container; + +import java.util.List; + +/** + * Wrapper class for the result of listing containers with their total count. 
+ */ +public class ContainerListResult { + private final List containerInfoList; + private final long totalCount; + + /** + * Constructs a new ContainerListResult. + * + * @param containerInfoList the list of containers + * @param totalCount the total number of containers + */ + public ContainerListResult(List containerInfoList, long totalCount) { + this.containerInfoList = containerInfoList; + this.totalCount = totalCount; + } + + /** + * Gets the list of containers. + * + * @return the list of containers + */ + public List getContainerInfoList() { + return containerInfoList; + } + + /** + * Gets the total count of containers. + * + * @return the total count of containers + */ + public long getTotalCount() { + return totalCount; + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java index df8e9d45e134..45bc77d1d8f4 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java @@ -146,7 +146,6 @@ public long getReportTimeStamp() { /** * Return a map of all stats and their value as a long. - * @return */ public Map getStats() { Map result = new HashMap<>(); @@ -159,7 +158,6 @@ public Map getStats() { /** * Return a map of all samples, with the stat as the key and the samples * for the stat as a List of Long. - * @return */ public Map> getSamples() { Map> result = new HashMap<>(); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java index af4e72993839..b71adb7099aa 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java @@ -54,7 +54,7 @@ import java.util.List; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import static org.apache.hadoop.ozone.OzoneConsts.SCM_RATIS_SNAPSHOT_DIR; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RATIS_SNAPSHOT_DIR; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEFAULT_SERVICE_ID; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY; @@ -159,7 +159,7 @@ public static String getSCMRatisSnapshotDirectory(ConfigurationSource conf) { OZONE_METADATA_DIRS); File metaDirPath = ServerUtils.getOzoneMetaDirPath(conf); snapshotDir = - Paths.get(metaDirPath.getPath(), SCM_RATIS_SNAPSHOT_DIR).toString(); + Paths.get(metaDirPath.getPath(), OZONE_RATIS_SNAPSHOT_DIR).toString(); } return snapshotDir; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeInfo.java index a5e443a598d3..66fe7d187835 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeInfo.java @@ -67,7 +67,6 @@ public class SCMNodeInfo { /** * Build SCM Node information from configuration. 
* @param conf - * @return */ public static List buildNodeInfo(ConfigurationSource conf) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java index 332dddac25c9..779f2456be6b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java @@ -307,10 +307,13 @@ public void remove(Node node) { * @param loc string location of a node. If loc starts with "/", it's a * absolute path, otherwise a relative path. Following examples * are all accepted, + *
    +   *            {@code
        *            1.  /dc1/rm1/rack1          -> an inner node
        *            2.  /dc1/rm1/rack1/node1    -> a leaf node
        *            3.  rack1/node1             -> a relative path to this node
    -   *
    +   *            }
+   *            </pre>
    * @return null if the node is not found */ @Override diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java index 1486f05f55c0..6c5b4aff57f6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java @@ -68,6 +68,7 @@ public final class Pipeline { Proto2Codec.get(HddsProtos.Pipeline.getDefaultInstance()), Pipeline::getFromProtobufSetCreationTimestamp, p -> p.getProtobufMessage(ClientVersion.CURRENT_VERSION), + Pipeline.class, DelegatedCodec.CopyType.UNSUPPORTED); public static Codec getCodec() { @@ -243,7 +244,6 @@ public int getReplicaIndex(DatanodeDetails dn) { /** * Get the replicaIndex Map. - * @return */ public Map getReplicaIndexes() { return this.getNodes().stream().collect(Collectors.toMap(Function.identity(), this::getReplicaIndex)); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java index 5ca354562611..92e01735d532 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java @@ -34,7 +34,7 @@ public final class PipelineID { private static final Codec CODEC = new DelegatedCodec<>( UuidCodec.get(), PipelineID::valueOf, c -> c.id, - DelegatedCodec.CopyType.SHALLOW); + PipelineID.class, DelegatedCodec.CopyType.SHALLOW); public static Codec getCodec() { return CODEC; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java index 45825cb2b621..3c3786b38d1b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; @@ -146,10 +147,11 @@ List getExistContainerWithPipelinesInBatch( * Usually the count will be replace with a very big * value instead of being unlimited in case the db is very big) * - * @return a list of container. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ - List listContainer(long startContainerID, + ContainerListResult listContainer(long startContainerID, int count) throws IOException; /** @@ -165,10 +167,11 @@ List listContainer(long startContainerID, * value instead of being unlimited in case the db is very big) * @param state Container with this state will be returned. * - * @return a list of container. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. 
* @throws IOException */ - List listContainer(long startContainerID, + ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException; /** @@ -184,14 +187,14 @@ List listContainer(long startContainerID, * value instead of being unlimited in case the db is very big) * @param state Container with this state will be returned. * @param factor Container factor - * @return a list of container. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ - List listContainer(long startContainerID, + ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) throws IOException; - /** * Ask SCM for a list of containers with a range of container ID, state * and replication config, and the limit of count. @@ -205,10 +208,11 @@ List listContainer(long startContainerID, * value instead of being unlimited in case the db is very big) * @param state Container with this state will be returned. * @param replicationConfig Replication config for the containers - * @return a list of container. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ - List listContainer(long startContainerID, + ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig replicationConfig) throws IOException; @@ -337,7 +341,7 @@ Pipeline createReplicationPipeline(HddsProtos.ReplicationType type, * considered to be failed if it has been sent more than MAX_RETRY limit * and its count is reset to -1. * - * @param count Maximum num of returned transactions, if < 0. return all. + * @param count Maximum num of returned transactions, if {@literal < 0}. return all. * @param startTxId The least transaction id to start with. * @return a list of failed deleted block transactions. 
* @throws IOException diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateSignRequest.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateSignRequest.java index 1f04e868a851..553b1dc812e3 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateSignRequest.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateSignRequest.java @@ -27,13 +27,12 @@ import java.util.List; import java.util.Optional; +import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.commons.validator.routines.DomainValidator; import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; import org.apache.hadoop.hdds.security.x509.exception.CertificateException; - -import com.google.common.base.Preconditions; import org.apache.hadoop.ozone.OzoneSecurityUtil; import org.bouncycastle.asn1.ASN1EncodableVector; import org.bouncycastle.asn1.ASN1Object; @@ -390,7 +389,7 @@ private Optional getSubjectAltNameExtension() throws if (altNames != null) { return Optional.of(new Extension(Extension.subjectAlternativeName, false, new DEROctetString(new GeneralNames( - altNames.toArray(new GeneralName[altNames.size()]))))); + altNames.toArray(new GeneralName[0]))))); } return Optional.empty(); } @@ -414,12 +413,10 @@ private Extensions createExtensions() throws IOException { // Add subject alternate name extension Optional san = getSubjectAltNameExtension(); - if (san.isPresent()) { - extensions.add(san.get()); - } + san.ifPresent(extensions::add); return new Extensions( - extensions.toArray(new Extension[extensions.size()])); + extensions.toArray(new Extension[0])); } public CertificateSignRequest build() throws SCMSecurityException { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java index 31aaca568e4f..66685b4bbbde 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java @@ -131,7 +131,7 @@ public static InetSocketAddress updateListenAddress(OzoneConfiguration conf, * Fall back to OZONE_METADATA_DIRS if not defined. 
* * @param conf - * @return + * @return File */ public static File getScmDbDir(ConfigurationSource conf) { File metadataDir = getDirectoryFromConfig(conf, diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java index b76a316c90bd..386b1358b97f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java @@ -44,8 +44,6 @@ public static void main(String[] args) { System.out.println( "Source code repository " + HDDS_VERSION_INFO.getUrl() + " -r " + HDDS_VERSION_INFO.getRevision()); - System.out.println("Compiled by " + HDDS_VERSION_INFO.getUser() + " on " - + HDDS_VERSION_INFO.getDate()); System.out.println( "Compiled with protoc " + HDDS_VERSION_INFO.getHadoopProtoc2Version() + ", " + HDDS_VERSION_INFO.getGrpcProtocVersion() + diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakDetector.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakDetector.java index 477a291f9283..9579d4e73bf6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakDetector.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakDetector.java @@ -31,7 +31,6 @@ /** * Simple general resource leak detector using {@link ReferenceQueue} and {@link java.lang.ref.WeakReference} to * observe resource object life-cycle and assert proper resource closure before they are GCed. - * *

 * Example usage:
 *
@@ -43,16 +42,18 @@
 *     // report leaks, don't refer to the original object (MyResource) here.
 *     System.out.println("MyResource is not closed before being discarded.");
 *   });
- *
- *   @Override
+ * }
+ * }
+ *
+ *
+ *   {@code @Override
 *   public void close() {
 *     // proper resources cleanup...
 *     // inform tracker that this object is closed properly.
 *     leakTracker.close();
 *   }
- * }
- *
- * }
    + * } + * */ public class LeakDetector { private static final Logger LOG = LoggerFactory.getLogger(LeakDetector.class); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/VersionInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/VersionInfo.java index 349c0a862062..d3de20cd476d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/VersionInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/VersionInfo.java @@ -65,18 +65,6 @@ public String getRevision() { return info.getProperty("revision", "Unknown"); } - public String getBranch() { - return info.getProperty("branch", "Unknown"); - } - - public String getDate() { - return info.getProperty("date", "Unknown"); - } - - public String getUser() { - return info.getProperty("user", "Unknown"); - } - public String getUrl() { return info.getProperty("url", "Unknown"); } @@ -108,7 +96,6 @@ public String getCompilePlatform() { public String getBuildVersion() { return getVersion() + " from " + getRevision() + - " by " + getUser() + " source checksum " + getSrcChecksum(); } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BooleanCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BooleanCodec.java index 2ec396c0ffab..6d416ea2ef32 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BooleanCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BooleanCodec.java @@ -36,6 +36,11 @@ private BooleanCodec() { // singleton } + @Override + public Class getTypeClass() { + return Boolean.class; + } + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java index 46779648e67c..54bbf42c468d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java @@ -30,6 +30,9 @@ public interface Codec { byte[] EMPTY_BYTE_ARRAY = {}; + /** @return the class of the {@link T}. */ + Class getTypeClass(); + /** * Does this {@link Codec} support the {@link CodecBuffer} methods? * If this method returns true, this class must implement both diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java index dff0b015ed58..2ed92e66d2ea 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java @@ -23,9 +23,9 @@ import java.io.IOException; /** - * A {@link Codec} to serialize/deserialize objects by delegation. + * A {@link org.apache.hadoop.hdds.utils.db.Codec} to serialize/deserialize objects by delegation. * - * @param The object type of this {@link Codec}. + * @param The object type of this {@link org.apache.hadoop.hdds.utils.db.Codec}. * @param The object type of the {@link #delegate}. */ public class DelegatedCodec implements Codec { @@ -47,31 +47,39 @@ public enum CopyType { private final Codec delegate; private final CheckedFunction forward; private final CheckedFunction backward; + private final Class clazz; private final CopyType copyType; /** * Construct a {@link Codec} using the given delegate. 
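Before the parameter list of this constructor javadoc continues below, a hedged sketch of the new contract introduced by getTypeClass() and the extra Class argument on DelegatedCodec. InstantCodec is a hypothetical example type; only the constructor argument order and CopyType come from the hunks above:

import java.time.Instant;

import org.apache.hadoop.hdds.utils.db.Codec;
import org.apache.hadoop.hdds.utils.db.DelegatedCodec;
import org.apache.hadoop.hdds.utils.db.LongCodec;

// Hypothetical codec: persists an Instant as epoch milliseconds via LongCodec.
final class InstantCodec {
  static final Codec<Instant> CODEC = new DelegatedCodec<>(
      LongCodec.get(),                  // delegate codec
      Instant::ofEpochMilli,            // DELEGATE -> T
      Instant::toEpochMilli,            // T -> DELEGATE
      Instant.class,                    // new: backs getTypeClass()
      DelegatedCodec.CopyType.SHALLOW); // Instant is immutable, no deep copy

  private InstantCodec() {
    // no instances; CODEC is the singleton entry point
  }
}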
* * @param delegate the delegate {@link Codec} - * @param forward a function to convert {@link DELEGATE} to {@link T}. - * @param backward a function to convert {@link T} back to {@link DELEGATE}. + * @param forward a function to convert {@code DELEGATE} to {@code T}. + * @param backward a function to convert {@code T} back to {@code DELEGATE}. * @param copyType How to {@link #copyObject(Object)}? */ public DelegatedCodec(Codec delegate, CheckedFunction forward, CheckedFunction backward, - CopyType copyType) { + Class clazz, CopyType copyType) { this.delegate = delegate; this.forward = forward; this.backward = backward; + this.clazz = clazz; this.copyType = copyType; } /** The same as new DelegatedCodec(delegate, forward, backward, DEEP). */ public DelegatedCodec(Codec delegate, CheckedFunction forward, - CheckedFunction backward) { - this(delegate, forward, backward, CopyType.DEEP); + CheckedFunction backward, + Class clazz) { + this(delegate, forward, backward, clazz, CopyType.DEEP); + } + + @Override + public Class getTypeClass() { + return clazz; } @Override diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java index 50488053159f..d31be6fe9761 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java @@ -36,6 +36,11 @@ private IntegerCodec() { // singleton } + @Override + public Class getTypeClass() { + return Integer.class; + } + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java index 9e776cc18f7f..cf4819800083 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java @@ -33,6 +33,11 @@ public static LongCodec get() { private LongCodec() { } + @Override + public Class getTypeClass() { + return Long.class; + } + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2Codec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2Codec.java index 96d12d1ebe53..8eb4a3072156 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2Codec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2Codec.java @@ -47,12 +47,19 @@ public static Codec get(T t) { return (Codec) codec; } + private final Class clazz; private final Parser parser; private Proto2Codec(M m) { + this.clazz = (Class) m.getClass(); this.parser = (Parser) m.getParserForType(); } + @Override + public Class getTypeClass() { + return clazz; + } + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto3Codec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto3Codec.java index 30245e033e02..c1eb693a0076 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto3Codec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto3Codec.java @@ -47,12 +47,19 @@ public static Codec get(T t) { return (Codec) codec; } + private final Class clazz; private final Parser 
parser; private Proto3Codec(M m) { + this.clazz = (Class) m.getClass(); this.parser = (Parser) m.getParserForType(); } + @Override + public Class getTypeClass() { + return clazz; + } + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ShortCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ShortCodec.java index f6482e5712cf..beb296a29d1c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ShortCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ShortCodec.java @@ -37,6 +37,11 @@ private ShortCodec() { // singleton } + @Override + public Class getTypeClass() { + return Short.class; + } + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodecBase.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodecBase.java index 1df552379374..e35be632dc41 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodecBase.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodecBase.java @@ -59,6 +59,11 @@ abstract class StringCodecBase implements Codec { this.fixedLength = max == encoder.averageBytesPerChar(); } + @Override + public final Class getTypeClass() { + return String.class; + } + CharsetEncoder newEncoder() { return charset.newEncoder() .onMalformedInput(CodingErrorAction.REPORT) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/UuidCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/UuidCodec.java index dfccaa0ab750..d05b748b52a0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/UuidCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/UuidCodec.java @@ -40,6 +40,11 @@ public static int getSerializedSize() { private UuidCodec() { } + @Override + public Class getTypeClass() { + return UUID.class; + } + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index b34a5d8387be..e483feba98d2 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -40,7 +40,6 @@ public final class OzoneConsts { public static final String SCM_CERT_SERIAL_ID = "scmCertSerialId"; public static final String PRIMARY_SCM_NODE_ID = "primaryScmNodeId"; - public static final String OZONE_SIMPLE_ROOT_USER = "root"; public static final String OZONE_SIMPLE_HDFS_USER = "hdfs"; public static final String STORAGE_ID = "storageID"; @@ -76,12 +75,6 @@ public final class OzoneConsts { "EEE, dd MMM yyyy HH:mm:ss zzz"; public static final String OZONE_TIME_ZONE = "GMT"; - public static final String OZONE_COMPONENT = "component"; - public static final String OZONE_FUNCTION = "function"; - public static final String OZONE_RESOURCE = "resource"; - public static final String OZONE_USER = "user"; - public static final String OZONE_REQUEST = "request"; - // OM Http server endpoints public static final String OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT = "/serviceList"; @@ -101,14 +94,9 @@ public final class OzoneConsts { public static final String CONTAINER_EXTENSION = ".container"; - public static final String 
CONTAINER_META = ".meta"; - - // Refer to {@link ContainerReader} for container storage layout on disk. - public static final String CONTAINER_PREFIX = "containers"; public static final String CONTAINER_META_PATH = "metadata"; public static final String CONTAINER_TEMPORARY_CHUNK_PREFIX = "tmp"; public static final String CONTAINER_CHUNK_NAME_DELIMITER = "."; - public static final String CONTAINER_ROOT_PREFIX = "repository"; public static final String FILE_HASH = "SHA-256"; public static final String MD5_HASH = "MD5"; @@ -128,7 +116,6 @@ public final class OzoneConsts { * level DB names used by SCM and data nodes. */ public static final String CONTAINER_DB_SUFFIX = "container.db"; - public static final String PIPELINE_DB_SUFFIX = "pipeline.db"; public static final String DN_CONTAINER_DB = "-dn-" + CONTAINER_DB_SUFFIX; public static final String OM_DB_NAME = "om.db"; public static final String SCM_DB_NAME = "scm.db"; @@ -187,10 +174,8 @@ public final class OzoneConsts { public static final String OM_USER_PREFIX = "$"; public static final String OM_S3_PREFIX = "S3:"; public static final String OM_S3_CALLER_CONTEXT_PREFIX = "S3Auth:S3G|"; - public static final String OM_S3_VOLUME_PREFIX = "s3"; public static final String OM_S3_SECRET = "S3Secret:"; public static final String OM_PREFIX = "Prefix:"; - public static final String OM_TENANT = "Tenant:"; /** * Max chunk size limit. @@ -198,11 +183,6 @@ public final class OzoneConsts { public static final int OZONE_SCM_CHUNK_MAX_SIZE = 32 * 1024 * 1024; - /** - * Max OM Quota size of Long.MAX_VALUE. - */ - public static final long MAX_QUOTA_IN_BYTES = Long.MAX_VALUE; - /** * Quota RESET default is -1, which means quota is not set. */ @@ -214,36 +194,20 @@ public final class OzoneConsts { */ public enum Units { TB, GB, MB, KB, B } - /** - * Max number of keys returned per list buckets operation. - */ - public static final int MAX_LISTBUCKETS_SIZE = 1024; - - /** - * Max number of keys returned per list keys operation. - */ - public static final int MAX_LISTKEYS_SIZE = 1024; - - /** - * Max number of volumes returned per list volumes operation. - */ - public static final int MAX_LISTVOLUMES_SIZE = 1024; - - public static final int INVALID_PORT = -1; - /** * Object ID to identify reclaimable uncommitted blocks. */ public static final long OBJECT_ID_RECLAIM_BLOCKS = 0L; - /** * Default SCM Datanode ID file name. */ public static final String OZONE_SCM_DATANODE_ID_FILE_DEFAULT = "datanode.id"; - // The ServiceListJSONServlet context attribute where OzoneManager - // instance gets stored. + /** + * The ServiceListJSONServlet context attribute where OzoneManager + * instance gets stored. 
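The comment promoted to javadoc here (its closing */ follows just below) states that the OzoneManager instance is stored in the servlet context under OM_CONTEXT_ATTRIBUTE. A hypothetical reader-side sketch; the servlet class name and the Object-typed field are chosen only to keep the fragment self-contained:

import javax.servlet.http.HttpServlet;

import org.apache.hadoop.ozone.OzoneConsts;

public class ServiceListExampleServlet extends HttpServlet {
  private Object ozoneManager;

  @Override
  public void init() {
    // The OzoneManager instance is stashed under this attribute by the server.
    ozoneManager = getServletContext()
        .getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE);
  }
}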
+ */ public static final String OM_CONTEXT_ATTRIBUTE = "ozone.om"; public static final String SCM_CONTEXT_ATTRIBUTE = "ozone.scm"; @@ -308,12 +272,8 @@ private OzoneConsts() { public static final String KEY_PREFIX = "keyPrefix"; public static final String ACL = "acl"; public static final String ACLS = "acls"; - public static final String USER_ACL = "userAcl"; - public static final String ADD_ACLS = "addAcls"; - public static final String REMOVE_ACLS = "removeAcls"; public static final String MAX_NUM_OF_BUCKETS = "maxNumOfBuckets"; public static final String HAS_SNAPSHOT = "hasSnapshot"; - public static final String TO_KEY_NAME = "toKeyName"; public static final String STORAGE_TYPE = "storageType"; public static final String RESOURCE_TYPE = "resourceType"; public static final String IS_VERSION_ENABLED = "isVersionEnabled"; @@ -323,7 +283,6 @@ private OzoneConsts() { public static final String REPLICATION_TYPE = "replicationType"; public static final String REPLICATION_FACTOR = "replicationFactor"; public static final String REPLICATION_CONFIG = "replicationConfig"; - public static final String KEY_LOCATION_INFO = "keyLocationInfo"; public static final String MULTIPART_LIST = "multipartList"; public static final String UPLOAD_ID = "uploadID"; public static final String PART_NUMBER_MARKER = "partNumberMarker"; @@ -378,10 +337,6 @@ private OzoneConsts() { public static final String JAVA_TMP_DIR = "java.io.tmpdir"; public static final String LOCALHOST = "localhost"; - - public static final int S3_BUCKET_MIN_LENGTH = 3; - public static final int S3_BUCKET_MAX_LENGTH = 64; - public static final int S3_SECRET_KEY_MIN_LENGTH = 8; public static final int S3_REQUEST_HEADER_METADATA_SIZE_LIMIT_KB = 2; @@ -398,7 +353,6 @@ private OzoneConsts() { public static final String GDPR_ALGORITHM_NAME = "AES"; public static final int GDPR_DEFAULT_RANDOM_SECRET_LENGTH = 16; public static final Charset GDPR_CHARSET = StandardCharsets.UTF_8; - public static final String GDPR_LENGTH = "length"; public static final String GDPR_SECRET = "secret"; public static final String GDPR_ALGORITHM = "algorithm"; @@ -409,7 +363,7 @@ private OzoneConsts() { * contains illegal characters when creating/renaming key. * * Avoid the following characters in a key name: - * "\", "{", "}", "<", ">", "^", "%", "~", "#", "|", "`", "[", "]", Quotation + * {@literal "\", "{", "}", "<", ">", "^", "%", "~", "#", "|", "`", "[", "]"}, Quotation * marks and Non-printable ASCII characters (128–255 decimal characters). * https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html */ @@ -426,13 +380,6 @@ private OzoneConsts() { public static final String CONTAINER_DB_TYPE_ROCKSDB = "RocksDB"; - // SCM HA - public static final String SCM_SERVICE_ID_DEFAULT = "scmServiceIdDefault"; - - // SCM Ratis snapshot file to store the last applied index - public static final String SCM_RATIS_SNAPSHOT_INDEX = "scmRatisSnapshotIndex"; - - public static final String SCM_RATIS_SNAPSHOT_TERM = "scmRatisSnapshotTerm"; // An on-disk transient marker file used when replacing DB with checkpoint public static final String DB_TRANSIENT_MARKER = "dbInconsistentMarker"; @@ -440,10 +387,7 @@ private OzoneConsts() { // should remain prepared even after a restart. 
public static final String PREPARE_MARKER = "prepareMarker"; - // TODO : rename this to OZONE_RATIS_SNAPSHOT_DIR and use it in both - // SCM and OM - public static final String OM_RATIS_SNAPSHOT_DIR = "snapshot"; - public static final String SCM_RATIS_SNAPSHOT_DIR = "snapshot"; + public static final String OZONE_RATIS_SNAPSHOT_DIR = "snapshot"; public static final long DEFAULT_OM_UPDATE_ID = -1L; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java index d3a558ca4302..a24d39e5dacf 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java @@ -39,13 +39,12 @@ static ChunkBuffer allocate(int capacity) { return allocate(capacity, 0); } - /** - * Similar to {@link ByteBuffer#allocate(int)} + /** Similar to {@link ByteBuffer#allocate(int)} * except that it can specify the increment. * * @param increment * the increment size so that this buffer is allocated incrementally. - * When increment <= 0, entire buffer is allocated in the beginning. + * When increment {@literal <= 0}, entire buffer is allocated in the beginning. */ static ChunkBuffer allocate(int capacity, int increment) { if (increment > 0 && increment < capacity) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java index ea5c5453f3f0..b3ee5991737d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java @@ -39,7 +39,8 @@ public class BlockData { private static final Codec CODEC = new DelegatedCodec<>( Proto3Codec.get(ContainerProtos.BlockData.getDefaultInstance()), BlockData::getFromProtoBuf, - BlockData::getProtoBufMessage); + BlockData::getProtoBufMessage, + BlockData.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java index fdf40af9e097..ab5d39e9c3d1 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java @@ -27,7 +27,7 @@ /** * Helper class to convert between protobuf lists and Java lists of - * {@link ContainerProtos.ChunkInfo} objects. + * {@link org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo} objects. *
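Returning to the ChunkBuffer.allocate(capacity, increment) javadoc fixed earlier in this run of hunks: a short sketch of the two allocation modes it documents (the sizes are arbitrary examples):

import org.apache.hadoop.ozone.common.ChunkBuffer;

final class ChunkBufferAllocationExample {
  static void allocate() {
    // increment in (0, capacity): the buffer grows in 64 KB steps as data arrives.
    ChunkBuffer incremental = ChunkBuffer.allocate(4 * 1024 * 1024, 64 * 1024);

    // increment <= 0 (or >= capacity): the entire 4 MB is allocated up front.
    ChunkBuffer eager = ChunkBuffer.allocate(4 * 1024 * 1024, 0);
  }
}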

    * This class is immutable. */ @@ -36,6 +36,7 @@ public class ChunkInfoList { Proto3Codec.get(ContainerProtos.ChunkInfoList.getDefaultInstance()), ChunkInfoList::getFromProtoBuf, ChunkInfoList::getProtoBufMessage, + ChunkInfoList.class, DelegatedCodec.CopyType.SHALLOW); public static Codec getCodec() { @@ -49,7 +50,7 @@ public ChunkInfoList(List chunks) { } /** - * @return A new {@link ChunkInfoList} created from protobuf data. + * @return A new {@link #ChunkInfoList} created from protobuf data. */ public static ChunkInfoList getFromProtoBuf( ContainerProtos.ChunkInfoList chunksProto) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java index 83e63a2a322d..b94dd024b2d3 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java @@ -37,18 +37,16 @@ /** * Generic factory which stores different instances of Type 'T' sharded by - * a key & version. A single key can be associated with different versions + * a key and version. A single key can be associated with different versions * of 'T'. - * * Why does this class exist? * A typical use case during upgrade is to have multiple versions of a class * / method / object and chose them based on current layout * version at runtime. Before finalizing, an older version is typically * needed, and after finalize, a newer version is needed. This class serves * this purpose in a generic way. - * * For example, we can create a Factory to create multiple versions of - * OMRequests sharded by Request Type & Layout Version Supported. + * OMRequests sharded by Request Type and Layout Version Supported. */ public class LayoutVersionInstanceFactory { @@ -71,7 +69,7 @@ public class LayoutVersionInstanceFactory { /** * Register an instance with a given factory key (key + version). * For safety reasons we dont allow (1) re-registering, (2) registering an - * instance with version > SLV. + * instance with version > SLV. * * @param lvm LayoutVersionManager * @param key VersionFactoryKey key to associate with instance. @@ -138,13 +136,15 @@ private boolean isValid(LayoutVersionManager lvm, int version) { } /** + *

 * From the list of versioned instances for a given "key", this
 * returns the "floor" value corresponding to the given version.
- * For example, if we have key = "CreateKey",  entry -> [(1, CreateKeyV1),
- * (3, CreateKeyV2), and if the passed in key = CreateKey & version = 2, we
+ * For example, if we have key = "CreateKey",  entry -&gt; [(1, CreateKeyV1),
+ * (3, CreateKeyV2), and if the passed in key = CreateKey &amp; version = 2, we
 * return CreateKeyV1.
 * Since this is a priority queue based implementation, we use a O(1) peek()
 * lookup to get the current valid version.
+ *
    * @param lvm LayoutVersionManager * @param key Key and Version. * @return instance. diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionManager.java index 3137d756e6b5..a765c2c94553 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionManager.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionManager.java @@ -74,7 +74,6 @@ public interface LayoutVersionManager { /** * Generic API for returning a registered handler for a given type. * @param type String type - * @return */ default Object getHandler(String type) { return null; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/UpgradeFinalizer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/UpgradeFinalizer.java index 44ae94870e35..19c0498aa7a6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/UpgradeFinalizer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/UpgradeFinalizer.java @@ -50,14 +50,14 @@ public interface UpgradeFinalizer { * Represents the current state in which the service is with regards to * finalization after an upgrade. * The state transitions are the following: - * ALREADY_FINALIZED - no entry no exit from this status without restart. + * {@code ALREADY_FINALIZED} - no entry no exit from this status without restart. * After an upgrade: - * FINALIZATION_REQUIRED -(finalize)-> STARTING_FINALIZATION - * -> FINALIZATION_IN_PROGRESS -> FINALIZATION_DONE from finalization done + * {@code FINALIZATION_REQUIRED -(finalize)-> STARTING_FINALIZATION + * -> FINALIZATION_IN_PROGRESS -> FINALIZATION_DONE} from finalization done * there is no more move possible, after a restart the service can end up in: - * - FINALIZATION_REQUIRED, if the finalization failed and have not reached - * FINALIZATION_DONE, - * - or it can be ALREADY_FINALIZED if the finalization was successfully done. + * {@code FINALIZATION_REQUIRED}, if the finalization failed and have not reached + * {@code FINALIZATION_DONE}, + * - or it can be {@code ALREADY_FINALIZED} if the finalization was successfully done. */ enum Status { ALREADY_FINALIZED, diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/VersionFactoryKey.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/VersionFactoryKey.java index bda45f5a745c..6465cc855012 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/VersionFactoryKey.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/VersionFactoryKey.java @@ -20,7 +20,7 @@ /** * "Key" element to the Version specific instance factory. Currently it has 2 - * dimensions -> a 'key' string and a version. This is to support a factory + * dimensions -> a 'key' string and a version. This is to support a factory * which returns an instance for a given "key" and "version". 
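The floor-lookup behaviour described in the LayoutVersionInstanceFactory javadoc above is easiest to see with a plain JDK analogy; the factory itself shards instances per key and uses a priority queue, so this TreeMap version only illustrates the semantics and is not the real API:

import java.util.TreeMap;

final class FloorLookupAnalogy {
  public static void main(String[] args) {
    TreeMap<Integer, String> createKeyVersions = new TreeMap<>();
    createKeyVersions.put(1, "CreateKeyV1");
    createKeyVersions.put(3, "CreateKeyV2");

    // Requesting version 2 resolves to the highest registered version <= 2.
    System.out.println(createKeyVersions.floorEntry(2).getValue()); // CreateKeyV1
  }
}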
*/ public class VersionFactoryKey { diff --git a/hadoop-hdds/common/src/main/resources/hdds-version-info.properties b/hadoop-hdds/common/src/main/resources/hdds-version-info.properties index bf887021c5ba..3ba2c2cbfa2f 100644 --- a/hadoop-hdds/common/src/main/resources/hdds-version-info.properties +++ b/hadoop-hdds/common/src/main/resources/hdds-version-info.properties @@ -18,9 +18,6 @@ version=${declared.hdds.version} revision=${version-info.scm.commit} -branch=${version-info.scm.branch} -user=${user.name} -date=${version-info.build.time} url=${version-info.scm.uri} srcChecksum=${version-info.source.md5} hadoopProtoc2Version=${proto2.hadooprpc.protobuf.version} diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 9877427ea072..060b7cd7b49a 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -160,6 +160,13 @@ this not set. Ideally, this should be mapped to a fast disk like an SSD. + + ozone.scm.container.list.max.count + 4096 + OZONE, SCM, CONTAINER + The max number of containers info could be included in + response of ListContainer request. + hdds.datanode.dir @@ -1570,7 +1577,7 @@ hdds.datanode.metadata.rocksdb.cache.size - 64MB + 1GB OZONE, DATANODE, MANAGEMENT Size of the block metadata cache shared among RocksDB instances on each @@ -3469,9 +3476,9 @@ ozone.s3g.client.buffer.size OZONE, S3GATEWAY - 4KB + 4MB - The size of the buffer which is for read block. (4KB by default). + The size of the buffer which is for read block. (4MB by default). @@ -3743,6 +3750,15 @@ + + ozone.snapshot.deep.cleaning.enabled + false + OZONE, PERFORMANCE, OM + + Flag to enable/disable snapshot deep cleaning. + + + ozone.scm.event.ContainerReport.thread.pool.size 10 diff --git a/hadoop-hdds/config/pom.xml b/hadoop-hdds/config/pom.xml index 1c71bf3d90a4..60c63475ae34 100644 --- a/hadoop-hdds/config/pom.xml +++ b/hadoop-hdds/config/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-config - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Config Tools Apache Ozone HDDS Config jar diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java index b1a20c9aecbc..0d6c0c908786 100644 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java +++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java @@ -108,7 +108,7 @@ default String[] getTrimmedStrings(String name) { /** * Gets the configuration entries where the key contains the prefix. This * method will strip the prefix from the key in the return Map. - * Example: somePrefix.key->value will be key->value in the returned map. + * Example: {@code somePrefix.key->value} will be {@code key->value} in the returned map. * @param keyPrefix Prefix to search. * @return Map containing keys that match and their values. 
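For the prefix-stripping contract documented just above (the method's closing javadoc follows below), a plain-Java restatement of what the returned map looks like; this is an equivalent fragment for illustration, not the ConfigurationSource implementation itself:

import java.util.HashMap;
import java.util.Map;

final class PrefixStripExample {
  // {"somePrefix.key" -> "value"} becomes {"key" -> "value"} for keyPrefix "somePrefix.".
  static Map<String, String> withPrefixStripped(Map<String, String> all, String keyPrefix) {
    Map<String, String> result = new HashMap<>();
    for (Map.Entry<String, String> e : all.entrySet()) {
      if (e.getKey().startsWith(keyPrefix)) {
        result.put(e.getKey().substring(keyPrefix.length()), e.getValue());
      }
    }
    return result;
  }
}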
*/ diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml index d73bea95895e..c21ca8203b56 100644 --- a/hadoop-hdds/container-service/pom.xml +++ b/hadoop-hdds/container-service/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-container-service - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Container Service Apache Ozone HDDS Container Service jar diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index 495204aef1bf..2f4f72eb5fb4 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -229,7 +229,6 @@ public void start() { datanodeDetails.setSetupTime(Time.now()); datanodeDetails.setRevision( HddsVersionInfo.HDDS_VERSION_INFO.getRevision()); - datanodeDetails.setBuildDate(HddsVersionInfo.HDDS_VERSION_INFO.getDate()); TracingUtil.initTracing( "HddsDatanodeService." + datanodeDetails.getUuidString() .substring(0, 8), conf); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java index eeed4fab5f72..52217ce7f83f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java @@ -24,7 +24,7 @@ import org.apache.hadoop.security.authorize.Service; import org.apache.ratis.util.MemoizedSupplier; -import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.function.Supplier; @@ -50,7 +50,7 @@ public static HddsPolicyProvider getInstance() { } private static final List DN_SERVICES = - Arrays.asList( + Collections.singletonList( new Service( OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL, ReconfigureProtocol.class) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/CommandHandlerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/CommandHandlerMetrics.java index a6e4d6258d97..e52565952a51 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/CommandHandlerMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/CommandHandlerMetrics.java @@ -34,6 +34,7 @@ import static org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics.CommandMetricsMetricsInfo.TotalRunTimeMs; import static org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics.CommandMetricsMetricsInfo.QueueWaitingTaskCount; import static org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics.CommandMetricsMetricsInfo.InvocationCount; +import static org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics.CommandMetricsMetricsInfo.AvgRunTimeMs; import static org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics.CommandMetricsMetricsInfo.ThreadPoolActivePoolSize; import static org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics.CommandMetricsMetricsInfo.ThreadPoolMaxPoolSize; import static 
org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics.CommandMetricsMetricsInfo.CommandReceivedCount; @@ -46,6 +47,7 @@ public final class CommandHandlerMetrics implements MetricsSource { enum CommandMetricsMetricsInfo implements MetricsInfo { Command("The type of the SCM command"), TotalRunTimeMs("The total runtime of the command handler in milliseconds"), + AvgRunTimeMs("Average run time of the command handler in milliseconds"), QueueWaitingTaskCount("The number of queued tasks waiting for execution"), InvocationCount("The number of times the command handler has been invoked"), ThreadPoolActivePoolSize("The number of active threads in the thread pool"), @@ -108,6 +110,7 @@ public void getMetrics(MetricsCollector collector, boolean all) { commandHandler.getCommandType().name()); builder.addGauge(TotalRunTimeMs, commandHandler.getTotalRunTime()); + builder.addGauge(AvgRunTimeMs, commandHandler.getAverageRunTime()); builder.addGauge(QueueWaitingTaskCount, commandHandler.getQueuedCount()); builder.addGauge(InvocationCount, commandHandler.getInvocationCount()); int activePoolSize = commandHandler.getThreadPoolActivePoolSize(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java index fa2999c0fa8f..5335021da9ea 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java @@ -189,7 +189,6 @@ public int containerCount() { * Send FCR which will not contain removed containers. * * @param context StateContext - * @return */ public void handleVolumeFailures(StateContext context) { AtomicBoolean failedVolume = new AtomicBoolean(false); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java index c5855b38b74e..5fc971841554 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java @@ -647,7 +647,7 @@ public Handler getHandler(ContainerProtos.ContainerType containerType) { @Override public void setClusterId(String clusterId) { - Preconditions.checkNotNull(clusterId, "clusterId Cannot be null"); + Preconditions.checkNotNull(clusterId, "clusterId cannot be null"); if (this.clusterId == null) { this.clusterId = clusterId; for (Map.Entry handlerMap : handlers.entrySet()) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java index d6ca2d120e68..2e11cde3d9e4 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java @@ -35,7 +35,7 @@ /** * Map: containerId {@literal ->} (localId {@literal ->} {@link BlockData}). * The outer container map does not entail locking for a better performance. 
- * The inner {@link BlockDataMap} is synchronized. + * The inner {@code BlockDataMap} is synchronized. * * This class will maintain list of open keys per container when closeContainer * command comes, it should autocommit all open keys of a open container before diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java index c584ba790370..bb47b5b9b6ff 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java @@ -90,7 +90,7 @@ public final List chooseContainerForBlockDeletion( /** * Abstract step for ordering the container data to be deleted. * Subclass need to implement the concrete ordering implementation - * in descending order (more prioritized -> less prioritized) + * in descending order (more prioritized -> less prioritized) * @param candidateContainers candidate containers to be ordered */ protected abstract void orderByDescendingPriority( diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java index d02bae0a35ad..f075b6f67caa 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java @@ -75,7 +75,6 @@ void validateContainerCommand( /** * Returns the handler for the specified containerType. 
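The orderByDescendingPriority contract spelled out earlier in this hunk leaves the sort key to subclasses. A hypothetical policy sketch, assuming the abstract method takes a List of ContainerData and that ContainerData exposes getBytesUsed(); both are assumptions, since the diff only shows the javadoc:

import java.util.Comparator;
import java.util.List;

import org.apache.hadoop.ozone.container.common.impl.ContainerData;
import org.apache.hadoop.ozone.container.common.interfaces.ContainerDeletionChoosingPolicyTemplate;

// Hypothetical: prefer deleting from the containers using the most space.
public class LargestUsedSpaceFirstPolicy
    extends ContainerDeletionChoosingPolicyTemplate {
  @Override
  protected void orderByDescendingPriority(List<ContainerData> candidateContainers) {
    candidateContainers.sort(
        Comparator.comparingLong(ContainerData::getBytesUsed).reversed());
  }
}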
* @param containerType - * @return */ Handler getHandler(ContainerProtos.ContainerType containerType); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java index a6c3b11de926..b3854e7ecd29 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java @@ -234,12 +234,17 @@ public void logIfNeeded(Exception ex) { } if (missCounter == 0) { + long missedDurationSeconds = TimeUnit.MILLISECONDS.toSeconds( + this.getMissedCount() * getScmHeartbeatInterval(this.conf) + ); LOG.warn( - "Unable to communicate to {} server at {} for past {} seconds.", - serverName, - getAddress().getHostString() + ":" + getAddress().getPort(), - TimeUnit.MILLISECONDS.toSeconds(this.getMissedCount() * - getScmHeartbeatInterval(this.conf)), ex); + "Unable to communicate to {} server at {}:{} for past {} seconds.", + serverName, + address.getAddress(), + address.getPort(), + missedDurationSeconds, + ex + ); } if (LOG.isTraceEnabled()) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java index bc703ac6a552..cd032d4b275d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java @@ -31,6 +31,8 @@ import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto; import org.apache.hadoop.hdds.tracing.TracingUtil; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.statemachine .SCMConnectionManager; @@ -58,7 +60,7 @@ public class CloseContainerCommandHandler implements CommandHandler { private final AtomicLong invocationCount = new AtomicLong(0); private final AtomicInteger queuedCount = new AtomicInteger(0); private final ThreadPoolExecutor executor; - private long totalTime; + private final MutableRate opsLatencyMs; /** * Constructs a close container command handler. 
@@ -72,6 +74,9 @@ public CloseContainerCommandHandler( new ThreadFactoryBuilder() .setNameFormat(threadNamePrefix + "CloseContainerThread-%d") .build()); + MetricsRegistry registry = new MetricsRegistry( + CloseContainerCommandHandler.class.getSimpleName()); + this.opsLatencyMs = registry.newRate(SCMCommandProto.Type.closeContainerCommand + "Ms"); } /** @@ -155,7 +160,7 @@ public void handle(SCMCommand command, OzoneContainer ozoneContainer, LOG.error("Can't close container #{}", containerId, e); } finally { long endTime = Time.monotonicNow(); - totalTime += endTime - startTime; + this.opsLatencyMs.add(endTime - startTime); } }, executor).whenComplete((v, e) -> queuedCount.decrementAndGet()); } @@ -204,15 +209,12 @@ public int getInvocationCount() { */ @Override public long getAverageRunTime() { - if (invocationCount.get() > 0) { - return totalTime / invocationCount.get(); - } - return 0; + return (long) this.opsLatencyMs.lastStat().mean(); } @Override public long getTotalRunTime() { - return totalTime; + return (long) this.opsLatencyMs.lastStat().total(); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ClosePipelineCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ClosePipelineCommandHandler.java index 241abb6f4ae1..be39277fdfa6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ClosePipelineCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ClosePipelineCommandHandler.java @@ -24,6 +24,8 @@ import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.statemachine .SCMConnectionManager; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; @@ -60,9 +62,9 @@ public class ClosePipelineCommandHandler implements CommandHandler { private final AtomicLong invocationCount = new AtomicLong(0); private final AtomicInteger queuedCount = new AtomicInteger(0); - private long totalTime; private final Executor executor; private final BiFunction newRaftClient; + private final MutableRate opsLatencyMs; /** * Constructs a closePipelineCommand handler. 
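This hunk, and the matching ones in the pipeline, block-deletion and container-deletion handlers below, all swap hand-rolled totalTime/invocationCount bookkeeping for a metrics2 MutableRate. A minimal standalone sketch of the pattern; the handler and metric names are illustrative:

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableRate;
import org.apache.hadoop.util.Time;

final class ExampleCommandHandler {
  private final MutableRate opsLatencyMs;

  ExampleCommandHandler() {
    MetricsRegistry registry =
        new MetricsRegistry(ExampleCommandHandler.class.getSimpleName());
    this.opsLatencyMs = registry.newRate("exampleCommandMs");
  }

  void handle(Runnable command) {
    final long startTime = Time.monotonicNow();
    try {
      command.run();
    } finally {
      // One latency sample per invocation; mean, total and count all derive from it.
      opsLatencyMs.add(Time.monotonicNow() - startTime);
    }
  }

  long getAverageRunTime() {
    return (long) opsLatencyMs.lastStat().mean();
  }

  long getTotalRunTime() {
    return (long) opsLatencyMs.lastStat().total();
  }
}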
@@ -80,6 +82,9 @@ public ClosePipelineCommandHandler( Executor executor) { this.newRaftClient = newRaftClient; this.executor = executor; + MetricsRegistry registry = new MetricsRegistry( + ClosePipelineCommandHandler.class.getSimpleName()); + this.opsLatencyMs = registry.newRate(SCMCommandProto.Type.closePipelineCommand + "Ms"); } /** @@ -155,7 +160,7 @@ public void handle(SCMCommand command, OzoneContainer ozoneContainer, } } finally { long endTime = Time.monotonicNow(); - totalTime += endTime - startTime; + this.opsLatencyMs.add(endTime - startTime); } }, executor).whenComplete((v, e) -> queuedCount.decrementAndGet()); } @@ -187,15 +192,12 @@ public int getInvocationCount() { */ @Override public long getAverageRunTime() { - if (invocationCount.get() > 0) { - return totalTime / invocationCount.get(); - } - return 0; + return (long) this.opsLatencyMs.lastStat().mean(); } @Override public long getTotalRunTime() { - return totalTime; + return (long) this.opsLatencyMs.lastStat().total(); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java index 4a36a1987de6..62fc8a919d84 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java @@ -30,6 +30,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; @@ -59,8 +61,8 @@ public class CreatePipelineCommandHandler implements CommandHandler { private final AtomicInteger queuedCount = new AtomicInteger(0); private final BiFunction newRaftClient; - private long totalTime; private final Executor executor; + private final MutableRate opsLatencyMs; /** * Constructs a createPipelineCommand handler. 
@@ -75,6 +77,9 @@ public CreatePipelineCommandHandler(ConfigurationSource conf, Executor executor) { this.newRaftClient = newRaftClient; this.executor = executor; + MetricsRegistry registry = new MetricsRegistry( + CreatePipelineCommandHandler.class.getSimpleName()); + this.opsLatencyMs = registry.newRate(SCMCommandProto.Type.createPipelineCommand + "Ms"); } /** @@ -135,7 +140,7 @@ public void handle(SCMCommand command, OzoneContainer ozoneContainer, } } finally { long endTime = Time.monotonicNow(); - totalTime += endTime - startTime; + this.opsLatencyMs.add(endTime - startTime); } }, executor).whenComplete((v, e) -> queuedCount.decrementAndGet()); } @@ -167,15 +172,12 @@ public int getInvocationCount() { */ @Override public long getAverageRunTime() { - if (invocationCount.get() > 0) { - return totalTime / invocationCount.get(); - } - return 0; + return (long) this.opsLatencyMs.lastStat().mean(); } @Override public long getTotalRunTime() { - return totalTime; + return (long) this.opsLatencyMs.lastStat().total(); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java index bd7431c61452..136c58058210 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java @@ -32,6 +32,8 @@ import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.BlockDeletingServiceMetrics; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList; @@ -91,7 +93,6 @@ public class DeleteBlocksCommandHandler implements CommandHandler { private final ContainerSet containerSet; private final ConfigurationSource conf; private int invocationCount; - private long totalTime; private final ThreadPoolExecutor executor; private final LinkedBlockingQueue deleteCommandQueues; private final Daemon handlerThread; @@ -99,6 +100,7 @@ public class DeleteBlocksCommandHandler implements CommandHandler { private final BlockDeletingServiceMetrics blockDeleteMetrics; private final long tryLockTimeoutMs; private final Map schemaHandlers; + private final MutableRate opsLatencyMs; public DeleteBlocksCommandHandler(OzoneContainer container, ConfigurationSource conf, DatanodeConfiguration dnConf, @@ -121,6 +123,9 @@ public DeleteBlocksCommandHandler(OzoneContainer container, dnConf.getBlockDeleteThreads(), threadFactory); this.deleteCommandQueues = new LinkedBlockingQueue<>(dnConf.getBlockDeleteQueueLimit()); + MetricsRegistry registry = new MetricsRegistry( + DeleteBlocksCommandHandler.class.getSimpleName()); + this.opsLatencyMs = registry.newRate(SCMCommandProto.Type.deleteBlocksCommand + "Ms"); long interval = dnConf.getBlockDeleteCommandWorkerInterval().toMillis(); handlerThread = new Daemon(new DeleteCmdWorker(interval)); handlerThread.start(); @@ -354,10 +359,11 @@ private void processCmd(DeleteCmdInfo 
cmd) { DeletedContainerBlocksSummary summary = DeletedContainerBlocksSummary.getFrom(containerBlocks); LOG.info("Summary of deleting container blocks, numOfTransactions={}, " - + "numOfContainers={}, numOfBlocks={}", + + "numOfContainers={}, numOfBlocks={}, commandId={}.", summary.getNumOfTxs(), summary.getNumOfContainers(), - summary.getNumOfBlocks()); + summary.getNumOfBlocks(), + cmd.getCmd().getId()); if (LOG.isDebugEnabled()) { LOG.debug("Start to delete container blocks, TXIDs={}", summary.getTxIDSummary()); @@ -384,7 +390,8 @@ private void processCmd(DeleteCmdInfo cmd) { LOG.debug("Sending following block deletion ACK to SCM"); for (DeleteBlockTransactionResult result : blockDeletionACK .getResultsList()) { - LOG.debug("{} : {}", result.getTxID(), result.getSuccess()); + LOG.debug("TxId = {} : ContainerId = {} : {}", + result.getTxID(), result.getContainerID(), result.getSuccess()); } } } @@ -403,7 +410,7 @@ private void processCmd(DeleteCmdInfo cmd) { }; updateCommandStatus(cmd.getContext(), cmd.getCmd(), statusUpdater, LOG); long endTime = Time.monotonicNow(); - totalTime += endTime - startTime; + this.opsLatencyMs.add(endTime - startTime); invocationCount++; } } @@ -666,15 +673,12 @@ public int getInvocationCount() { @Override public long getAverageRunTime() { - if (invocationCount > 0) { - return totalTime / invocationCount; - } - return 0; + return (long) this.opsLatencyMs.lastStat().mean(); } @Override public long getTotalRunTime() { - return totalTime; + return (long) this.opsLatencyMs.lastStat().total(); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java index b76e306e1c07..59aaacc1c802 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java @@ -22,6 +22,8 @@ import java.util.concurrent.RejectedExecutionException; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMCommandProto; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.statemachine .SCMConnectionManager; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; @@ -39,7 +41,6 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; /** * Handler to process the DeleteContainerCommand from SCM. 
@@ -51,10 +52,10 @@ public class DeleteContainerCommandHandler implements CommandHandler { private final AtomicInteger invocationCount = new AtomicInteger(0); private final AtomicInteger timeoutCount = new AtomicInteger(0); - private final AtomicLong totalTime = new AtomicLong(0); private final ThreadPoolExecutor executor; private final Clock clock; private int maxQueueSize; + private final MutableRate opsLatencyMs; public DeleteContainerCommandHandler( int threadPoolSize, Clock clock, int queueSize, String threadNamePrefix) { @@ -73,6 +74,9 @@ protected DeleteContainerCommandHandler(Clock clock, this.executor = executor; this.clock = clock; maxQueueSize = queueSize; + MetricsRegistry registry = new MetricsRegistry( + DeleteContainerCommandHandler.class.getSimpleName()); + this.opsLatencyMs = registry.newRate(SCMCommandProto.Type.deleteContainerCommand + "Ms"); } @Override public void handle(final SCMCommand command, @@ -124,7 +128,7 @@ private void handleInternal(SCMCommand command, StateContext context, } catch (IOException e) { LOG.error("Exception occurred while deleting the container.", e); } finally { - totalTime.getAndAdd(Time.monotonicNow() - startTime); + this.opsLatencyMs.add(Time.monotonicNow() - startTime); } } @@ -149,14 +153,12 @@ public int getTimeoutCount() { @Override public long getAverageRunTime() { - final int invocations = invocationCount.get(); - return invocations == 0 ? - 0 : totalTime.get() / invocations; + return (long) this.opsLatencyMs.lastStat().mean(); } @Override public long getTotalRunTime() { - return totalTime.get(); + return (long) this.opsLatencyMs.lastStat().total(); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/FinalizeNewLayoutVersionCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/FinalizeNewLayoutVersionCommandHandler.java index bd7ec5710d91..77e152447b95 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/FinalizeNewLayoutVersionCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/FinalizeNewLayoutVersionCommandHandler.java @@ -20,6 +20,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.FinalizeNewLayoutVersionCommandProto; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMCommandProto; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine .SCMConnectionManager; @@ -42,12 +44,15 @@ public class FinalizeNewLayoutVersionCommandHandler implements CommandHandler { LoggerFactory.getLogger(FinalizeNewLayoutVersionCommandHandler.class); private AtomicLong invocationCount = new AtomicLong(0); - private long totalTime; + private final MutableRate opsLatencyMs; /** * Constructs a FinalizeNewLayoutVersionCommandHandler. 
*/ public FinalizeNewLayoutVersionCommandHandler() { + MetricsRegistry registry = new MetricsRegistry( + FinalizeNewLayoutVersionCommandHandler.class.getSimpleName()); + this.opsLatencyMs = registry.newRate(SCMCommandProto.Type.finalizeNewLayoutVersionCommand + "Ms"); } /** @@ -82,7 +87,7 @@ public void handle(SCMCommand command, OzoneContainer ozoneContainer, LOG.error("Exception during finalization.", e); } finally { long endTime = Time.monotonicNow(); - totalTime += endTime - startTime; + this.opsLatencyMs.add(endTime - startTime); } } @@ -113,15 +118,12 @@ public int getInvocationCount() { */ @Override public long getAverageRunTime() { - if (invocationCount.get() > 0) { - return totalTime / invocationCount.get(); - } - return 0; + return (long) this.opsLatencyMs.lastStat().mean(); } @Override public long getTotalRunTime() { - return totalTime; + return (long) this.opsLatencyMs.lastStat().total(); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReconstructECContainersCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReconstructECContainersCommandHandler.java index 602687d7a003..030d169e9b84 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReconstructECContainersCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReconstructECContainersCommandHandler.java @@ -36,6 +36,7 @@ public class ReconstructECContainersCommandHandler implements CommandHandler { private final ReplicationSupervisor supervisor; private final ECReconstructionCoordinator coordinator; private final ConfigurationSource conf; + private String metricsName; public ReconstructECContainersCommandHandler(ConfigurationSource conf, ReplicationSupervisor supervisor, @@ -52,8 +53,16 @@ public void handle(SCMCommand command, OzoneContainer container, (ReconstructECContainersCommand) command; ECReconstructionCommandInfo reconstructionCommandInfo = new ECReconstructionCommandInfo(ecContainersCommand); - this.supervisor.addTask(new ECReconstructionCoordinatorTask( - coordinator, reconstructionCommandInfo)); + ECReconstructionCoordinatorTask task = new ECReconstructionCoordinatorTask( + coordinator, reconstructionCommandInfo); + if (this.metricsName == null) { + this.metricsName = task.getMetricName(); + } + this.supervisor.addTask(task); + } + + public String getMetricsName() { + return this.metricsName; } @Override @@ -63,23 +72,26 @@ public Type getCommandType() { @Override public int getInvocationCount() { - return 0; + return this.metricsName == null ? 0 : (int) this.supervisor + .getReplicationRequestCount(metricsName); } @Override public long getAverageRunTime() { - return 0; + return this.metricsName == null ? 0 : (int) this.supervisor + .getReplicationRequestAvgTime(metricsName); } @Override public long getTotalRunTime() { - return 0; + return this.metricsName == null ? 0 : this.supervisor + .getReplicationRequestTotalTime(metricsName); } @Override public int getQueuedCount() { - return supervisor - .getInFlightReplications(ECReconstructionCoordinatorTask.class); + return this.metricsName == null ? 
0 : (int) this.supervisor + .getReplicationQueuedCount(metricsName); } public ConfigurationSource getConf() { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/RefreshVolumeUsageCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/RefreshVolumeUsageCommandHandler.java index 3c14b2fb1614..1ab31ba1c413 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/RefreshVolumeUsageCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/RefreshVolumeUsageCommandHandler.java @@ -18,6 +18,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; @@ -27,7 +29,6 @@ import org.slf4j.LoggerFactory; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; /** * Command handler to refresh usage info of all volumes. @@ -38,9 +39,12 @@ public class RefreshVolumeUsageCommandHandler implements CommandHandler { LoggerFactory.getLogger(RefreshVolumeUsageCommandHandler.class); private final AtomicInteger invocationCount = new AtomicInteger(0); - private final AtomicLong totalTime = new AtomicLong(0); + private final MutableRate opsLatencyMs; public RefreshVolumeUsageCommandHandler() { + MetricsRegistry registry = new MetricsRegistry( + RefreshVolumeUsageCommandHandler.class.getSimpleName()); + this.opsLatencyMs = registry.newRate(Type.refreshVolumeUsageInfo + "Ms"); } @Override @@ -50,7 +54,7 @@ public void handle(SCMCommand command, OzoneContainer container, invocationCount.incrementAndGet(); final long startTime = Time.monotonicNow(); container.getVolumeSet().refreshAllVolumeUsage(); - totalTime.getAndAdd(Time.monotonicNow() - startTime); + this.opsLatencyMs.add(Time.monotonicNow() - startTime); } @Override @@ -66,14 +70,12 @@ public int getInvocationCount() { @Override public long getAverageRunTime() { - final int invocations = invocationCount.get(); - return invocations == 0 ? 
- 0 : totalTime.get() / invocations; + return (long) this.opsLatencyMs.lastStat().mean(); } @Override public long getTotalRunTime() { - return totalTime.get(); + return (long) this.opsLatencyMs.lastStat().total(); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java index 21b26339e238..242a4eb74bed 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java @@ -43,29 +43,28 @@ public class ReplicateContainerCommandHandler implements CommandHandler { static final Logger LOG = LoggerFactory.getLogger(ReplicateContainerCommandHandler.class); - private int invocationCount; - - private long totalTime; - - private ConfigurationSource conf; - private ReplicationSupervisor supervisor; private ContainerReplicator downloadReplicator; private ContainerReplicator pushReplicator; + private String metricsName; + public ReplicateContainerCommandHandler( ConfigurationSource conf, ReplicationSupervisor supervisor, ContainerReplicator downloadReplicator, ContainerReplicator pushReplicator) { - this.conf = conf; this.supervisor = supervisor; this.downloadReplicator = downloadReplicator; this.pushReplicator = pushReplicator; } + public String getMetricsName() { + return this.metricsName; + } + @Override public void handle(SCMCommand command, OzoneContainer container, StateContext context, SCMConnectionManager connectionManager) { @@ -86,12 +85,16 @@ public void handle(SCMCommand command, OzoneContainer container, downloadReplicator : pushReplicator; ReplicationTask task = new ReplicationTask(replicateCommand, replicator); + if (metricsName == null) { + metricsName = task.getMetricName(); + } supervisor.addTask(task); } @Override public int getQueuedCount() { - return supervisor.getInFlightReplications(ReplicationTask.class); + return this.metricsName == null ? 0 : (int) this.supervisor + .getReplicationQueuedCount(metricsName); } @Override @@ -101,19 +104,19 @@ public SCMCommandProto.Type getCommandType() { @Override public int getInvocationCount() { - return this.invocationCount; + return this.metricsName == null ? 0 : (int) this.supervisor + .getReplicationRequestCount(metricsName); } @Override public long getAverageRunTime() { - if (invocationCount > 0) { - return totalTime / invocationCount; - } - return 0; + return this.metricsName == null ? 0 : (int) this.supervisor + .getReplicationRequestAvgTime(metricsName); } @Override public long getTotalRunTime() { - return totalTime; + return this.metricsName == null ? 
0 : this.supervisor + .getReplicationRequestTotalTime(metricsName); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java index 6f7f4414eeb0..335636247950 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java @@ -21,8 +21,10 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SetNodeOperationalStateCommandProto; import org.apache.hadoop.hdds.utils.HddsServerUtil; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; @@ -39,7 +41,6 @@ import java.io.File; import java.io.IOException; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; @@ -54,7 +55,7 @@ public class SetNodeOperationalStateCommandHandler implements CommandHandler { private final ConfigurationSource conf; private final Consumer replicationSupervisor; private final AtomicInteger invocationCount = new AtomicInteger(0); - private final AtomicLong totalTime = new AtomicLong(0); + private final MutableRate opsLatencyMs; /** * Set Node State command handler. @@ -65,6 +66,9 @@ public SetNodeOperationalStateCommandHandler(ConfigurationSource conf, Consumer replicationSupervisor) { this.conf = conf; this.replicationSupervisor = replicationSupervisor; + MetricsRegistry registry = new MetricsRegistry( + SetNodeOperationalStateCommandHandler.class.getSimpleName()); + this.opsLatencyMs = registry.newRate(Type.setNodeOperationalStateCommand + "Ms"); } /** @@ -80,9 +84,6 @@ public void handle(SCMCommand command, OzoneContainer container, StateContext context, SCMConnectionManager connectionManager) { long startTime = Time.monotonicNow(); invocationCount.incrementAndGet(); - StorageContainerDatanodeProtocolProtos.SetNodeOperationalStateCommandProto - setNodeCmdProto = null; - if (command.getType() != Type.setNodeOperationalStateCommand) { LOG.warn("Skipping handling command, expected command " + "type {} but found {}", @@ -91,7 +92,7 @@ public void handle(SCMCommand command, OzoneContainer container, } SetNodeOperationalStateCommand setNodeCmd = (SetNodeOperationalStateCommand) command; - setNodeCmdProto = setNodeCmd.getProto(); + SetNodeOperationalStateCommandProto setNodeCmdProto = setNodeCmd.getProto(); DatanodeDetails dni = context.getParent().getDatanodeDetails(); HddsProtos.NodeOperationalState state = setNodeCmdProto.getNodeOperationalState(); @@ -106,7 +107,7 @@ public void handle(SCMCommand command, OzoneContainer container, // handler interface. 
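Illustrative note: the two replication-related handlers above (ReconstructECContainersCommandHandler and ReplicateContainerCommandHandler) stop keeping local counters and instead remember the metric name of the first task they submit, then answer all counter queries from the shared ReplicationSupervisor. A condensed sketch of that delegation, using the supervisor methods shown in the patch; the wrapper class is hypothetical and the ReplicationSupervisor import path is assumed from the Ozone tree:

import org.apache.hadoop.ozone.container.replication.ReplicationSupervisor;

class SupervisorBackedMetricsSketch {
  private final ReplicationSupervisor supervisor;
  private String metricsName;                         // set lazily from the first submitted task

  SupervisorBackedMetricsSketch(ReplicationSupervisor supervisor) {
    this.supervisor = supervisor;
  }

  void onTaskSubmitted(String taskMetricName) {       // e.g. the value of task.getMetricName()
    if (metricsName == null) {
      metricsName = taskMetricName;
    }
  }

  int getInvocationCount() {
    return metricsName == null ? 0 : (int) supervisor.getReplicationRequestCount(metricsName);
  }

  long getAverageRunTime() {
    return metricsName == null ? 0 : (long) supervisor.getReplicationRequestAvgTime(metricsName);
  }

  long getTotalRunTime() {
    return metricsName == null ? 0 : supervisor.getReplicationRequestTotalTime(metricsName);
  }

  int getQueuedCount() {
    return metricsName == null ? 0 : (int) supervisor.getReplicationQueuedCount(metricsName);
  }
}

The null guard matters: until the first command arrives there is no metric name to look up, so every getter falls back to 0, matching the previous behaviour of the unused local counters.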
} replicationSupervisor.accept(state); - totalTime.addAndGet(Time.monotonicNow() - startTime); + this.opsLatencyMs.add(Time.monotonicNow() - startTime); } // TODO - this duplicates code in HddsDatanodeService and InitDatanodeState @@ -125,8 +126,7 @@ private void persistDatanodeDetails(DatanodeDetails dnDetails) * @return Type */ @Override - public StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type - getCommandType() { + public Type getCommandType() { return Type.setNodeOperationalStateCommand; } @@ -147,14 +147,12 @@ public int getInvocationCount() { */ @Override public long getAverageRunTime() { - final int invocations = invocationCount.get(); - return invocations == 0 ? - 0 : totalTime.get() / invocations; + return (long) this.opsLatencyMs.lastStat().mean(); } @Override public long getTotalRunTime() { - return totalTime.get(); + return (long) this.opsLatencyMs.lastStat().total(); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java index 44f0eae49ead..25db14a1012b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java @@ -489,7 +489,7 @@ public Builder setEndpointStateMachine(EndpointStateMachine rpcEndPoint) { /** * Sets the LayoutVersionManager. * - * @param versionMgr - config + * @param lvm config * @return Builder */ public Builder setLayoutVersionManager(HDDSLayoutVersionManager lvm) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java index 71f95cc4d329..969756b40f8f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java @@ -244,7 +244,7 @@ public Builder setConfig(ConfigurationSource config) { /** * Sets the LayoutVersionManager. * - * @param versionMgr - config + * @param lvm config * @return Builder. 
*/ public Builder setLayoutVersionManager(HDDSLayoutVersionManager lvm) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java index e702b1e6e158..968c9b9a6e66 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.container.common.states.endpoint; import java.io.IOException; +import java.net.BindException; import java.util.concurrent.Callable; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -104,7 +105,7 @@ public EndpointStateMachine.EndPointStates call() throws Exception { LOG.debug("Cannot execute GetVersion task as endpoint state machine " + "is in {} state", rpcEndPoint.getState()); } - } catch (DiskOutOfSpaceException ex) { + } catch (DiskOutOfSpaceException | BindException ex) { rpcEndPoint.setState(EndpointStateMachine.EndPointStates.SHUTDOWN); } catch (IOException ex) { rpcEndPoint.logIfNeeded(ex); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java index ad9c5c9d9ca0..42daaa94be39 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.container.common.transport.server; import java.io.IOException; +import java.net.BindException; import java.util.Collections; import java.util.List; import java.util.UUID; @@ -185,7 +186,16 @@ public HddsProtos.ReplicationType getServerType() { @Override public void start() throws IOException { if (!isStarted) { - server.start(); + try { + server.start(); + } catch (IOException e) { + LOG.error("Error while starting the server", e); + if (e.getMessage().contains("Failed to bind to address")) { + throw new BindException(e.getMessage()); + } else { + throw e; + } + } int realPort = server.getPort(); if (port == 0) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index 9dc6af19353a..1048ec5092c7 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -41,8 +41,9 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; -import java.util.stream.Collectors; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.hdds.HddsUtils; 
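Illustrative note: the VersionEndpointTask and XceiverServerGrpc changes above cooperate. The gRPC server reports a port conflict as a plain IOException, so start() now translates the "Failed to bind to address" case into java.net.BindException, and the endpoint task treats BindException (like DiskOutOfSpaceException) as fatal and moves the endpoint to SHUTDOWN instead of retrying. A minimal sketch of the translation, with a hypothetical Startable stand-in for the gRPC server wrapper:

import java.io.IOException;
import java.net.BindException;

final class BindFailureSketch {
  interface Startable {                               // hypothetical stand-in for the gRPC server
    void start() throws IOException;
  }

  static void start(Startable server) throws IOException {
    try {
      server.start();
    } catch (IOException e) {
      // Port conflicts surface as a generic IOException; rethrow them as BindException so
      // callers can tell "cannot bind" apart from transient I/O problems.
      if (e.getMessage() != null && e.getMessage().contains("Failed to bind to address")) {
        throw new BindException(e.getMessage());
      }
      throw e;
    }
  }

  private BindFailureSketch() {
  }
}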
import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -69,19 +70,17 @@ import org.apache.hadoop.ozone.container.keyvalue.impl.KeyValueStreamDataChannel; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; import org.apache.hadoop.util.Time; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; import org.apache.ratis.proto.RaftProtos; -import org.apache.ratis.proto.RaftProtos.StateMachineEntryProto; import org.apache.ratis.proto.RaftProtos.LogEntryProto; import org.apache.ratis.proto.RaftProtos.RaftPeerRole; import org.apache.ratis.proto.RaftProtos.RoleInfoProto; +import org.apache.ratis.proto.RaftProtos.StateMachineEntryProto; import org.apache.ratis.proto.RaftProtos.StateMachineLogEntryProto; import org.apache.ratis.protocol.Message; import org.apache.ratis.protocol.RaftClientRequest; import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.protocol.RaftGroupMemberId; +import org.apache.ratis.protocol.RaftPeer; import org.apache.ratis.protocol.RaftPeerId; import org.apache.ratis.protocol.exceptions.StateMachineException; import org.apache.ratis.server.RaftServer; @@ -97,10 +96,10 @@ import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferException; import org.apache.ratis.thirdparty.com.google.protobuf.TextFormat; +import org.apache.ratis.util.JavaUtils; import org.apache.ratis.util.LifeCycle; import org.apache.ratis.util.TaskQueue; import org.apache.ratis.util.function.CheckedSupplier; -import org.apache.ratis.util.JavaUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -184,7 +183,6 @@ long getStartTime() { private final SimpleStateMachineStorage storage = new SimpleStateMachineStorage(); - private final RaftGroupId gid; private final ContainerDispatcher dispatcher; private final ContainerController containerController; private final XceiverServerRatis ratisServer; @@ -219,7 +217,6 @@ public ContainerStateMachine(HddsDatanodeService hddsDatanodeService, RaftGroupI ConfigurationSource conf, String threadNamePrefix) { this.datanodeService = hddsDatanodeService; - this.gid = gid; this.dispatcher = dispatcher; this.containerController = containerController; this.ratisServer = ratisServer; @@ -283,8 +280,9 @@ public void initialize( throws IOException { super.initialize(server, id, raftStorage); storage.init(raftStorage); - ratisServer.notifyGroupAdd(gid); + ratisServer.notifyGroupAdd(id); + LOG.info("{}: initialize {}", server.getId(), id); loadSnapshot(storage.getLatestSnapshot()); } @@ -293,7 +291,7 @@ private long loadSnapshot(SingleFileSnapshotInfo snapshot) if (snapshot == null) { TermIndex empty = TermIndex.valueOf(0, RaftLog.INVALID_LOG_INDEX); LOG.info("{}: The snapshot info is null. 
Setting the last applied index " + - "to:{}", gid, empty); + "to:{}", getGroupId(), empty); setLastAppliedTermIndex(empty); return empty.getIndex(); } @@ -301,7 +299,7 @@ private long loadSnapshot(SingleFileSnapshotInfo snapshot) final File snapshotFile = snapshot.getFile().getPath().toFile(); final TermIndex last = SimpleStateMachineStorage.getTermIndexFromSnapshotFile(snapshotFile); - LOG.info("{}: Setting the last applied index to {}", gid, last); + LOG.info("{}: Setting the last applied index to {}", getGroupId(), last); setLastAppliedTermIndex(last); // initialize the dispatcher with snapshot so that it build the missing @@ -351,7 +349,7 @@ public long takeSnapshot() throws IOException { long startTime = Time.monotonicNow(); if (!isStateMachineHealthy()) { String msg = - "Failed to take snapshot " + " for " + gid + " as the stateMachine" + "Failed to take snapshot " + " for " + getGroupId() + " as the stateMachine" + " is unhealthy. The last applied index is at " + ti; StateMachineException sme = new StateMachineException(msg); LOG.error(msg); @@ -360,19 +358,19 @@ public long takeSnapshot() throws IOException { if (ti != null && ti.getIndex() != RaftLog.INVALID_LOG_INDEX) { final File snapshotFile = storage.getSnapshotFile(ti.getTerm(), ti.getIndex()); - LOG.info("{}: Taking a snapshot at:{} file {}", gid, ti, snapshotFile); + LOG.info("{}: Taking a snapshot at:{} file {}", getGroupId(), ti, snapshotFile); try (FileOutputStream fos = new FileOutputStream(snapshotFile)) { persistContainerSet(fos); fos.flush(); // make sure the snapshot file is synced fos.getFD().sync(); } catch (IOException ioe) { - LOG.error("{}: Failed to write snapshot at:{} file {}", gid, ti, + LOG.error("{}: Failed to write snapshot at:{} file {}", getGroupId(), ti, snapshotFile); throw ioe; } LOG.info("{}: Finished taking a snapshot at:{} file:{} took: {} ms", - gid, ti, snapshotFile, (Time.monotonicNow() - startTime)); + getGroupId(), ti, snapshotFile, (Time.monotonicNow() - startTime)); return ti.getIndex(); } return -1; @@ -386,7 +384,7 @@ public TransactionContext startTransaction(LogEntryProto entry, RaftPeerRole rol final StateMachineLogEntryProto stateMachineLogEntry = entry.getStateMachineLogEntry(); final ContainerCommandRequestProto logProto; try { - logProto = getContainerCommandRequestProto(gid, stateMachineLogEntry.getLogData()); + logProto = getContainerCommandRequestProto(getGroupId(), stateMachineLogEntry.getLogData()); } catch (InvalidProtocolBufferException e) { trx.setException(e); return trx; @@ -413,7 +411,7 @@ public TransactionContext startTransaction(RaftClientRequest request) long startTime = Time.monotonicNowNanos(); final ContainerCommandRequestProto proto = message2ContainerCommandRequestProto(request.getMessage()); - Preconditions.checkArgument(request.getRaftGroupId().equals(gid)); + Preconditions.checkArgument(request.getRaftGroupId().equals(getGroupId())); final TransactionContext.Builder builder = TransactionContext.newBuilder() .setClientRequest(request) @@ -449,7 +447,7 @@ public TransactionContext startTransaction(RaftClientRequest request) final WriteChunkRequestProto.Builder commitWriteChunkProto = WriteChunkRequestProto.newBuilder(write) .clearData(); protoBuilder.setWriteChunk(commitWriteChunkProto) - .setPipelineID(gid.getUuid().toString()) + .setPipelineID(getGroupId().getUuid().toString()) .setTraceID(proto.getTraceID()); builder.setStateMachineData(write.getData()); @@ -491,20 +489,20 @@ private static ContainerCommandRequestProto getContainerCommandRequestProto( 
private ContainerCommandRequestProto message2ContainerCommandRequestProto( Message message) throws InvalidProtocolBufferException { - return ContainerCommandRequestMessage.toProto(message.getContent(), gid); + return ContainerCommandRequestMessage.toProto(message.getContent(), getGroupId()); } private ContainerCommandResponseProto dispatchCommand( ContainerCommandRequestProto requestProto, DispatcherContext context) { if (LOG.isTraceEnabled()) { - LOG.trace("{}: dispatch {} containerID={} pipelineID={} traceID={}", gid, + LOG.trace("{}: dispatch {} containerID={} pipelineID={} traceID={}", getGroupId(), requestProto.getCmdType(), requestProto.getContainerID(), requestProto.getPipelineID(), requestProto.getTraceID()); } ContainerCommandResponseProto response = dispatcher.dispatch(requestProto, context); if (LOG.isTraceEnabled()) { - LOG.trace("{}: response {}", gid, response); + LOG.trace("{}: response {}", getGroupId(), response); } return response; } @@ -531,7 +529,7 @@ private CompletableFuture writeStateMachineData( RaftServer server = ratisServer.getServer(); Preconditions.checkArgument(!write.getData().isEmpty()); try { - if (server.getDivision(gid).getInfo().isLeader()) { + if (server.getDivision(getGroupId()).getInfo().isLeader()) { stateMachineDataCache.put(entryIndex, write.getData()); } } catch (InterruptedException ioe) { @@ -559,7 +557,7 @@ private CompletableFuture writeStateMachineData( return dispatchCommand(requestProto, context); } catch (Exception e) { LOG.error("{}: writeChunk writeStateMachineData failed: blockId" + - "{} logIndex {} chunkName {}", gid, write.getBlockID(), + "{} logIndex {} chunkName {}", getGroupId(), write.getBlockID(), entryIndex, write.getChunkData().getChunkName(), e); metrics.incNumWriteDataFails(); // write chunks go in parallel. 
It's possible that one write chunk @@ -573,7 +571,7 @@ private CompletableFuture writeStateMachineData( writeChunkFutureMap.put(entryIndex, writeChunkFuture); if (LOG.isDebugEnabled()) { LOG.debug("{}: writeChunk writeStateMachineData : blockId" + - "{} logIndex {} chunkName {}", gid, write.getBlockID(), + "{} logIndex {} chunkName {}", getGroupId(), write.getBlockID(), entryIndex, write.getChunkData().getChunkName()); } // Remove the future once it finishes execution from the @@ -587,7 +585,7 @@ private CompletableFuture writeStateMachineData( && r.getResult() != ContainerProtos.Result.CHUNK_FILE_INCONSISTENCY) { StorageContainerException sce = new StorageContainerException(r.getMessage(), r.getResult()); - LOG.error(gid + ": writeChunk writeStateMachineData failed: blockId" + + LOG.error(getGroupId() + ": writeChunk writeStateMachineData failed: blockId" + write.getBlockID() + " logIndex " + entryIndex + " chunkName " + write.getChunkData().getChunkName() + " Error message: " + r.getMessage() + " Container Result: " + r.getResult()); @@ -601,7 +599,7 @@ private CompletableFuture writeStateMachineData( metrics.incNumBytesWrittenCount( requestProto.getWriteChunk().getChunkData().getLen()); if (LOG.isDebugEnabled()) { - LOG.debug(gid + + LOG.debug(getGroupId() + ": writeChunk writeStateMachineData completed: blockId" + write.getBlockID() + " logIndex " + entryIndex + " chunkName " + write.getChunkData().getChunkName()); @@ -622,7 +620,7 @@ private StateMachine.DataChannel getStreamDataChannel( DispatcherContext context) throws StorageContainerException { if (LOG.isDebugEnabled()) { LOG.debug("{}: getStreamDataChannel {} containerID={} pipelineID={} " + - "traceID={}", gid, requestProto.getCmdType(), + "traceID={}", getGroupId(), requestProto.getCmdType(), requestProto.getContainerID(), requestProto.getPipelineID(), requestProto.getTraceID()); } @@ -705,9 +703,9 @@ private ExecutorService getChunkExecutor(WriteChunkRequestProto req) { } /** - * {@link #writeStateMachineData(ContainerCommandRequestProto, long, long, long)} + * {@link #writeStateMachineData} * calls are not synchronized with each other - * and also with {@link #applyTransaction(TransactionContext)}. + * and also with {@code applyTransaction(TransactionContext)}. */ @Override public CompletableFuture write(LogEntryProto entry, TransactionContext trx) { @@ -781,7 +779,7 @@ private ByteString readStateMachineData( new StorageContainerException(response.getMessage(), response.getResult()); LOG.error("gid {} : ReadStateMachine failed. cmd {} logIndex {} msg : " - + "{} Container Result: {}", gid, response.getCmdType(), index, + + "{} Container Result: {}", getGroupId(), response.getCmdType(), index, response.getMessage(), response.getResult()); stateMachineHealthy.set(false); throw sce; @@ -817,11 +815,9 @@ private ByteString readStateMachineData( */ @Override public CompletableFuture flush(long index) { - List> futureList = - writeChunkFutureMap.entrySet().stream().filter(x -> x.getKey() <= index) - .map(Map.Entry::getValue).collect(Collectors.toList()); return CompletableFuture.allOf( - futureList.toArray(new CompletableFuture[futureList.size()])); + writeChunkFutureMap.entrySet().stream().filter(x -> x.getKey() <= index) + .map(Map.Entry::getValue).toArray(CompletableFuture[]::new)); } /** @@ -858,7 +854,7 @@ public CompletableFuture read(LogEntryProto entry, TransactionContex .map(TransactionContext::getStateMachineContext) .orElse(null); final ContainerCommandRequestProto requestProto = context != null ? 
context.getLogProto() - : getContainerCommandRequestProto(gid, entry.getStateMachineLogEntry().getLogData()); + : getContainerCommandRequestProto(getGroupId(), entry.getStateMachineLogEntry().getLogData()); if (requestProto.getCmdType() != Type.WriteChunk) { throw new IllegalStateException("Cmd type:" + requestProto.getCmdType() @@ -876,7 +872,7 @@ public CompletableFuture read(LogEntryProto entry, TransactionContex return future; } catch (Exception e) { metrics.incNumReadStateMachineFails(); - LOG.error("{} unable to read stateMachineData:", gid, e); + LOG.error("{} unable to read stateMachineData:", getGroupId(), e); return completeExceptionally(e); } } @@ -922,7 +918,7 @@ public void notifyServerShutdown(RaftProtos.RoleInfoProto roleInfo, boolean allS // from `HddsDatanodeService.stop()`, otherwise, it indicates this `close` originates from ratis. if (allServer) { if (datanodeService != null && !datanodeService.isStopped()) { - LOG.info("{} is closed by ratis", gid); + LOG.info("{} is closed by ratis", getGroupId()); if (semaphore.tryAcquire()) { // run with a different thread, so this raft group can be closed Runnable runnable = () -> { @@ -954,7 +950,7 @@ public void notifyServerShutdown(RaftProtos.RoleInfoProto roleInfo, boolean allS CompletableFuture.runAsync(runnable); } } else { - LOG.info("{} is closed by HddsDatanodeService", gid); + LOG.info("{} is closed by HddsDatanodeService", getGroupId()); } } } @@ -985,7 +981,7 @@ private CompletableFuture applyTransaction( private void removeStateMachineDataIfNeeded(long index) { if (waitOnBothFollowers) { try { - RaftServer.Division division = ratisServer.getServer().getDivision(gid); + RaftServer.Division division = ratisServer.getServer().getDivision(getGroupId()); if (division.getInfo().isLeader()) { long minIndex = Arrays.stream(division.getInfo() .getFollowerNextIndices()).min().getAsLong(); @@ -1043,7 +1039,7 @@ public CompletableFuture applyTransaction(TransactionContext trx) { CompletableFuture applyTransactionFuture = new CompletableFuture<>(); final Consumer exceptionHandler = e -> { - LOG.error(gid + ": failed to applyTransaction at logIndex " + index + LOG.error(getGroupId() + ": failed to applyTransaction at logIndex " + index + " for " + requestProto.getCmdType(), e); stateMachineHealthy.compareAndSet(true, false); metrics.incNumApplyTransactionsFails(); @@ -1071,7 +1067,7 @@ public CompletableFuture applyTransaction(TransactionContext trx) { new StorageContainerException(r.getMessage(), r.getResult()); LOG.error( "gid {} : ApplyTransaction failed. cmd {} logIndex {} msg : " - + "{} Container Result: {}", gid, r.getCmdType(), index, + + "{} Container Result: {}", getGroupId(), r.getCmdType(), index, r.getMessage(), r.getResult()); metrics.incNumApplyTransactionsFails(); // Since the applyTransaction now is completed exceptionally, @@ -1080,12 +1076,12 @@ public CompletableFuture applyTransaction(TransactionContext trx) { // shutdown. applyTransactionFuture.completeExceptionally(sce); stateMachineHealthy.compareAndSet(true, false); - ratisServer.handleApplyTransactionFailure(gid, trx.getServerRole()); + ratisServer.handleApplyTransactionFailure(getGroupId(), trx.getServerRole()); } else { if (LOG.isDebugEnabled()) { LOG.debug( "gid {} : ApplyTransaction completed. 
cmd {} logIndex {} msg : " - + "{} Container Result: {}", gid, r.getCmdType(), index, + + "{} Container Result: {}", getGroupId(), r.getCmdType(), index, r.getMessage(), r.getResult()); } if (cmdType == Type.WriteChunk || cmdType == Type.PutSmallFile) { @@ -1162,26 +1158,26 @@ public void evictStateMachineCache() { } @Override - public void notifyFollowerSlowness(RoleInfoProto roleInfoProto) { - ratisServer.handleNodeSlowness(gid, roleInfoProto); + public void notifyFollowerSlowness(RoleInfoProto roleInfoProto, RaftPeer follower) { + ratisServer.handleFollowerSlowness(getGroupId(), roleInfoProto, follower); } @Override public void notifyExtendedNoLeader(RoleInfoProto roleInfoProto) { - ratisServer.handleNoLeader(gid, roleInfoProto); + ratisServer.handleNoLeader(getGroupId(), roleInfoProto); } @Override public void notifyLogFailed(Throwable t, LogEntryProto failedEntry) { - LOG.error("{}: {} {}", gid, TermIndex.valueOf(failedEntry), + LOG.error("{}: {} {}", getGroupId(), TermIndex.valueOf(failedEntry), toStateMachineLogEntryString(failedEntry.getStateMachineLogEntry()), t); - ratisServer.handleNodeLogFailure(gid, t); + ratisServer.handleNodeLogFailure(getGroupId(), t); } @Override public CompletableFuture notifyInstallSnapshotFromLeader( RoleInfoProto roleInfoProto, TermIndex firstTermIndexInLog) { - ratisServer.handleInstallSnapshotFromLeader(gid, roleInfoProto, + ratisServer.handleInstallSnapshotFromLeader(getGroupId(), roleInfoProto, firstTermIndexInLog); final CompletableFuture future = new CompletableFuture<>(); future.complete(firstTermIndexInLog); @@ -1190,7 +1186,7 @@ public CompletableFuture notifyInstallSnapshotFromLeader( @Override public void notifyGroupRemove() { - ratisServer.notifyGroupRemove(gid); + ratisServer.notifyGroupRemove(getGroupId()); // Make best effort to quasi-close all the containers on group removal. // Containers already in terminal state like CLOSED or UNHEALTHY will not // be affected. @@ -1198,7 +1194,7 @@ public void notifyGroupRemove() { try { containerController.markContainerForClose(cid); containerController.quasiCloseContainer(cid, - "Ratis group removed. Group id: " + gid); + "Ratis group removed. 
Group id: " + getGroupId()); } catch (IOException e) { LOG.debug("Failed to quasi-close container {}", cid); } @@ -1220,7 +1216,7 @@ public void notifyLeaderChanged(RaftGroupMemberId groupMemberId, @Override public String toStateMachineLogEntryString(StateMachineLogEntryProto proto) { - return smProtoToString(gid, containerController, proto); + return smProtoToString(getGroupId(), containerController, proto); } public static String smProtoToString(RaftGroupId gid, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index 7899cdcc0e67..a4c143439852 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -104,6 +104,7 @@ import org.apache.ratis.server.RaftServerRpc; import org.apache.ratis.server.protocol.TermIndex; import org.apache.ratis.server.storage.RaftStorage; +import org.apache.ratis.util.Preconditions; import org.apache.ratis.util.SizeInBytes; import org.apache.ratis.util.TimeDuration; import org.apache.ratis.util.TraditionalBinaryPrefix; @@ -161,19 +162,18 @@ private static long nextCallId() { private int clientPort; private int dataStreamPort; private final RaftServer server; + private final String name; private final List chunkExecutors; private final ContainerDispatcher dispatcher; private final ContainerController containerController; private final ClientId clientId = ClientId.randomId(); private final StateContext context; - private final long nodeFailureTimeoutMs; private boolean isStarted = false; private final DatanodeDetails datanodeDetails; private final ConfigurationSource conf; // TODO: Remove the gids set when Ratis supports an api to query active // pipelines private final ConcurrentMap activePipelines = new ConcurrentHashMap<>(); - private final RaftPeerId raftPeerId; // Timeout used while calling submitRequest directly. private final long requestTimeout; private final boolean shouldDeleteRatisLogDirectory; @@ -197,14 +197,14 @@ private XceiverServerRatis(HddsDatanodeService hddsDatanodeService, DatanodeDeta this.context = context; this.dispatcher = dispatcher; this.containerController = containerController; - this.raftPeerId = RatisHelper.toRaftPeerId(dd); String threadNamePrefix = datanodeDetails.threadNamePrefix(); chunkExecutors = createChunkExecutors(conf, threadNamePrefix); - nodeFailureTimeoutMs = ratisServerConfig.getFollowerSlownessTimeout(); shouldDeleteRatisLogDirectory = ratisServerConfig.shouldDeleteRatisLogDirectory(); RaftProperties serverProperties = newRaftProperties(); + final RaftPeerId raftPeerId = RatisHelper.toRaftPeerId(dd); + this.name = getClass().getSimpleName() + "(" + raftPeerId + ")"; this.server = RaftServer.newBuilder().setServerId(raftPeerId) .setProperties(serverProperties) @@ -474,7 +474,7 @@ private void setStateMachineDataConfigurations(RaftProperties properties) { // NOTE : the default value for the retry count in ratis is -1, // which means retry indefinitely. 
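Illustrative note: among the ContainerStateMachine changes above, flush(long) no longer collects the pending write-chunk futures into an intermediate List before converting them to an array; the stream is materialized directly with toArray(CompletableFuture[]::new). A self-contained sketch of the rewritten shape (the map name and value type here are placeholders, not the patch's exact generics):

import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

final class FlushSketch {
  private final Map<Long, CompletableFuture<Void>> writeChunkFutureMap = new ConcurrentHashMap<>();

  CompletableFuture<Void> flush(long index) {
    return CompletableFuture.allOf(
        writeChunkFutureMap.entrySet().stream()
            .filter(e -> e.getKey() <= index)          // only writes at or below the flushed index
            .map(Map.Entry::getValue)
            .toArray(CompletableFuture[]::new));       // no intermediate List needed
  }
}

The getGroupId()-instead-of-cached-gid substitution that runs through the rest of the file removes a duplicated field rather than changing behaviour, since getGroupId() already exposes the same value.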
- int syncTimeoutRetryDefault = (int) nodeFailureTimeoutMs / + final int syncTimeoutRetryDefault = (int) ratisServerConfig.getFollowerSlownessTimeout() / dataSyncTimeout.toIntExact(TimeUnit.MILLISECONDS); int numSyncRetries = conf.getInt( OzoneConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES, @@ -558,7 +558,7 @@ private static Parameters createTlsParameters(SecurityConfig conf, @Override public void start() throws IOException { if (!isStarted) { - LOG.info("Starting {} {}", getClass().getSimpleName(), server.getId()); + LOG.info("Starting {}", name); for (ThreadPoolExecutor executor : chunkExecutors) { executor.prestartAllCoreThreads(); } @@ -581,11 +581,11 @@ public void start() throws IOException { } } - private int getRealPort(InetSocketAddress address, Port.Name name) { + private int getRealPort(InetSocketAddress address, Port.Name portName) { int realPort = address.getPort(); - datanodeDetails.setPort(DatanodeDetails.newPort(name, realPort)); - LOG.info("{} {} is started using port {} for {}", - getClass().getSimpleName(), server.getId(), realPort, name); + final Port port = DatanodeDetails.newPort(portName, realPort); + datanodeDetails.setPort(port); + LOG.info("{} is started using port {}", name, port); return realPort; } @@ -593,7 +593,7 @@ private int getRealPort(InetSocketAddress address, Port.Name name) { public void stop() { if (isStarted) { try { - LOG.info("Stopping {} {}", getClass().getSimpleName(), server.getId()); + LOG.info("Closing {}", name); // shutdown server before the executors as while shutting down, // some of the tasks would be executed using the executors. server.close(); @@ -602,7 +602,7 @@ public void stop() { } isStarted = false; } catch (IOException e) { - LOG.error("XceiverServerRatis Could not be stopped gracefully.", e); + LOG.error("Failed to close {}.", name, e); } } } @@ -706,45 +706,40 @@ private GroupInfoRequest createGroupInfoRequest( nextCallId()); } - private void handlePipelineFailure(RaftGroupId groupId, - RoleInfoProto roleInfoProto) { - String msg; - UUID datanode = RatisHelper.toDatanodeId(roleInfoProto.getSelf()); - RaftPeerId id = RaftPeerId.valueOf(roleInfoProto.getSelf().getId()); + private void handlePipelineFailure(RaftGroupId groupId, RoleInfoProto roleInfoProto, String reason) { + final RaftPeerId raftPeerId = RaftPeerId.valueOf(roleInfoProto.getSelf().getId()); + Preconditions.assertEquals(getServer().getId(), raftPeerId, "raftPeerId"); + final StringBuilder b = new StringBuilder() + .append(name).append(" with datanodeId ").append(RatisHelper.toDatanodeId(raftPeerId)) + .append("handlePipelineFailure ").append(" for ").append(reason) + .append(": ").append(roleInfoProto.getRole()) + .append(" elapsed time=").append(roleInfoProto.getRoleElapsedTimeMs()).append("ms"); + switch (roleInfoProto.getRole()) { case CANDIDATE: - msg = datanode + " is in candidate state for " + - roleInfoProto.getCandidateInfo().getLastLeaderElapsedTimeMs() + "ms"; + final long lastLeaderElapsedTime = roleInfoProto.getCandidateInfo().getLastLeaderElapsedTimeMs(); + b.append(", lastLeaderElapsedTime=").append(lastLeaderElapsedTime).append("ms"); break; case FOLLOWER: - msg = datanode + " closes pipeline when installSnapshot from leader " + - "because leader snapshot doesn't contain any data to replay, " + - "all the log entries prior to the snapshot might have been purged." + - "So follower should not try to install snapshot from leader but" + - "can close the pipeline here. 
It's in follower state for " + - roleInfoProto.getRoleElapsedTimeMs() + "ms"; + b.append(", outstandingOp=").append(roleInfoProto.getFollowerInfo().getOutstandingOp()); break; case LEADER: - StringBuilder sb = new StringBuilder(); - sb.append(datanode).append(" has not seen follower/s"); - for (RaftProtos.ServerRpcProto follower : roleInfoProto.getLeaderInfo() - .getFollowerInfoList()) { - if (follower.getLastRpcElapsedTimeMs() > nodeFailureTimeoutMs) { - sb.append(" ").append(RatisHelper.toDatanodeId(follower.getId())) - .append(" for ").append(follower.getLastRpcElapsedTimeMs()) - .append("ms"); - } + final long followerSlownessTimeoutMs = ratisServerConfig.getFollowerSlownessTimeout(); + for (RaftProtos.ServerRpcProto follower : roleInfoProto.getLeaderInfo().getFollowerInfoList()) { + final long lastRpcElapsedTimeMs = follower.getLastRpcElapsedTimeMs(); + final boolean slow = lastRpcElapsedTimeMs > followerSlownessTimeoutMs; + final RaftPeerId followerId = RaftPeerId.valueOf(follower.getId().getId()); + b.append("\n Follower ").append(followerId) + .append(" with datanodeId ").append(RatisHelper.toDatanodeId(followerId)) + .append(" is ").append(slow ? "slow" : " responding") + .append(" with lastRpcElapsedTime=").append(lastRpcElapsedTimeMs).append("ms"); } - msg = sb.toString(); break; default: - LOG.error("unknown state: {}", roleInfoProto.getRole()); - throw new IllegalStateException("node" + id + " is in illegal role " - + roleInfoProto.getRole()); + throw new IllegalStateException("Unexpected role " + roleInfoProto.getRole()); } - triggerPipelineClose(groupId, msg, - ClosePipelineInfo.Reason.PIPELINE_FAILED); + triggerPipelineClose(groupId, b.toString(), ClosePipelineInfo.Reason.PIPELINE_FAILED); } private void triggerPipelineClose(RaftGroupId groupId, String detail, @@ -869,12 +864,12 @@ public void removeGroup(HddsProtos.PipelineID pipelineId) processReply(reply); } - void handleNodeSlowness(RaftGroupId groupId, RoleInfoProto roleInfoProto) { - handlePipelineFailure(groupId, roleInfoProto); + void handleFollowerSlowness(RaftGroupId groupId, RoleInfoProto roleInfoProto, RaftPeer follower) { + handlePipelineFailure(groupId, roleInfoProto, "slow follower " + follower.getId()); } void handleNoLeader(RaftGroupId groupId, RoleInfoProto roleInfoProto) { - handlePipelineFailure(groupId, roleInfoProto); + handlePipelineFailure(groupId, roleInfoProto, "no leader"); } void handleApplyTransactionFailure(RaftGroupId groupId, @@ -901,10 +896,9 @@ void handleApplyTransactionFailure(RaftGroupId groupId, void handleInstallSnapshotFromLeader(RaftGroupId groupId, RoleInfoProto roleInfoProto, TermIndex firstTermIndexInLog) { - LOG.warn("Install snapshot notification received from Leader with " + - "termIndex: {}, terminating pipeline: {}", + LOG.warn("handleInstallSnapshotFromLeader for firstTermIndexInLog={}, terminating pipeline: {}", firstTermIndexInLog, groupId); - handlePipelineFailure(groupId, roleInfoProto); + handlePipelineFailure(groupId, roleInfoProto, "install snapshot notification"); } /** @@ -950,7 +944,7 @@ void handleLeaderChangedNotification(RaftGroupMemberId groupMemberId, LOG.info("Leader change notification received for group: {} with new " + "leaderId: {}", groupMemberId.getGroupId(), raftPeerId1); // Save the reported leader to be sent with the report to SCM - boolean leaderForGroup = this.raftPeerId.equals(raftPeerId1); + final boolean leaderForGroup = server.getId().equals(raftPeerId1); activePipelines.compute(groupMemberId.getGroupId(), (key, value) -> value == null ? 
new ActivePipelineContext(leaderForGroup, false) : new ActivePipelineContext(leaderForGroup, value.isPendingClose())); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java index a74a23b7fbef..5fced0e39b39 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java @@ -206,7 +206,7 @@ public void shutdown() { /** * Delete all files under - * /hdds//tmp/deleted-containers. + * volume/hdds/cluster-id/tmp/deleted-containers. * This is the directory where containers are moved when they are deleted * from the system, but before being removed from the filesystem. This * makes the deletion atomic. diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java index af890269255d..3d1be9791ecc 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java @@ -46,16 +46,18 @@ * - fsCapacity: reported total capacity from local fs. * - minVolumeFreeSpace (mvfs) : determines the free space for closing containers.This is like adding a few reserved bytes to reserved space. - Dn's will send close container action to SCM at this limit & it is + Dn's will send close container action to SCM at this limit, and it is configurable. * - * + *
    + * {@code
      * |----used----|   (avail)   |++mvfs++|++++reserved+++++++|
      * |<-     capacity                  ->|
      *              |     fsAvail      |-------other-----------|
      * |<-                   fsCapacity                      ->|
    - *
    + * }
    + *
      * What we could directly get from local fs:
      *     fsCapacity, fsAvail, (fsUsed = fsCapacity - fsAvail)
      * We could get from config:
    @@ -78,11 +80,13 @@
      * then we should use DedicatedDiskSpaceUsage for
      * `hdds.datanode.du.factory.classname`,
      * Then it is much simpler, since we don't care about other usage:
    - *
    + * {@code
      *  |----used----|             (avail)/fsAvail              |
      *  |<-              capacity/fsCapacity                  ->|
    + * }
      *
      *  We have avail == fsAvail.
    + *  
    */ public final class VolumeInfo { @@ -153,11 +157,14 @@ public long getCapacity() { } /** + *
    +   * {@code
        * Calculate available space use method A.
        * |----used----|   (avail)   |++++++++reserved++++++++|
        * |<-     capacity         ->|
    -   *
        * A) avail = capacity - used
    +   * }
    +   * 
    */ public long getAvailable() { return usage.getAvailable(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java index 7e138b057168..34ba66c91bb2 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.container.common.volume; import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hdds.conf.ConfigurationException; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.StorageSize; import org.apache.hadoop.hdds.conf.StorageUnit; @@ -77,11 +78,15 @@ public long getUsedSpace() { } /** + *
    +   * {@code
        * Calculate available space use method B.
        * |----used----|   (avail)   |++++++++reserved++++++++|
        *              |     fsAvail      |-------other-------|
    -   *                          ->|~~~~|<-
    +   *                          ->|~~~~|<-
        *                      remainingReserved
    +   * }
    +   * 
    * B) avail = fsAvail - Max(reserved - other, 0); */ public SpaceUsageSource getCurrentUsage() { @@ -216,9 +221,8 @@ private static long getReserved(ConfigurationSource conf, String rootDir, for (String reserve : reserveList) { String[] words = reserve.split(":"); if (words.length < 2) { - LOG.error("Reserved space should be configured in a pair, but current value is {}", - reserve); - continue; + throw new ConfigurationException("hdds.datanode.dir.du.reserved - " + + "Reserved space should be configured in a pair, but current value is " + reserve); } try { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java index 487e6d37b282..95b7d06167f5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java @@ -18,11 +18,13 @@ package org.apache.hadoop.ozone.container.ec.reconstruction; import com.google.common.collect.ImmutableList; +import jakarta.annotation.Nonnull; import org.apache.commons.collections.map.SingletonMap; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.client.ClientTrustManager; @@ -34,8 +36,6 @@ import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State; -import jakarta.annotation.Nonnull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -44,7 +44,6 @@ import java.util.List; import java.util.Objects; import java.util.Set; -import java.util.stream.Collectors; /** * This class wraps necessary container-level rpc calls @@ -93,14 +92,11 @@ public BlockData[] listBlock(long containerId, DatanodeDetails dn, try { return BlockData.getFromProtoBuf(i); } catch (IOException e) { - LOG.debug("Failed while converting to protobuf BlockData. Returning" - + " null for listBlock from DN: " + dn, - e); + LOG.debug("Failed while converting to protobuf BlockData. Returning null for listBlock from DN: {}", dn, e); // TODO: revisit here. 
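Illustrative note: the VolumeUsage javadoc above documents "method B" for available space, avail = fsAvail - max(reserved - other, 0), where "other" is, roughly, space occupied on the volume by non-Ozone data. A tiny worked sketch of the formula (the helper class is hypothetical):

final class AvailableSpaceSketch {
  // B) avail = fsAvail - max(reserved - other, 0)
  static long availableMethodB(long fsAvail, long reserved, long other) {
    long remainingReserved = Math.max(reserved - other, 0);   // reserved space not already consumed by "other"
    return fsAvail - remainingReserved;
  }

  private AvailableSpaceSketch() {
  }
}

For example, with fsAvail = 100, reserved = 30 and other = 10, twenty reserved bytes are still outstanding, so the reported available space is 80. Note also that getReserved() above now throws a ConfigurationException for a malformed hdds.datanode.dir.du.reserved entry instead of silently skipping it.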
return null; } - }).collect(Collectors.toList()) - .toArray(new BlockData[blockDataList.size()]); + }).toArray(BlockData[]::new); } finally { this.xceiverClientManager.releaseClient(xceiverClient, false); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java index 8fadd19b67d3..4273ed2b7163 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java @@ -42,7 +42,6 @@ import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.io.ByteBufferPool; import org.apache.hadoop.io.ElasticByteBufferPool; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.io.BlockInputStreamFactory; import org.apache.hadoop.ozone.client.io.BlockInputStreamFactoryImpl; import org.apache.hadoop.ozone.client.io.ECBlockInputStreamProxy; @@ -370,7 +369,7 @@ private void logBlockGroupDetails(BlockLocationInfo blockLocationInfo, .append(" block length: ") .append(data.getSize()) .append(" block group length: ") - .append(getBlockDataLength(data)) + .append(data.getBlockGroupLength()) .append(" chunk list: \n"); int cnt = 0; for (ContainerProtos.ChunkInfo chunkInfo : data.getChunks()) { @@ -572,7 +571,7 @@ private long calcEffectiveBlockGroupLen(BlockData[] blockGroup, continue; } - long putBlockLen = getBlockDataLength(blockGroup[i]); + long putBlockLen = blockGroup[i].getBlockGroupLength(); // Use safe length is the minimum of the lengths recorded across the // stripe blockGroupLen = Math.min(putBlockLen, blockGroupLen); @@ -580,16 +579,6 @@ private long calcEffectiveBlockGroupLen(BlockData[] blockGroup, return blockGroupLen == Long.MAX_VALUE ? 0 : blockGroupLen; } - private long getBlockDataLength(BlockData blockData) { - String lenStr = blockData.getMetadata() - .get(OzoneConsts.BLOCK_GROUP_LEN_KEY_IN_PUT_BLOCK); - // If we don't have the length, then it indicates a problem with the stripe. - // All replica should carry the length, so if it is not there, we return 0, - // which will cause us to set the length of the block to zero and not - // attempt to reconstruct it. - return (lenStr == null) ? 0 : Long.parseLong(lenStr); - } - public ECReconstructionMetrics getECReconstructionMetrics() { return this.metrics; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java index 98d81c15d0ad..ae3288a3e983 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java @@ -932,7 +932,6 @@ private ContainerReplicaProto.State getHddsState() /** * Returns container DB file. 
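Illustrative note: the ECReconstructionCoordinator change above drops the private metadata lookup (BLOCK_GROUP_LEN_KEY_IN_PUT_BLOCK) in favour of BlockData#getBlockGroupLength(), and the safe block-group length remains the minimum reported across the stripe. A condensed sketch of that computation, assuming only the accessor used in the patch; the additional checks in the real method are omitted:

import org.apache.hadoop.ozone.container.common.helpers.BlockData;

final class BlockGroupLenSketch {
  static long effectiveBlockGroupLen(BlockData[] blockGroup) {
    long len = Long.MAX_VALUE;
    for (BlockData data : blockGroup) {
      if (data == null) {
        continue;                                  // replica missing for this index
      }
      len = Math.min(len, data.getBlockGroupLength());
    }
    // No usable replica: report 0 so reconstruction of this group is not attempted.
    return len == Long.MAX_VALUE ? 0 : len;
  }

  private BlockGroupLenSketch() {
  }
}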
- * @return */ public File getContainerDBFile() { return KeyValueContainerLocationUtil.getContainerDBFile(containerData); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java index 47d4f3f9e70a..4ea8552e780e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java @@ -430,7 +430,6 @@ public KeyPrefixFilter getDeletingBlockKeyFilter() { /** * Schema v3 use a prefix as startKey, * for other schemas just return null. - * @return */ public String startKeyEmpty() { if (hasSchema(SCHEMA_V3)) { @@ -442,7 +441,6 @@ public String startKeyEmpty() { /** * Schema v3 use containerID as key prefix, * for other schemas just return null. - * @return */ public String containerPrefix() { if (hasSchema(SCHEMA_V3)) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java index 7773b54f7942..945efbcf6ea9 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java @@ -99,7 +99,6 @@ public static DatanodeStore getUncachedDatanodeStore( * opened by this thread, the other thread will get a RocksDB exception. * @param containerData The container data * @param conf Configuration - * @return * @throws IOException */ public static DatanodeStore getUncachedDatanodeStore( diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java index b7d5b5fa59eb..691ccaa630d1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java @@ -439,13 +439,13 @@ public static boolean isSameSchemaVersion(String schema, String other) { /** * Moves container directory to a new location - * under "/hdds//tmp/deleted-containers" + * under "volume/hdds/cluster-id/tmp/deleted-containers" * and updates metadata and chunks path. * Containers will be moved under it before getting deleted * to avoid, in case of failure, having artifact leftovers * on the default container path on the disk. * - * Delete operation for Schema < V3 + * Delete operation for Schema < V3 * 1. Container is marked DELETED * 2. Container is removed from memory container set * 3. Container DB handler from cache is removed and closed @@ -460,7 +460,6 @@ public static boolean isSameSchemaVersion(String schema, String other) { * 5. Container is deleted from tmp directory. 
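Illustrative note (not the Ozone implementation): the javadoc above describes why deletion goes through volume/hdds/cluster-id/tmp/deleted-containers. Renaming within the same volume is a single filesystem operation, so the container either remains at its original path or is wholly handed over to the tmp area, and the slow recursive delete then runs on the tmp copy only. A generic sketch of that move-then-delete idea with java.nio; the class name and paths are hypothetical:

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.Comparator;
import java.util.stream.Stream;

final class TmpDeleteSketch {
  static void deleteContainerDir(Path containerDir, Path deletedContainersDir) throws IOException {
    Files.createDirectories(deletedContainersDir);
    Path target = deletedContainersDir.resolve(containerDir.getFileName());
    // Same volume, so this is a rename: the container disappears from its original
    // location in one step even if the datanode crashes immediately afterwards.
    Files.move(containerDir, target, StandardCopyOption.ATOMIC_MOVE);
    // The potentially slow recursive delete only touches the tmp copy.
    try (Stream<Path> walk = Files.walk(target)) {
      walk.sorted(Comparator.reverseOrder()).forEach(p -> {
        try {
          Files.delete(p);                          // children before parents
        } catch (IOException e) {
          throw new UncheckedIOException(e);
        }
      });
    }
  }

  private TmpDeleteSketch() {
  }
}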
* * @param keyValueContainerData - * @return true if renaming was successful */ public static void moveToDeletedContainerDir( KeyValueContainerData keyValueContainerData, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java index 288a2d3e3312..aa5d52f3ceeb 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java @@ -46,7 +46,6 @@ private ChunkManagerFactory() { * @param conf Configuration * @param manager This parameter will be used only for read data of * FILE_PER_CHUNK layout file. Can be null for other cases. - * @return */ public static ChunkManager createChunkManager(ConfigurationSource conf, BlockManager manager, VolumeSet volSet) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java index 8df856d4b93e..601e7b2712c6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java @@ -99,7 +99,9 @@ public void setLinked() { linked.set(true); } - /** @return true iff {@link StateMachine.DataChannel} is already linked. */ + /** + * @return true if {@link org.apache.ratis.statemachine.StateMachine.DataChannel} is already linked. + */ public boolean cleanUp() { if (linked.get()) { // already linked, nothing to do. diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java index 26719d7f035a..88aeb3c174dd 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java @@ -300,9 +300,9 @@ protected static void checkTableStatus(Table table, String name) /** * Block Iterator for KeyValue Container. This block iterator returns blocks - * which match with the {@link MetadataKeyFilters.KeyPrefixFilter}. If no + * which match with the {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter}. If no * filter is specified, then default filter used is - * {@link MetadataKeyFilters#getUnprefixedKeyFilter()} + * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters#getUnprefixedKeyFilter()} */ @InterfaceAudience.Public public static class KeyValueBlockIterator implements @@ -405,9 +405,9 @@ public void close() throws IOException { /** * Block localId Iterator for KeyValue Container. * This Block localId iterator returns localIds - * which match with the {@link MetadataKeyFilters.KeyPrefixFilter}. If no + * which match with the {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter}. 
If no * filter is specified, then default filter used is - * {@link MetadataKeyFilters#getUnprefixedKeyFilter()} + * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters#getUnprefixedKeyFilter()} */ @InterfaceAudience.Public public static class KeyValueBlockLocalIdIterator implements diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java index 4f54e85da2b1..bd1c0fb368af 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java @@ -51,27 +51,21 @@ public class DatanodeSchemaOneDBDefinition BLOCK_DATA = new DBColumnFamilyDefinition<>( StringUtils.bytes2String(DEFAULT_COLUMN_FAMILY), - String.class, SchemaOneKeyCodec.get(), - BlockData.class, BlockData.getCodec()); public static final DBColumnFamilyDefinition METADATA = new DBColumnFamilyDefinition<>( StringUtils.bytes2String(DEFAULT_COLUMN_FAMILY), - String.class, SchemaOneKeyCodec.get(), - Long.class, LongCodec.get()); public static final DBColumnFamilyDefinition DELETED_BLOCKS = new DBColumnFamilyDefinition<>( StringUtils.bytes2String(DEFAULT_COLUMN_FAMILY), - String.class, SchemaOneKeyCodec.get(), - ChunkInfoList.class, SchemaOneChunkInfoListCodec.get()); private static final Map>> diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java index d47446d49b0f..10537ca6f2d3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java @@ -59,45 +59,35 @@ public class DatanodeSchemaThreeDBDefinition BLOCK_DATA = new DBColumnFamilyDefinition<>( "block_data", - String.class, FixedLengthStringCodec.get(), - BlockData.class, BlockData.getCodec()); public static final DBColumnFamilyDefinition METADATA = new DBColumnFamilyDefinition<>( "metadata", - String.class, FixedLengthStringCodec.get(), - Long.class, LongCodec.get()); public static final DBColumnFamilyDefinition DELETE_TRANSACTION = new DBColumnFamilyDefinition<>( "delete_txns", - String.class, FixedLengthStringCodec.get(), - DeletedBlocksTransaction.class, Proto2Codec.get(DeletedBlocksTransaction.getDefaultInstance())); public static final DBColumnFamilyDefinition FINALIZE_BLOCKS = new DBColumnFamilyDefinition<>( "finalize_blocks", - String.class, FixedLengthStringCodec.get(), - Long.class, LongCodec.get()); public static final DBColumnFamilyDefinition LAST_CHUNK_INFO = new DBColumnFamilyDefinition<>( "last_chunk_info", - String.class, FixedLengthStringCodec.get(), - BlockData.class, BlockData.getCodec()); private static String separator = ""; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java index b9e7ec7bd5bf..bf6b1d0a29ca 100644 --- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.container.metadata; import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; import org.apache.hadoop.hdds.utils.db.DBDefinition; import org.apache.hadoop.hdds.utils.db.FixedLengthStringCodec; @@ -44,45 +43,35 @@ public class DatanodeSchemaTwoDBDefinition BLOCK_DATA = new DBColumnFamilyDefinition<>( "block_data", - String.class, StringCodec.get(), - BlockData.class, BlockData.getCodec()); public static final DBColumnFamilyDefinition METADATA = new DBColumnFamilyDefinition<>( "metadata", - String.class, StringCodec.get(), - Long.class, LongCodec.get()); public static final DBColumnFamilyDefinition DELETE_TRANSACTION = new DBColumnFamilyDefinition<>( "delete_txns", - Long.class, LongCodec.get(), - StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction.class, Proto2Codec.get(DeletedBlocksTransaction.getDefaultInstance())); public static final DBColumnFamilyDefinition FINALIZE_BLOCKS = new DBColumnFamilyDefinition<>( "finalize_blocks", - String.class, FixedLengthStringCodec.get(), - Long.class, LongCodec.get()); public static final DBColumnFamilyDefinition LAST_CHUNK_INFO = new DBColumnFamilyDefinition<>( "last_chunk_info", - String.class, FixedLengthStringCodec.get(), - BlockData.class, BlockData.getCodec()); public DatanodeSchemaTwoDBDefinition(String dbPath, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java index 4beb20754322..25a49eaabe4f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java @@ -25,7 +25,8 @@ import java.io.IOException; /** - * Codec for parsing {@link ContainerProtos.ChunkInfoList} objects from data + * Codec for parsing {@link org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfoList} + * objects from data * that may have been written using schema version one. Before upgrading * schema versions, deleted block IDs were stored with a duplicate copy of * their ID as the value in the database. 
After upgrading the code, any @@ -56,6 +57,11 @@ private SchemaOneChunkInfoListCodec() { // singleton } + @Override + public Class getTypeClass() { + return ChunkInfoList.class; + } + @Override public byte[] toPersistedFormat(ChunkInfoList chunkList) { return chunkList.getProtoBufMessage().toByteArray(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneKeyCodec.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneKeyCodec.java index 2f1660f4d2e2..add24874a312 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneKeyCodec.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneKeyCodec.java @@ -48,6 +48,11 @@ private SchemaOneKeyCodec() { // singleton } + @Override + public Class getTypeClass() { + return String.class; + } + @Override public byte[] toPersistedFormat(String stringObject) throws IOException { try { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java index 9ddf8084e3bb..567741a98d8b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java @@ -163,7 +163,6 @@ public void closeContainer(final long containerId) throws IOException { * Returns the Container given a container id. * * @param containerId ID of the container - * @return Container */ public void addFinalizedBlock(final long containerId, final long localId) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index 508176c25274..d06276285051 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -490,10 +490,10 @@ public void start(String clusterId) throws IOException { replicationServer.start(); datanodeDetails.setPort(Name.REPLICATION, replicationServer.getPort()); - writeChannel.start(); - readChannel.start(); hddsDispatcher.init(); hddsDispatcher.setClusterId(clusterId); + writeChannel.start(); + readChannel.start(); blockDeletingService.start(); recoveringContainerScrubbingService.start(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java index 92ff4b6d8d61..9513cac84efe 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java @@ -43,6 +43,8 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority; +import 
org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig; @@ -50,6 +52,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -77,6 +80,10 @@ public final class ReplicationSupervisor { private final Map failureCounter = new ConcurrentHashMap<>(); private final Map timeoutCounter = new ConcurrentHashMap<>(); private final Map skippedCounter = new ConcurrentHashMap<>(); + private final Map queuedCounter = new ConcurrentHashMap<>(); + + private final MetricsRegistry registry; + private final Map opsLatencyMs = new ConcurrentHashMap<>(); private static final Map METRICS_MAP; @@ -218,6 +225,7 @@ private ReplicationSupervisor(StateContext context, ExecutorService executor, nodeStateUpdated(dn.getPersistedOpState()); } } + registry = new MetricsRegistry(ReplicationSupervisor.class.getSimpleName()); } /** @@ -240,6 +248,9 @@ public void addTask(AbstractReplicationTask task) { failureCounter.put(task.getMetricName(), new AtomicLong(0)); timeoutCounter.put(task.getMetricName(), new AtomicLong(0)); skippedCounter.put(task.getMetricName(), new AtomicLong(0)); + queuedCounter.put(task.getMetricName(), new AtomicLong(0)); + opsLatencyMs.put(task.getMetricName(), registry.newRate( + task.getClass().getSimpleName() + "Ms")); METRICS_MAP.put(task.getMetricName(), task.getMetricDescriptionSegment()); } } @@ -253,6 +264,7 @@ public void addTask(AbstractReplicationTask task) { taskCounter.computeIfAbsent(task.getClass(), k -> new AtomicInteger()).incrementAndGet(); } + queuedCounter.get(task.getMetricName()).incrementAndGet(); executor.execute(new TaskRunner(task)); } } @@ -353,6 +365,7 @@ public TaskRunner(AbstractReplicationTask task) { @Override public void run() { + final long startTime = Time.monotonicNow(); try { requestCounter.get(task.getMetricName()).incrementAndGet(); @@ -401,6 +414,8 @@ public void run() { LOG.warn("Failed {}", this, e); failureCounter.get(task.getMetricName()).incrementAndGet(); } finally { + queuedCounter.get(task.getMetricName()).decrementAndGet(); + opsLatencyMs.get(task.getMetricName()).add(Time.monotonicNow() - startTime); inFlight.remove(task); decrementTaskCounter(task); } @@ -511,4 +526,22 @@ public long getReplicationSkippedCount(String metricsName) { return counter != null ? counter.get() : 0; } + public long getReplicationQueuedCount() { + return getCount(queuedCounter); + } + + public long getReplicationQueuedCount(String metricsName) { + AtomicLong counter = queuedCounter.get(metricsName); + return counter != null ? counter.get() : 0; + } + + public long getReplicationRequestAvgTime(String metricsName) { + MutableRate rate = opsLatencyMs.get(metricsName); + return rate != null ? (long) rate.lastStat().mean() : 0; + } + + public long getReplicationRequestTotalTime(String metricsName) { + MutableRate rate = opsLatencyMs.get(metricsName); + return rate != null ? 
(long) rate.lastStat().total() : 0; + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorMetrics.java index a1763976af99..cd1103a0c463 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorMetrics.java @@ -67,7 +67,7 @@ public void getMetrics(MetricsCollector collector, boolean all) { supervisor.getTotalInFlightReplications()) .addGauge(Interns.info("numQueuedReplications", "Number of replications in queue"), - supervisor.getQueueSize()) + supervisor.getReplicationQueuedCount()) .addGauge(Interns.info("numRequestedReplications", "Number of requested replications"), supervisor.getReplicationRequestCount()) @@ -107,7 +107,10 @@ public void getMetrics(MetricsCollector collector, boolean all) { .addGauge(Interns.info("numSkipped" + metricsName, "Number of " + descriptionSegment + " skipped as the container is " + "already present"), - supervisor.getReplicationSkippedCount(metricsName)); + supervisor.getReplicationSkippedCount(metricsName)) + .addGauge(Interns.info("numQueued" + metricsName, + "Number of " + descriptionSegment + " in queue"), + supervisor.getReplicationQueuedCount(metricsName)); } }); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/StreamingSource.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/StreamingSource.java index 5fdfc931b99c..e49f3c3d6e5c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/StreamingSource.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/StreamingSource.java @@ -27,9 +27,9 @@ public interface StreamingSource { /** * - * @param id: custom identifier + * @param id custom identifier * - * @return map of files which should be copied (logical name -> real path) + * @return map of files which should be copied (logical name -> real path) */ Map getFilesToStream(String id) throws InterruptedException; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java index eeb99b5a3db2..d6b44f2a6416 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java @@ -122,7 +122,6 @@ private SCMDatanodeResponse submitRequest(Type type, /** * Returns SCM version. * - * @param unused - set to null and unused. * @return Version info. 
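Returning to the ReplicationSupervisor hunk above: the new metrics follow a simple pattern, a per-task-type queued counter bumped on submit and dropped in the finally block, plus a MutableRate fed with the task's wall-clock duration. A small hypothetical sketch of that pattern, assuming only the Hadoop metrics2 classes already used in the patch (class and method names here are made up):

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableRate;
import org.apache.hadoop.util.Time;

// Illustrative only: per-metric queued counter plus a latency rate, the same
// shape as the counters added to ReplicationSupervisor.
class TaskTimingSketch {
  private final MetricsRegistry registry = new MetricsRegistry("TaskTimingSketch");
  private final Map<String, AtomicLong> queued = new ConcurrentHashMap<>();
  private final Map<String, MutableRate> latencyMs = new ConcurrentHashMap<>();

  void submit(String metricName, Runnable work) {
    queued.computeIfAbsent(metricName, k -> new AtomicLong()).incrementAndGet();
    latencyMs.computeIfAbsent(metricName, k -> registry.newRate(metricName + "Ms"));
    final long start = Time.monotonicNow();
    try {
      work.run();  // the real supervisor hands the task to an executor instead
    } finally {
      queued.get(metricName).decrementAndGet();
      latencyMs.get(metricName).add(Time.monotonicNow() - start);
    }
  }

  long queuedCount(String metricName) {
    AtomicLong counter = queued.get(metricName);
    return counter != null ? counter.get() : 0;
  }
}
```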
*/ @Override diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReconstructECContainersCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReconstructECContainersCommandHandler.java new file mode 100644 index 000000000000..7e6c76081803 --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReconstructECContainersCommandHandler.java @@ -0,0 +1,139 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
<p>
+ * http://www.apache.org/licenses/LICENSE-2.0 + * <p>
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; + +import com.google.protobuf.ByteString; +import com.google.protobuf.Proto2Utils; +import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; +import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; +import org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics; +import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; +import org.apache.hadoop.ozone.container.common.statemachine.StateContext; +import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCoordinator; +import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; +import org.apache.hadoop.ozone.container.replication.ReplicationSupervisor; +import org.apache.hadoop.ozone.protocol.commands.ReconstructECContainersCommand; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.when; + +/** + * Test cases to verify {@link ReconstructECContainersCommandHandler}. 
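The test below only holds together if the command handler's metric getters delegate to the supervisor, keyed by the handler's metrics name; that delegation is implied by the mocked return values rather than shown in this patch. A hypothetical sketch of what such delegation could look like:

```java
import org.apache.hadoop.ozone.container.replication.ReplicationSupervisor;

// Hypothetical delegation: every metric the handler reports comes straight
// from the supervisor, selected by the handler's metrics name. This is what
// the mocked assertions below rely on; the real handler may differ in detail.
class DelegatingHandlerSketch {
  private final ReplicationSupervisor supervisor;
  private final String metricsName;

  DelegatingHandlerSketch(ReplicationSupervisor supervisor, String metricsName) {
    this.supervisor = supervisor;
    this.metricsName = metricsName;
  }

  int getInvocationCount() {
    return (int) supervisor.getReplicationRequestCount(metricsName);
  }

  int getQueuedCount() {
    return (int) supervisor.getReplicationQueuedCount(metricsName);
  }

  long getTotalRunTime() {
    return supervisor.getReplicationRequestTotalTime(metricsName);
  }

  long getAverageRunTime() {
    return supervisor.getReplicationRequestAvgTime(metricsName);
  }
}
```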
+ */ +public class TestReconstructECContainersCommandHandler { + private OzoneConfiguration conf; + private ReplicationSupervisor supervisor; + private ECReconstructionCoordinator coordinator; + private OzoneContainer ozoneContainer; + private StateContext stateContext; + private SCMConnectionManager connectionManager; + + @BeforeEach + public void setUp() { + supervisor = mock(ReplicationSupervisor.class); + coordinator = mock(ECReconstructionCoordinator.class); + conf = new OzoneConfiguration(); + ozoneContainer = mock(OzoneContainer.class); + connectionManager = mock(SCMConnectionManager.class); + stateContext = mock(StateContext.class); + } + + @Test + public void testMetrics() { + ReconstructECContainersCommandHandler commandHandler = + new ReconstructECContainersCommandHandler(conf, supervisor, coordinator); + doNothing().when(supervisor).addTask(any()); + Map handlerMap = new HashMap<>(); + handlerMap.put(commandHandler.getCommandType(), commandHandler); + CommandHandlerMetrics metrics = CommandHandlerMetrics.create(handlerMap); + try { + byte[] missingIndexes = {1, 2}; + ByteString missingContainerIndexes = Proto2Utils.unsafeByteString(missingIndexes); + ECReplicationConfig ecReplicationConfig = new ECReplicationConfig(3, 2); + List dnDetails = getDNDetails(5); + List sources = + dnDetails.stream().map(a -> new ReconstructECContainersCommand + .DatanodeDetailsAndReplicaIndex(a, dnDetails.indexOf(a))) + .collect(Collectors.toList()); + List targets = getDNDetails(2); + ReconstructECContainersCommand reconstructECContainersCommand = + new ReconstructECContainersCommand(1L, sources, targets, + missingContainerIndexes, ecReplicationConfig); + + commandHandler.handle(reconstructECContainersCommand, ozoneContainer, + stateContext, connectionManager); + String metricsName = "ECReconstructions"; + assertEquals(commandHandler.getMetricsName(), metricsName); + when(supervisor.getReplicationRequestCount(metricsName)).thenReturn(1L); + assertEquals(commandHandler.getInvocationCount(), 1); + + commandHandler.handle(new ReconstructECContainersCommand(2L, sources, + targets, missingContainerIndexes, ecReplicationConfig), ozoneContainer, + stateContext, connectionManager); + commandHandler.handle(new ReconstructECContainersCommand(3L, sources, + targets, missingContainerIndexes, ecReplicationConfig), ozoneContainer, + stateContext, connectionManager); + commandHandler.handle(new ReconstructECContainersCommand(4L, sources, + targets, missingContainerIndexes, ecReplicationConfig), ozoneContainer, + stateContext, connectionManager); + commandHandler.handle(new ReconstructECContainersCommand(5L, sources, + targets, missingContainerIndexes, ecReplicationConfig), ozoneContainer, + stateContext, connectionManager); + commandHandler.handle(new ReconstructECContainersCommand(6L, sources, + targets, missingContainerIndexes, ecReplicationConfig), ozoneContainer, + stateContext, connectionManager); + + when(supervisor.getReplicationRequestCount(metricsName)).thenReturn(5L); + when(supervisor.getReplicationRequestTotalTime(metricsName)).thenReturn(10L); + when(supervisor.getReplicationRequestAvgTime(metricsName)).thenReturn(2L); + when(supervisor.getReplicationQueuedCount(metricsName)).thenReturn(1L); + assertEquals(commandHandler.getInvocationCount(), 5); + assertEquals(commandHandler.getQueuedCount(), 1); + assertEquals(commandHandler.getTotalRunTime(), 10); + assertEquals(commandHandler.getAverageRunTime(), 2); + + MetricsCollectorImpl metricsCollector = new MetricsCollectorImpl(); + 
metrics.getMetrics(metricsCollector, true); + assertEquals(1, metricsCollector.getRecords().size()); + } finally { + metrics.unRegister(); + } + } + + private List getDNDetails(int numDns) { + List dns = new ArrayList<>(); + for (int i = 0; i < numDns; i++) { + dns.add(MockDatanodeDetails.randomDatanodeDetails()); + } + return dns; + } +} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerCommandHandler.java new file mode 100644 index 000000000000..9de00877e5be --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerCommandHandler.java @@ -0,0 +1,118 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
<p>
+ * http://www.apache.org/licenses/LICENSE-2.0 + * <p>
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; +import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; +import org.apache.hadoop.ozone.container.common.helpers.CommandHandlerMetrics; +import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; +import org.apache.hadoop.ozone.container.common.statemachine.StateContext; +import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; +import org.apache.hadoop.ozone.container.replication.ContainerReplicator; +import org.apache.hadoop.ozone.container.replication.ReplicationSupervisor; +import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.doNothing; + +/** + * Test cases to verify {@link ReplicateContainerCommandHandler}. 
+ */ +public class TestReplicateContainerCommandHandler { + private OzoneConfiguration conf; + private ReplicationSupervisor supervisor; + private ContainerReplicator downloadReplicator; + private ContainerReplicator pushReplicator; + private OzoneContainer ozoneContainer; + private StateContext stateContext; + private SCMConnectionManager connectionManager; + + @BeforeEach + public void setUp() { + conf = new OzoneConfiguration(); + supervisor = mock(ReplicationSupervisor.class); + downloadReplicator = mock(ContainerReplicator.class); + pushReplicator = mock(ContainerReplicator.class); + ozoneContainer = mock(OzoneContainer.class); + connectionManager = mock(SCMConnectionManager.class); + stateContext = mock(StateContext.class); + } + + @Test + public void testMetrics() { + ReplicateContainerCommandHandler commandHandler = + new ReplicateContainerCommandHandler(conf, supervisor, + downloadReplicator, pushReplicator); + Map handlerMap = new HashMap<>(); + handlerMap.put(commandHandler.getCommandType(), commandHandler); + CommandHandlerMetrics metrics = CommandHandlerMetrics.create(handlerMap); + try { + doNothing().when(supervisor).addTask(any()); + DatanodeDetails source = MockDatanodeDetails.randomDatanodeDetails(); + DatanodeDetails target = MockDatanodeDetails.randomDatanodeDetails(); + List sourceList = new ArrayList<>(); + sourceList.add(source); + + ReplicateContainerCommand command = ReplicateContainerCommand.fromSources( + 1, sourceList); + commandHandler.handle(command, ozoneContainer, stateContext, connectionManager); + String metricsName = "ContainerReplications"; + assertEquals(commandHandler.getMetricsName(), metricsName); + when(supervisor.getReplicationRequestCount(metricsName)).thenReturn(1L); + assertEquals(commandHandler.getInvocationCount(), 1); + + commandHandler.handle(ReplicateContainerCommand.fromSources(2, sourceList), + ozoneContainer, stateContext, connectionManager); + commandHandler.handle(ReplicateContainerCommand.fromSources(3, sourceList), + ozoneContainer, stateContext, connectionManager); + commandHandler.handle(ReplicateContainerCommand.toTarget(4, target), + ozoneContainer, stateContext, connectionManager); + commandHandler.handle(ReplicateContainerCommand.toTarget(5, target), + ozoneContainer, stateContext, connectionManager); + commandHandler.handle(ReplicateContainerCommand.fromSources(6, sourceList), + ozoneContainer, stateContext, connectionManager); + + when(supervisor.getReplicationRequestCount(metricsName)).thenReturn(5L); + when(supervisor.getReplicationRequestTotalTime(metricsName)).thenReturn(10L); + when(supervisor.getReplicationRequestAvgTime(metricsName)).thenReturn(3L); + when(supervisor.getReplicationQueuedCount(metricsName)).thenReturn(1L); + assertEquals(commandHandler.getInvocationCount(), 5); + assertEquals(commandHandler.getQueuedCount(), 1); + assertEquals(commandHandler.getTotalRunTime(), 10); + assertEquals(commandHandler.getAverageRunTime(), 3); + + MetricsCollectorImpl metricsCollector = new MetricsCollectorImpl(); + metrics.getMetrics(metricsCollector, true); + assertEquals(1, metricsCollector.getRecords().size()); + } finally { + metrics.unRegister(); + } + } +} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java index 5e2dd0c75c9b..5e0a31944f78 100644 --- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java @@ -20,6 +20,7 @@ import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.ConfigurationException; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory; import org.apache.hadoop.hdds.scm.ScmConfigKeys; @@ -37,6 +38,7 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED_PERCENT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertNotEquals; /** @@ -166,6 +168,16 @@ public void testInvalidConfig() throws Exception { assertEquals(getExpectedDefaultReserved(hddsVolume2), reservedFromVolume2); } + @Test + public void testInvalidConfigThrowsException() { + OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED, "15GB"); + + assertThrows(ConfigurationException.class, + () -> volumeBuilder.conf(conf).build(), + "Reserved space should be configured in a pair"); + } + @Test public void testPathsCanonicalized() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java index e1a3de30ddf3..584db675d930 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java @@ -689,7 +689,7 @@ public void testContainerRocksDB(ContainerTestVersionInfo versionInfo) try (DBHandle db = BlockUtils.getDB(keyValueContainerData, CONF)) { RDBStore store = (RDBStore) db.getStore().getStore(); - long defaultCacheSize = 64 * OzoneConsts.MB; + long defaultCacheSize = OzoneConsts.GB; long cacheSize = Long.parseLong(store .getProperty("rocksdb.block-cache-capacity")); assertEquals(defaultCacheSize, cacheSize); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java index f0c8a2077eac..1db2d7ff53eb 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java @@ -35,12 +35,14 @@ import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import 
org.slf4j.LoggerFactory; +import java.io.File; import java.io.IOException; import java.util.Arrays; import java.util.UUID; @@ -72,6 +74,9 @@ public class TestKeyValueHandlerWithUnhealthyContainer { public static final Logger LOG = LoggerFactory.getLogger( TestKeyValueHandlerWithUnhealthyContainer.class); + @TempDir + private File tempDir; + private IncrementalReportSender mockIcrSender; @BeforeEach @@ -220,6 +225,7 @@ public void testMarkContainerUnhealthyInFailedVolume() throws IOException { KeyValueContainerData mockContainerData = mock(KeyValueContainerData.class); HddsVolume mockVolume = mock(HddsVolume.class); when(mockContainerData.getVolume()).thenReturn(mockVolume); + when(mockContainerData.getMetadataPath()).thenReturn(tempDir.getAbsolutePath()); KeyValueContainer container = new KeyValueContainer( mockContainerData, new OzoneConfiguration()); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java index ef37c226653a..315e0c0253b4 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java @@ -87,6 +87,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority.LOW; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority.NORMAL; import static org.mockito.Mockito.any; @@ -488,6 +489,15 @@ public void testMultipleReplication(ContainerLayoutVersion layout, assertEquals(0, ecReconstructionSupervisor.getReplicationRequestCount( task1.getMetricName())); + assertTrue(replicationSupervisor.getReplicationRequestTotalTime( + task1.getMetricName()) > 0); + assertTrue(ecReconstructionSupervisor.getReplicationRequestTotalTime( + task2.getMetricName()) > 0); + assertTrue(replicationSupervisor.getReplicationRequestAvgTime( + task1.getMetricName()) > 0); + assertTrue(ecReconstructionSupervisor.getReplicationRequestAvgTime( + task2.getMetricName()) > 0); + MetricsCollectorImpl replicationMetricsCollector = new MetricsCollectorImpl(); replicationMetrics.getMetrics(replicationMetricsCollector, true); assertEquals(1, replicationMetricsCollector.getRecords().size()); diff --git a/hadoop-hdds/crypto-api/pom.xml b/hadoop-hdds/crypto-api/pom.xml index db19cc4f3414..ca54b3de9f2d 100644 --- a/hadoop-hdds/crypto-api/pom.xml +++ b/hadoop-hdds/crypto-api/pom.xml @@ -19,11 +19,11 @@ org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-crypto-api - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store cryptographic functions Apache Ozone HDDS Crypto diff --git a/hadoop-hdds/crypto-default/pom.xml b/hadoop-hdds/crypto-default/pom.xml index c586f91712b8..6024c3e2ddf2 100644 --- a/hadoop-hdds/crypto-default/pom.xml +++ b/hadoop-hdds/crypto-default/pom.xml @@ -19,11 +19,11 @@ org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-crypto-default - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Default implementation of Apache Ozone Distributed 
Data Store's cryptographic functions Apache Ozone HDDS Crypto - Default diff --git a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml index 3a69c793c26f..288085ef9481 100644 --- a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml +++ b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml @@ -88,6 +88,7 @@ + diff --git a/hadoop-hdds/docs/content/concept/Datanodes.md b/hadoop-hdds/docs/content/concept/Datanodes.md index 47c09a798fc4..cf246712f68f 100644 --- a/hadoop-hdds/docs/content/concept/Datanodes.md +++ b/hadoop-hdds/docs/content/concept/Datanodes.md @@ -76,3 +76,15 @@ blocks that get reported. That is a 40x reduction in the block reports. This extra indirection helps tremendously with scaling Ozone. SCM has far less block data to process and the namespace service (Ozone Manager) as a different service are critical to scaling Ozone. + + +## Notable configurations + +key | default |
    description
    +----|---------|------------ +dfs.container.ratis.datanode.storage.dir | none | This directory is used for storing Ratis metadata like logs. +ozone.scm.datanode.id.dir | none | The path that datanodes will use to store the datanode ID. +hdds.datanode.dir | none | Determines where HDDS data will be stored on the local filesystem. +hdds.datanode.dir.du.reserved | none | Reserved space in bytes per volume. Always leave this much space free for non dfs use. +ozone.metadata.dirs | none | Directory to store persisted data (RocksDB). +ozone.recon.address | 0.0.0.0:9891 | RPC address of the Recon. Use to connect Recon. diff --git a/hadoop-hdds/docs/content/concept/Datanodes.zh.md b/hadoop-hdds/docs/content/concept/Datanodes.zh.md index 8f129df7b9be..32071c9e51e4 100644 --- a/hadoop-hdds/docs/content/concept/Datanodes.zh.md +++ b/hadoop-hdds/docs/content/concept/Datanodes.zh.md @@ -49,3 +49,15 @@ Ozone 的存储容器是一个自包含的超级块,容器中包含一系列 SCM 如何获得容器的位置?这一点和现有的 HDFS 十分相似。数据节点会定期发送类似于块报告的容器报告,容器报告比块报告的内容简洁的多,比如,对于一个存储容量为 196 TB 的集群,Ozone 大概会拥有四万个容器,相比于 HDFS 的一百五十万个块,块报告数量缩减为四十分之一。 这种间接管理的方式大大地提高了 Ozone 的扩展性,因为 SCM 需要处理的块数据大大减少,且命名服务(OM)作为一个独特的服务主体对于扩展 Ozone 具有重要意义。 + + +## 需要关注的配置项 + +配置项 |默认值 |
    描述
    +----|---------|------------ +dfs.container.ratis.datanode.storage.dir | none | 该目录用于存储 Ratis 元数据,如日志。 +ozone.scm.datanode.id.dir | none | 数据节点上用于存储数据节点 ID 的路径。 +hdds.datanode.dir | none | 此配置决定数据节点上的数据将存储在本地文件系统的哪个位置。 +hdds.datanode.dir.du.reserved | none | 每个卷保留的存储空间(以字节为单位)。始终为非DFS用途保留这么多空闲空间。 +ozone.metadata.dirs | none | 用于存储持久化数据(RocksDB)的目录。 +ozone.recon.address | 0.0.0.0:9891 | Recon的RPC地址。 使用 连接到Recon。 \ No newline at end of file diff --git a/hadoop-hdds/docs/content/feature/Quota.md b/hadoop-hdds/docs/content/feature/Quota.md index 90e413357b50..53c196307fa3 100644 --- a/hadoop-hdds/docs/content/feature/Quota.md +++ b/hadoop-hdds/docs/content/feature/Quota.md @@ -1,6 +1,6 @@ --- title: "Quota in Ozone" -date: "2020-October-22" +date: "2020-10-22" weight: 4 summary: Quota in Ozone icon: user diff --git a/hadoop-hdds/docs/content/feature/Quota.zh.md b/hadoop-hdds/docs/content/feature/Quota.zh.md index 16e5db26cde3..d690947ef06c 100644 --- a/hadoop-hdds/docs/content/feature/Quota.zh.md +++ b/hadoop-hdds/docs/content/feature/Quota.zh.md @@ -1,6 +1,6 @@ --- title: "Ozone 中的配额" -date: "2020-October-22" +date: "2020-10-22" weight: 4 summary: Ozone中的配额 icon: user diff --git a/hadoop-hdds/docs/content/security/GDPR.md b/hadoop-hdds/docs/content/security/GDPR.md index 25b2f2c4416b..409a3ae7be0d 100644 --- a/hadoop-hdds/docs/content/security/GDPR.md +++ b/hadoop-hdds/docs/content/security/GDPR.md @@ -1,6 +1,6 @@ --- title: "GDPR in Ozone" -date: "2019-September-17" +date: "2019-09-17" weight: 3 icon: user menu: diff --git a/hadoop-hdds/docs/content/security/GDPR.zh.md b/hadoop-hdds/docs/content/security/GDPR.zh.md index a7db4030871b..8fd3514138f0 100644 --- a/hadoop-hdds/docs/content/security/GDPR.zh.md +++ b/hadoop-hdds/docs/content/security/GDPR.zh.md @@ -1,6 +1,6 @@ --- title: "Ozone 中的 GDPR" -date: "2019-September-17" +date: "2019-09-17" weight: 3 summary: Ozone 中的 GDPR menu: diff --git a/hadoop-hdds/docs/content/security/SecureOzone.md b/hadoop-hdds/docs/content/security/SecureOzone.md index 76fd74701095..bbeef79b6135 100644 --- a/hadoop-hdds/docs/content/security/SecureOzone.md +++ b/hadoop-hdds/docs/content/security/SecureOzone.md @@ -1,6 +1,6 @@ --- title: "Securing Ozone" -date: "2019-April-03" +date: "2019-04-03" summary: Overview of Ozone security concepts and steps to secure Ozone Manager and SCM. 
weight: 1 menu: diff --git a/hadoop-hdds/docs/content/security/SecureOzone.zh.md b/hadoop-hdds/docs/content/security/SecureOzone.zh.md index a7660233f4d0..e74b5d8dfab5 100644 --- a/hadoop-hdds/docs/content/security/SecureOzone.zh.md +++ b/hadoop-hdds/docs/content/security/SecureOzone.zh.md @@ -1,6 +1,6 @@ --- title: "安全化 Ozone" -date: "2019-April-03" +date: "2019-04-03" summary: 简要介绍 Ozone 中的安全概念以及安全化 OM 和 SCM 的步骤。 weight: 1 menu: diff --git a/hadoop-hdds/docs/content/security/SecuringDatanodes.md b/hadoop-hdds/docs/content/security/SecuringDatanodes.md index 717e746cfb91..2254155e1f4e 100644 --- a/hadoop-hdds/docs/content/security/SecuringDatanodes.md +++ b/hadoop-hdds/docs/content/security/SecuringDatanodes.md @@ -1,6 +1,6 @@ --- title: "Securing Datanodes" -date: "2019-April-03" +date: "2019-04-03" weight: 3 menu: main: diff --git a/hadoop-hdds/docs/content/security/SecuringDatanodes.zh.md b/hadoop-hdds/docs/content/security/SecuringDatanodes.zh.md index 608be16e8a3b..8b37fd2f6ee2 100644 --- a/hadoop-hdds/docs/content/security/SecuringDatanodes.zh.md +++ b/hadoop-hdds/docs/content/security/SecuringDatanodes.zh.md @@ -1,6 +1,6 @@ --- title: "安全化 Datanode" -date: "2019-April-03" +date: "2019-04-03" weight: 3 menu: main: diff --git a/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.md b/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.md index 47c04eb94d93..a8601d7a5e1f 100644 --- a/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.md +++ b/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.md @@ -1,6 +1,6 @@ --- title: "Securing HTTP" -date: "2020-June-17" +date: "2020-06-17" summary: Secure HTTP web-consoles for Ozone services weight: 4 menu: diff --git a/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.zh.md b/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.zh.md index 07b3f6164f6f..5907a7caf9a2 100644 --- a/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.zh.md +++ b/hadoop-hdds/docs/content/security/SecuringOzoneHTTP.zh.md @@ -1,6 +1,6 @@ --- title: "安全化 HTTP" -date: "2020-June-17" +date: "2020-06-17" summary: 安全化 Ozone 服务的 HTTP 网络控制台 weight: 4 menu: diff --git a/hadoop-hdds/docs/content/security/SecuringS3.md b/hadoop-hdds/docs/content/security/SecuringS3.md index e6218b95e91e..04ef6921af65 100644 --- a/hadoop-hdds/docs/content/security/SecuringS3.md +++ b/hadoop-hdds/docs/content/security/SecuringS3.md @@ -1,6 +1,6 @@ --- title: "Securing S3" -date: "2019-April-03" +date: "2019-04-03" summary: Ozone supports S3 protocol, and uses AWS Signature Version 4 protocol which allows a seamless S3 experience. weight: 5 menu: diff --git a/hadoop-hdds/docs/content/security/SecuringS3.zh.md b/hadoop-hdds/docs/content/security/SecuringS3.zh.md index 218786fd366f..395b9303354b 100644 --- a/hadoop-hdds/docs/content/security/SecuringS3.zh.md +++ b/hadoop-hdds/docs/content/security/SecuringS3.zh.md @@ -1,6 +1,6 @@ --- title: "安全化 S3" -date: "2019-April-03" +date: "2019-04-03" summary: Ozone 支持 S3 协议,并使用 AWS Signature Version 4 protocol which allows a seamless S3 experience. weight: 5 diff --git a/hadoop-hdds/docs/content/security/SecuringTDE.md b/hadoop-hdds/docs/content/security/SecuringTDE.md index 3b75bee1bfd5..0d04a28aec77 100644 --- a/hadoop-hdds/docs/content/security/SecuringTDE.md +++ b/hadoop-hdds/docs/content/security/SecuringTDE.md @@ -1,6 +1,6 @@ --- title: "Transparent Data Encryption" -date: "2019-April-03" +date: "2019-04-03" summary: TDE allows data on the disks to be encrypted-at-rest and automatically decrypted during access. 
weight: 2 menu: diff --git a/hadoop-hdds/docs/content/security/SecuringTDE.zh.md b/hadoop-hdds/docs/content/security/SecuringTDE.zh.md index ed42519e0b25..d7fa4941e446 100644 --- a/hadoop-hdds/docs/content/security/SecuringTDE.zh.md +++ b/hadoop-hdds/docs/content/security/SecuringTDE.zh.md @@ -1,6 +1,6 @@ --- title: "透明数据加密" -date: "2019-April-03" +date: "2019-04-03" summary: 透明数据加密(Transparent Data Encryption,TDE)以密文形式在磁盘上保存数据,但可以在用户访问的时候自动进行解密。 weight: 2 menu: diff --git a/hadoop-hdds/docs/content/security/SecurityAcls.md b/hadoop-hdds/docs/content/security/SecurityAcls.md index 9976cbbc4fba..ee48999ed25d 100644 --- a/hadoop-hdds/docs/content/security/SecurityAcls.md +++ b/hadoop-hdds/docs/content/security/SecurityAcls.md @@ -1,6 +1,6 @@ --- title: "Ozone ACLs" -date: "2019-April-03" +date: "2019-04-03" weight: 6 menu: main: diff --git a/hadoop-hdds/docs/content/security/SecurityAcls.zh.md b/hadoop-hdds/docs/content/security/SecurityAcls.zh.md index 3d95fcf0877b..99751cd62da3 100644 --- a/hadoop-hdds/docs/content/security/SecurityAcls.zh.md +++ b/hadoop-hdds/docs/content/security/SecurityAcls.zh.md @@ -1,6 +1,6 @@ --- title: "Ozone 访问控制列表" -date: "2019-April-03" +date: "2019-04-03" weight: 6 menu: main: diff --git a/hadoop-hdds/docs/content/security/SecurityWithRanger.md b/hadoop-hdds/docs/content/security/SecurityWithRanger.md index bbbd8c19f32e..7dc1895ad3dc 100644 --- a/hadoop-hdds/docs/content/security/SecurityWithRanger.md +++ b/hadoop-hdds/docs/content/security/SecurityWithRanger.md @@ -1,6 +1,6 @@ --- title: "Apache Ranger" -date: "2019-April-03" +date: "2019-04-03" weight: 7 menu: main: diff --git a/hadoop-hdds/docs/content/security/SecurityWithRanger.zh.md b/hadoop-hdds/docs/content/security/SecurityWithRanger.zh.md index b7c7b8721bbe..8917c0b84bcf 100644 --- a/hadoop-hdds/docs/content/security/SecurityWithRanger.zh.md +++ b/hadoop-hdds/docs/content/security/SecurityWithRanger.zh.md @@ -1,6 +1,6 @@ --- title: "Apache Ranger" -date: "2019-April-03" +date: "2019-04-03" weight: 7 menu: main: diff --git a/hadoop-hdds/docs/pom.xml b/hadoop-hdds/docs/pom.xml index d14ae28c10d8..7f4ffbb8a709 100644 --- a/hadoop-hdds/docs/pom.xml +++ b/hadoop-hdds/docs/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-docs - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone/HDDS Documentation Apache Ozone/HDDS Documentation jar diff --git a/hadoop-hdds/erasurecode/pom.xml b/hadoop-hdds/erasurecode/pom.xml index 201336d5ed3a..b540d1c68ea6 100644 --- a/hadoop-hdds/erasurecode/pom.xml +++ b/hadoop-hdds/erasurecode/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-erasurecode - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Earsurecode utils Apache Ozone HDDS Erasurecode diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/CodecRegistry.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/CodecRegistry.java index 83650c132b05..2069a51be171 100644 --- a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/CodecRegistry.java +++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/CodecRegistry.java @@ -19,9 +19,9 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.annotation.InterfaceAudience; -import org.apache.ozone.erasurecode.rawcoder.RawErasureCoderFactory; import 
org.apache.ozone.erasurecode.rawcoder.NativeRSRawErasureCoderFactory; import org.apache.ozone.erasurecode.rawcoder.NativeXORRawErasureCoderFactory; +import org.apache.ozone.erasurecode.rawcoder.RawErasureCoderFactory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -31,7 +31,6 @@ import java.util.Map; import java.util.ServiceLoader; import java.util.Set; -import java.util.stream.Collectors; /** * This class registers all coder implementations. @@ -108,8 +107,8 @@ void updateCoders(Iterable coderFactories) { String codecName = entry.getKey(); List coders = entry.getValue(); coderNameMap.put(codecName, coders.stream(). - map(RawErasureCoderFactory::getCoderName). - collect(Collectors.toList()).toArray(new String[0])); + map(RawErasureCoderFactory::getCoderName) + .toArray(String[]::new)); } } diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml index 70cce849aec1..37d41cde390a 100644 --- a/hadoop-hdds/framework/pom.xml +++ b/hadoop-hdds/framework/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-server-framework - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Server Framework Apache Ozone HDDS Server Framework diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java index cbb4f3fc2ee7..0cb39482e98c 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java @@ -118,7 +118,6 @@ String getSCMCertificate(ScmNodeDetailsProto scmNodeDetails, /** * Get Root CA certificate. - * @return * @throws IOException */ String getRootCACertificate() throws IOException; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java index a938d53c7c42..71918308f145 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java @@ -330,7 +330,6 @@ public SCMGetCertResponseProto getCACert() throws IOException { * @param role - node type: OM/SCM/DN. * @param startSerialId - start cert serial id. * @param count - max number of certificates returned in a batch. 
- * @return * @throws IOException */ @Override diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/MoveDataNodePair.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/MoveDataNodePair.java index 42e8f8202cbd..4690054a87db 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/MoveDataNodePair.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/MoveDataNodePair.java @@ -36,6 +36,7 @@ public class MoveDataNodePair { Proto2Codec.get(MoveDataNodePairProto.getDefaultInstance()), MoveDataNodePair::getFromProtobuf, pair -> pair.getProtobufMessage(ClientVersion.CURRENT_VERSION), + MoveDataNodePair.class, DelegatedCodec.CopyType.SHALLOW); public static Codec getCodec() { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java index 1f114304ccaa..d9b198d4b14a 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java @@ -307,7 +307,7 @@ public boolean addSCM(AddSCMRequest request) throws IOException { } /** * Sort the datanodes based on distance from client. - * @return List + * @return list of datanodes; * @throws IOException */ @Override diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index d5972cfe0760..d76ce3b72c96 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -108,6 +108,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerStatusInfoRequestProto; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; @@ -128,6 +129,7 @@ import java.io.Closeable; import java.io.IOException; +import java.util.Arrays; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -157,6 +159,12 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB private final StorageContainerLocationProtocolPB rpcProxy; private final SCMContainerLocationFailoverProxyProvider fpp; + /** + * This is used to check if 'leader' or 'follower' exists, + * in order to confirm whether we have enabled Ratis. + */ + private final List scmRatisRolesToCheck = Arrays.asList("leader", "follower"); + /** * Creates a new StorageContainerLocationProtocolClientSideTranslatorPB. 
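For the getScmInfo change further down in this file: when the response carries no explicit ScmRatisEnabled flag, Ratis mode is inferred from whether any reported peer role equals 'leader' or 'follower'. A self-contained sketch of that inference with illustrative names, not the project's API:

```java
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Illustrative only: infer SCM Ratis (HA) mode from the peer roles returned by
// getScmInfo when no explicit flag is present; an empty role list is treated
// as non-HA. Mirrors the logic added in this file, but with made-up names.
public final class ScmRatisInferenceSketch {
  private static final List<String> RATIS_ROLES = Arrays.asList("leader", "follower");

  private ScmRatisInferenceSketch() { }

  static boolean ratisEnabled(Boolean explicitFlag, List<String> peerRoles) {
    if (explicitFlag != null) {
      return explicitFlag;  // trust the explicit field when the server sends it
    }
    return peerRoles.stream()
        .map(String::toLowerCase)
        .anyMatch(RATIS_ROLES::contains);
  }

  public static void main(String[] args) {
    System.out.println(ratisEnabled(null, Arrays.asList("LEADER", "FOLLOWER")));  // true
    System.out.println(ratisEnabled(null, Collections.emptyList()));              // false
    System.out.println(ratisEnabled(Boolean.TRUE, Collections.emptyList()));      // true
  }
}
```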
* @@ -382,19 +390,19 @@ public List getExistContainerWithPipelinesInBatch( * {@inheritDoc} */ @Override - public List listContainer(long startContainerID, int count) + public ContainerListResult listContainer(long startContainerID, int count) throws IOException { return listContainer(startContainerID, count, null, null, null); } @Override - public List listContainer(long startContainerID, int count, + public ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException { return listContainer(startContainerID, count, state, null, null); } @Override - public List listContainer(long startContainerID, int count, + public ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig replicationConfig) @@ -436,12 +444,17 @@ public List listContainer(long startContainerID, int count, .getContainersList()) { containerList.add(ContainerInfo.fromProtobuf(containerInfoProto)); } - return containerList; + + if (response.hasContainerCount()) { + return new ContainerListResult(containerList, response.getContainerCount()); + } else { + return new ContainerListResult(containerList, -1); + } } @Deprecated @Override - public List listContainer(long startContainerID, int count, + public ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) throws IOException { throw new UnsupportedOperationException("Should no longer be called from " + @@ -760,8 +773,23 @@ public ScmInfo getScmInfo() throws IOException { .setScmId(resp.getScmId()) .setRatisPeerRoles(resp.getPeerRolesList()); - return builder.build(); + // By default, we assume that SCM Ratis is not enabled. + // If the response contains the `ScmRatisEnabled` field, + // we will set it directly; otherwise, + // we will determine if Ratis is enabled based on + // whether the `peerRolesList` contains the keywords 'leader' or 'follower'. + if (resp.hasScmRatisEnabled()) { + builder.setScmRatisEnabled(resp.getScmRatisEnabled()); + } else { + List peerRolesList = resp.getPeerRolesList(); + if (!peerRolesList.isEmpty()) { + boolean containsScmRoles = peerRolesList.stream().map(String::toLowerCase) + .anyMatch(scmRatisRolesToCheck::contains); + builder.setScmRatisEnabled(containsScmRoles); + } + } + return builder.build(); } @Override @@ -1187,7 +1215,7 @@ public void close() { public List getListOfContainers( long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException { - return listContainer(startContainerID, count, state); + return listContainer(startContainerID, count, state).getContainerInfoList(); } @Override diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java index e7e029f70877..da651160d047 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java @@ -50,8 +50,7 @@ void verify(Token token, ContainerCommandRequestProtoOrBuilder cmd) throws SCMSecurityException; - /** Same as {@link #verify(Token, - * ContainerCommandRequestProtoOrBuilder)}, but with encoded token. */ + /** Same as {@link #verify}, but with encoded token. 
*/ default void verify(ContainerCommandRequestProtoOrBuilder cmd, String encodedToken) throws SCMSecurityException { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/CertInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/CertInfo.java index b78604643e57..154f1c2d858f 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/CertInfo.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/CertInfo.java @@ -39,7 +39,8 @@ public final class CertInfo implements Comparable, Serializable { private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(CertInfoProto.getDefaultInstance()), CertInfo::fromProtobuf, - CertInfo::getProtobuf); + CertInfo::getProtobuf, + CertInfo.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java index b2d62443b776..5a39d0f1dd0b 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java @@ -104,7 +104,7 @@ public DefaultApprover(PKIProfile pkiProfile, SecurityConfig config) { * @param certSerialId - the new certificate id. * @return Signed Certificate. * @throws IOException - On Error - * @throws OperatorCreationException - on Error. + * @throws CertificateException - on Error. */ @SuppressWarnings("ParameterNumber") @Override diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java index a93bdb4e3d67..118aa826013d 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java @@ -195,8 +195,6 @@ public CertPath getCaCertPath() * * @param certSerialId - Certificate for this CA. * @return X509Certificate - * @throws CertificateException - usually thrown if this CA is not - * initialized. * @throws IOException - on Error. */ @Override diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java index b277a759cb8d..42292b9663f1 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java @@ -668,6 +668,8 @@ protected enum InitCase { * certificate. * * Truth table: + *
    +   * {@code
        *  +--------------+---------------+--------------+---------------------+
        *  | Private Key  | Public Keys   | Certificate  |   Result            |
        *  +--------------+---------------+--------------+---------------------+
    @@ -680,7 +682,8 @@ protected enum InitCase {
        *  | True   (1)   | True    (1)   | False  (0)   |   GETCERT->SUCCESS  |
        *  | True   (1)   | True    (1)   | True   (1)   |   SUCCESS           |
        *  +--------------+---------------+--------------+---------------------+
    -   *
    +   * }
    +   * 
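A minimal sketch of how the truth table above can be evaluated: the three presence flags collapse into one 3-bit case number. The names used here (hasPrivateKey, hasPublicKey, hasCert) and the returned labels are illustrative assumptions, not taken from this change.

    // Sketch only: Private Key contributes 4, Public Key 2, Certificate 1.
    static String initAction(boolean hasPrivateKey, boolean hasPublicKey, boolean hasCert) {
      int initCase = (hasPrivateKey ? 4 : 0) + (hasPublicKey ? 2 : 0) + (hasCert ? 1 : 0);
      switch (initCase) {
      case 7:   // (1, 1, 1) everything present
        return "SUCCESS";
      case 6:   // (1, 1, 0) key pair exists, certificate missing
        return "GETCERT";   // fetch the certificate, then SUCCESS
      default:  // remaining rows of the table: recover the missing piece or fail
        return "RECOVER_OR_FAIL";
      }
    }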
    * Success in following cases: * 1. If keypair as well certificate is available. * 2. If private key and certificate is available and public key is @@ -1083,7 +1086,7 @@ public Duration timeBeforeExpiryGracePeriod(X509Certificate certificate) { * Renew keys and certificate. Save the keys are certificate to disk in new * directories, swap the current key directory and certs directory with the * new directories. - * @param force, check certificate expiry time again if force is false. + * @param force check certificate expiry time again if force is false. * @return String, new certificate ID * */ public String renewAndStoreKeyAndCertificate(boolean force) diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfo.java index bcd75f3f215d..3783613f3e34 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfo.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfo.java @@ -47,13 +47,6 @@ public interface ServiceRuntimeInfo { */ String getSoftwareVersion(); - /** - * Get the compilation information which contains date, user and branch. - * - * @return the compilation information, as a JSON string. - */ - String getCompileInfo(); - /** * Gets the NN start time in milliseconds. * diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfoImpl.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfoImpl.java index 987f4aee0314..74ba3c5b629c 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfoImpl.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfoImpl.java @@ -42,12 +42,6 @@ public String getSoftwareVersion() { return versionInfo.getVersion(); } - @Override - public String getCompileInfo() { - return versionInfo.getDate() + " by " + versionInfo.getUser() + " from " - + versionInfo.getBranch(); - } - @Override public long getStartedTimeInMillis() { return startedTimeInMillis; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java index f27f42e0b4c7..9d037fed6bc8 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java @@ -123,8 +123,8 @@ /** * Create a Jetty embedded server to answer http requests. The primary goal is * to serve up status information for the server. There are three contexts: - * "/logs/" -> points to the log directory "/static/" -> points to common static - * files (src/webapps/static) "/" -> the jsp server code from + * "/logs/" -> points to the log directory "/static/" -> points to common static + * files (src/webapps/static) "/" -> the jsp server code from * (src/webapps/) * * This class is a fork of the old HttpServer. 
HttpServer exists for diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java index f4f188aaf395..bceec92c6c8f 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java @@ -41,11 +41,12 @@ import org.slf4j.LoggerFactory; /** + *
      * Servlet that runs async-profiler as web-endpoint.
    - * 

    + * * Source: https://github.com/apache/hive/blob/master/common/src/java/org * /apache/hive/http/ProfileServlet.java - *

    + * * Following options from async-profiler can be specified as query parameter. * // -e event profiling event: cpu|alloc|lock|cache-misses etc. * // -d duration run profiling for seconds @@ -79,7 +80,7 @@ * curl "http://localhost:10002/prof" * - To collect 1 minute CPU profile of current process and output in tree * format (html) - * curl "http://localhost:10002/prof?output=tree&duration=60" + * curl "http://localhost:10002/prof?output=tree&duration=60" * - To collect 30 second heap allocation profile of current process (returns * FlameGraph svg) * curl "http://localhost:10002/prof?event=alloc" @@ -111,6 +112,7 @@ * The default output format of the newest async profiler is HTML. * If the user is using an older version such as 1.5, HTML is not supported. * Please specify the corresponding output format. + *

    */ public class ProfileServlet extends HttpServlet { private static final long serialVersionUID = 1L; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointServlet.java index 2d718628e1eb..cb1fdd3375a8 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointServlet.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointServlet.java @@ -41,22 +41,20 @@ import org.apache.commons.fileupload.servlet.ServletFileUpload; import org.apache.commons.fileupload.util.Streams; import org.apache.commons.io.FileUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.server.OzoneAdmins; import org.apache.hadoop.hdds.utils.db.DBCheckpoint; import org.apache.hadoop.hdds.utils.db.DBStore; - -import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.ozone.lock.BootstrapStateHandler; +import org.apache.hadoop.security.UserGroupInformation; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.apache.hadoop.hdds.utils.HddsServerUtil.writeDBCheckpointToStream; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_FLUSH; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_TO_EXCLUDE_SST; import static org.apache.hadoop.ozone.OzoneConsts.ROCKSDB_SST_SUFFIX; -import org.apache.hadoop.ozone.lock.BootstrapStateHandler; -import org.apache.hadoop.security.UserGroupInformation; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * Provides the current checkpoint Snapshot of the OM/SCM DB. (tar) */ @@ -287,7 +285,7 @@ private static String[] parseFormDataParameters(HttpServletRequest request) { LOG.warn("Exception occured during form data parsing {}", e.getMessage()); } - return sstParam.size() == 0 ? null : sstParam.toArray(new String[0]); + return sstParam.isEmpty() ? null : sstParam.toArray(new String[0]); } /** diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java index c45e772c2417..d80b6b3a272c 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java @@ -125,11 +125,11 @@ private HddsServerUtil() { HddsServerUtil.class); /** - * Add protobuf-based protocol to the {@link RPC.Server}. + * Add protobuf-based protocol to the {@link org.apache.hadoop.ipc.RPC.Server}. 
* @param conf configuration * @param protocol Protocol interface * @param service service that implements the protocol - * @param server RPC server to which the protocol & implementation is added to + * @param server RPC server to which the protocol and implementation is added to */ public static void addPBProtocol(Configuration conf, Class protocol, BlockingService service, RPC.Server server) throws IOException { @@ -742,9 +742,7 @@ public static String createStartupShutdownMessage(VersionInfo versionInfo, " version = " + versionInfo.getVersion(), " classpath = " + System.getProperty("java.class.path"), " build = " + versionInfo.getUrl() + "/" - + versionInfo.getRevision() - + " ; compiled by '" + versionInfo.getUser() - + "' on " + versionInfo.getDate(), + + versionInfo.getRevision(), " java = " + System.getProperty("java.version"), " conf = " + conf); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java index 29531f315184..8387934261ce 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java @@ -45,6 +45,7 @@ public final class TransactionInfo implements Comparable { StringCodec.get(), TransactionInfo::valueOf, TransactionInfo::toString, + TransactionInfo.class, DelegatedCodec.CopyType.SHALLOW); public static Codec getCodec() { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteArrayCodec.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteArrayCodec.java index f62d3ac19cf2..bb5eef70d25a 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteArrayCodec.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteArrayCodec.java @@ -33,6 +33,11 @@ private ByteArrayCodec() { // singleton } + @Override + public Class getTypeClass() { + return byte[].class; + } + @Override public byte[] toPersistedFormat(byte[] bytes) { return bytes; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteStringCodec.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteStringCodec.java index 997bdf6cf2ee..20e373317b17 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteStringCodec.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteStringCodec.java @@ -34,6 +34,11 @@ public static ByteStringCodec get() { private ByteStringCodec() { } + @Override + public Class getTypeClass() { + return ByteString.class; + } + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBColumnFamilyDefinition.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBColumnFamilyDefinition.java index 653182214b6c..a5268e6031c8 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBColumnFamilyDefinition.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBColumnFamilyDefinition.java @@ -54,32 +54,21 @@ public class DBColumnFamilyDefinition { private final String tableName; - private final Class keyType; - private final Codec keyCodec; - private final Class valueType; - private final Codec valueCodec; - private ManagedColumnFamilyOptions cfOptions; + private volatile 
ManagedColumnFamilyOptions cfOptions; - public DBColumnFamilyDefinition( - String tableName, - Class keyType, - Codec keyCodec, - Class valueType, - Codec valueCodec) { + public DBColumnFamilyDefinition(String tableName, Codec keyCodec, Codec valueCodec) { this.tableName = tableName; - this.keyType = keyType; this.keyCodec = keyCodec; - this.valueType = valueType; this.valueCodec = valueCodec; this.cfOptions = null; } public Table getTable(DBStore db) throws IOException { - return db.getTable(tableName, keyType, valueType); + return db.getTable(tableName, getKeyType(), getValueType()); } public String getName() { @@ -87,7 +76,7 @@ public String getName() { } public Class getKeyType() { - return keyType; + return keyCodec.getTypeClass(); } public Codec getKeyCodec() { @@ -95,7 +84,7 @@ public Codec getKeyCodec() { } public Class getValueType() { - return valueType; + return valueCodec.getTypeClass(); } public Codec getValueCodec() { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBDefinition.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBDefinition.java index 968d62f0dd5a..461bd35f413c 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBDefinition.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBDefinition.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.server.ServerUtils; +import org.apache.ratis.util.MemoizedSupplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -28,6 +29,9 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; /** * Simple interface to provide information to create a DBStore.. @@ -55,6 +59,16 @@ default File getDBLocation(ConfigurationSource conf) { getLocationConfigKey(), getName()); } + static List getColumnFamilyNames(Iterable> columnFamilies) { + return Collections.unmodifiableList(StreamSupport.stream(columnFamilies.spliterator(), false) + .map(DBColumnFamilyDefinition::getName) + .collect(Collectors.toList())); + } + + default List getColumnFamilyNames() { + return getColumnFamilyNames(getColumnFamilies()); + } + /** * @return The column families present in the DB. */ @@ -109,9 +123,17 @@ interface WithMapInterface extends DBDefinition { */ abstract class WithMap implements WithMapInterface { private final Map> map; + private final Supplier> columnFamilyNames; protected WithMap(Map> map) { this.map = map; + this.columnFamilyNames = MemoizedSupplier.valueOf( + () -> DBDefinition.getColumnFamilyNames(getColumnFamilies())); + } + + @Override + public final List getColumnFamilyNames() { + return columnFamilyNames.get(); } @Override diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java index 3e8ea30a6528..8623a3bdd7d7 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java @@ -184,7 +184,7 @@ void move(KEY sourceKey, KEY destKey, VALUE value, /** * Get List of Index to Table Names. 
* (For decoding table from column family index) - * @return Map of Index -> TableName + * @return Map of Index -> TableName */ Map getTableNames(); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java index c441ec929c76..c156b8e4d67a 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java @@ -841,7 +841,7 @@ private int getLastLevel() throws IOException { /** * Deletes sst files which do not correspond to prefix * for given table. - * @param prefixPairs, a map of TableName to prefixUsed. + * @param prefixPairs a map of TableName to prefixUsed. */ public void deleteFilesNotMatchingPrefix(Map prefixPairs) throws IOException, RocksDBException { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java index c818c07b1acc..c7055267052e 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java @@ -24,6 +24,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.hdds.annotation.InterfaceStability; @@ -169,7 +170,7 @@ default VALUE getReadCopy(KEY key) throws IOException { /** * Returns a prefixed iterator for this metadata store. * @param prefix - * @return + * @return MetaStoreIterator */ TableIterator> iterator(KEY prefix) throws IOException; @@ -245,7 +246,7 @@ default TableCacheMetrics createCacheMetrics() throws IOException { /** * Returns a certain range of key value pairs as a list based on a - * startKey or count. Further a {@link MetadataKeyFilters.MetadataKeyFilter} + * startKey or count. Further a {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter} * can be added to * filter keys if necessary. * To prevent race conditions while listing * entries, this implementation takes a snapshot and lists the entries from @@ -261,7 +262,7 @@ default TableCacheMetrics createCacheMetrics() throws IOException { * the value for count must be an integer greater than 0. *

    * This method allows to specify one or more - * {@link MetadataKeyFilters.MetadataKeyFilter} + * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter} * to filter keys by certain condition. Once given, only the entries * whose key passes all the filters will be included in the result. * @@ -269,7 +270,7 @@ default TableCacheMetrics createCacheMetrics() throws IOException { * @param count max number of entries to return. * @param prefix fixed key schema specific prefix * @param filters customized one or more - * {@link MetadataKeyFilters.MetadataKeyFilter}. + * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter}. * @return a list of entries found in the database or an empty list if the * startKey is invalid. * @throws IOException if there are I/O errors. @@ -292,7 +293,7 @@ List> getRangeKVs(KEY startKey, * @param count max number of entries to return. * @param prefix fixed key schema specific prefix * @param filters customized one or more - * {@link MetadataKeyFilters.MetadataKeyFilter}. + * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter}. * @return a list of entries found in the database. * @throws IOException * @throws IllegalArgumentException @@ -307,7 +308,6 @@ List> getSequentialRangeKVs(KEY startKey, * as part of a batch operation. * @param batch * @param prefix - * @return */ void deleteBatchWithPrefix(BatchOperation batch, KEY prefix) throws IOException; @@ -354,6 +354,24 @@ public V getValue() { public String toString() { return "(key=" + key + ", value=" + value + ")"; } + + @Override + public boolean equals(Object obj) { + if (!(obj instanceof KeyValue)) { + return false; + } + KeyValue kv = (KeyValue) obj; + try { + return getKey().equals(kv.getKey()) && getValue().equals(kv.getValue()); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + public int hashCode() { + return Objects.hash(getKey(), getValue()); + } }; } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/package-info.java index 0c1ec710d2c8..c428f2860eee 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/package-info.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/audit/package-info.java @@ -48,7 +48,7 @@ * This interface must be implemented by entities requiring audit logging. * For example - OMVolumeArgs, OMBucketArgs. * The implementing class must override toAuditMap() to return an - * instance of Map where both Key and Value are String. + * instance of {@code Map} where both Key and Value are String. 
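As a rough illustration of the toAuditMap() contract described above (not code from this change), an implementing class might expose its state like this; the field names are purely hypothetical and the map type is java.util.LinkedHashMap:

    @Override
    public Map<String, String> toAuditMap() {
      Map<String, String> auditMap = new LinkedHashMap<>();
      // Key and value are both Strings, with keys kept to printable US-ASCII.
      auditMap.put("volume", volumeName);
      auditMap.put("creationTime", String.valueOf(creationTime));
      return auditMap;
    }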
* * Key: must contain printable US ASCII characters * May not contain a space, =, ], or " diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java index aad3e9e12e64..7966afe50457 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java @@ -179,7 +179,7 @@ public void builderWithColumnFamilyOptions(@TempDir Path tempDir) String sampleTableName = "sampleTable"; final DBColumnFamilyDefinition sampleTable = new DBColumnFamilyDefinition<>(sampleTableName, - String.class, StringCodec.get(), Long.class, LongCodec.get()); + StringCodec.get(), LongCodec.get()); final DBDefinition sampleDB = new DBDefinition.WithMap( DBColumnFamilyDefinition.newUnmodifiableMap(sampleTable)) { { @@ -250,8 +250,8 @@ public void testIfAutoCompactionDisabled(boolean disableAutoCompaction, String sampleTableName = "sampleTable"; final DBColumnFamilyDefinition sampleTable = - new DBColumnFamilyDefinition<>(sampleTableName, String.class, - StringCodec.get(), Long.class, LongCodec.get()); + new DBColumnFamilyDefinition<>(sampleTableName, + StringCodec.get(), LongCodec.get()); final DBDefinition sampleDB = new DBDefinition.WithMap( DBColumnFamilyDefinition.newUnmodifiableMap(sampleTable)) { @Override diff --git a/hadoop-hdds/hadoop-dependency-client/pom.xml b/hadoop-hdds/hadoop-dependency-client/pom.xml index 03b677e3818c..dbf4a9420c1d 100644 --- a/hadoop-hdds/hadoop-dependency-client/pom.xml +++ b/hadoop-hdds/hadoop-dependency-client/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-hadoop-dependency-client - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Hadoop client dependencies Apache Ozone HDDS Hadoop Client dependencies diff --git a/hadoop-hdds/hadoop-dependency-server/pom.xml b/hadoop-hdds/hadoop-dependency-server/pom.xml index 69daeac4bd7f..c6f91c4ca173 100644 --- a/hadoop-hdds/hadoop-dependency-server/pom.xml +++ b/hadoop-hdds/hadoop-dependency-server/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-hadoop-dependency-server - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Hadoop server dependencies Apache Ozone HDDS Hadoop Server dependencies diff --git a/hadoop-hdds/hadoop-dependency-test/pom.xml b/hadoop-hdds/hadoop-dependency-test/pom.xml index 80ec91cd6d94..f04e45a03404 100644 --- a/hadoop-hdds/hadoop-dependency-test/pom.xml +++ b/hadoop-hdds/hadoop-dependency-test/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-hadoop-dependency-test - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Hadoop test dependencies Apache Ozone HDDS Hadoop Test dependencies diff --git a/hadoop-hdds/interface-admin/pom.xml b/hadoop-hdds/interface-admin/pom.xml index 9230b02b524c..422fb2048a34 100644 --- a/hadoop-hdds/interface-admin/pom.xml +++ b/hadoop-hdds/interface-admin/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-interface-admin - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Admin interface Apache Ozone HDDS Admin 
Interface diff --git a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto index 039914369b5f..ee2df89e81b7 100644 --- a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto +++ b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto @@ -296,6 +296,7 @@ message SCMListContainerRequestProto { message SCMListContainerResponseProto { repeated ContainerInfoProto containers = 1; + optional int64 containerCount = 2; } message SCMDeleteContainerRequestProto { diff --git a/hadoop-hdds/interface-client/pom.xml b/hadoop-hdds/interface-client/pom.xml index 98cfc53f5e86..3c8c7cfc2257 100644 --- a/hadoop-hdds/interface-client/pom.xml +++ b/hadoop-hdds/interface-client/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-interface-client - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Client interface Apache Ozone HDDS Client Interface diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto index 6cd4f6235ce7..1fc5884e24fa 100644 --- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto +++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto @@ -61,7 +61,7 @@ message ExtendedDatanodeDetailsProto { optional string version = 2; optional int64 setupTime = 3; optional string revision = 4; - optional string buildDate = 5; + optional string buildDate = 5; // unused, reserved for compatibility } message MoveDataNodePairProto { @@ -257,6 +257,7 @@ message GetScmInfoResponseProto { required string clusterId = 1; required string scmId = 2; repeated string peerRoles = 3; + optional bool scmRatisEnabled = 4; } message AddScmRequestProto { diff --git a/hadoop-hdds/interface-server/pom.xml b/hadoop-hdds/interface-server/pom.xml index df65c1e2b2af..6a6f2a6b383a 100644 --- a/hadoop-hdds/interface-server/pom.xml +++ b/hadoop-hdds/interface-server/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-interface-server - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Server interface Apache Ozone HDDS Server Interface diff --git a/hadoop-hdds/managed-rocksdb/pom.xml b/hadoop-hdds/managed-rocksdb/pom.xml index 125783222e54..40ad920647a0 100644 --- a/hadoop-hdds/managed-rocksdb/pom.xml +++ b/hadoop-hdds/managed-rocksdb/pom.xml @@ -19,10 +19,10 @@ org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-managed-rocksdb - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Managed RocksDB library Apache Ozone HDDS Managed RocksDB jar diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java index 148abee7fc0e..d58f70495fe8 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java @@ -55,12 +55,7 @@ static UncheckedAutoCloseable track(AutoCloseable object) { static void reportLeak(Class clazz, String stackTrace) { ManagedRocksObjectMetrics.INSTANCE.increaseLeakObject(); - String warning = String.format("%s is not closed properly", clazz.getSimpleName()); - if (stackTrace != null && 
LOG.isDebugEnabled()) { - String debugMessage = String.format("%nStackTrace for unclosed instance: %s", stackTrace); - warning = warning.concat(debugMessage); - } - LOG.warn(warning); + HddsUtils.reportLeak(clazz, stackTrace, LOG); } private static @Nullable StackTraceElement[] getStackTrace() { diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml index 87d761583014..2fb94d8c809a 100644 --- a/hadoop-hdds/pom.xml +++ b/hadoop-hdds/pom.xml @@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone-main - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Project Apache Ozone HDDS pom diff --git a/hadoop-hdds/rocks-native/pom.xml b/hadoop-hdds/rocks-native/pom.xml index 5fc9949514bd..35a03db730a4 100644 --- a/hadoop-hdds/rocks-native/pom.xml +++ b/hadoop-hdds/rocks-native/pom.xml @@ -18,7 +18,7 @@ hdds org.apache.ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT 4.0.0 Apache Ozone HDDS RocksDB Tools diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java index ce424c930e1c..d93933dee362 100644 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java +++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java @@ -36,6 +36,8 @@ import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; +import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; + /** * Class to load Native Libraries. */ @@ -67,6 +69,10 @@ public static NativeLibraryLoader getInstance() { return instance; } + public static String getJniLibraryFileName() { + return appendLibOsSuffix("lib" + ROCKS_TOOLS_NATIVE_LIBRARY_NAME); + } + public static String getJniLibraryFileName(String libraryName) { return appendLibOsSuffix("lib" + libraryName); } @@ -99,9 +105,12 @@ private static String appendLibOsSuffix(String libraryFileName) { return libraryFileName + getLibOsSuffix(); } + public static boolean isLibraryLoaded() { + return isLibraryLoaded(ROCKS_TOOLS_NATIVE_LIBRARY_NAME); + } + public static boolean isLibraryLoaded(final String libraryName) { - return getInstance().librariesLoaded - .getOrDefault(libraryName, false); + return getInstance().librariesLoaded.getOrDefault(libraryName, false); } public synchronized boolean loadLibrary(final String libraryName, final List dependentFiles) { diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml index 3e535c5f5f26..c4284a4e85d1 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml +++ b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml @@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT rocksdb-checkpoint-differ - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT RocksDB Checkpoint Differ RocksDB Checkpoint Differ jar diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionLogEntry.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionLogEntry.java index c27763b97880..04980821ba9e 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionLogEntry.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionLogEntry.java @@ -38,7 +38,8 @@ public final 
class CompactionLogEntry implements private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(CompactionLogEntryProto.getDefaultInstance()), CompactionLogEntry::getFromProtobuf, - CompactionLogEntry::getProtobuf); + CompactionLogEntry::getProtobuf, + CompactionLogEntry.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index 32408e8904b2..26e35b072ffc 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-server-scm - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Storage Container Manager Server Apache Ozone HDDS SCM Server jar diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PlacementPolicy.java index a792e2cea6b7..05eb32722e73 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PlacementPolicy.java @@ -73,7 +73,7 @@ ContainerPlacementStatus validateContainerPlacement( * Given a set of replicas of a container which are * neither over underreplicated nor overreplicated, * return a set of replicas to copy to another node to fix misreplication. - * @param replicas: Map of replicas with value signifying if + * @param replicas Map of replicas with value signifying if * replica can be copied */ Set replicasToCopyToFixMisreplication( @@ -82,8 +82,8 @@ Set replicasToCopyToFixMisreplication( /** * Given a set of replicas of a container which are overreplicated, * return a set of replicas to delete to fix overreplication. - * @param replicas: Set of existing replicas of the container - * @param expectedCountPerUniqueReplica: Replication factor of each + * @param replicas Set of existing replicas of the container + * @param expectedCountPerUniqueReplica Replication factor of each * unique replica */ Set replicasToRemoveToFixOverreplication( diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java index 471a94794122..2a1c6fce0c0f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java @@ -525,7 +525,7 @@ public boolean isValidNode(DatanodeDetails datanodeDetails, * Given a set of replicas of a container which are * neither over underreplicated nor overreplicated, * return a set of replicas to copy to another node to fix misreplication. - * @param replicas: Map of replicas with value signifying if + * @param replicas Map of replicas with value signifying if * replica can be copied */ @Override @@ -582,7 +582,7 @@ protected Node getPlacementGroup(DatanodeDetails dn) { * replication is computed. * The algorithm starts with creating a replicaIdMap which contains the * replicas grouped by replica Index. A placementGroup Map is created which - * groups replicas based on their rack & the replicas within the rack + * groups replicas based on their rack and the replicas within the rack * are further grouped based on the replica Index. 
* A placement Group Count Map is created which keeps * track of the count of replicas in each rack. @@ -590,13 +590,13 @@ protected Node getPlacementGroup(DatanodeDetails dn) { * order based on their current replication factor in a descending factor. * For each replica Index the replica is removed from the rack which contains * the most replicas, in order to achieve this the racks are put - * into priority queue & are based on the number of replicas they have. - * The replica is removed from the rack with maximum replicas & the replica - * to be removed is also removed from the maps created above & + * into priority queue and are based on the number of replicas they have. + * The replica is removed from the rack with maximum replicas and the replica + * to be removed is also removed from the maps created above and * the count for rack is reduced. * The set of replicas computed are then returned by the function. - * @param replicas: Set of existing replicas of the container - * @param expectedCountPerUniqueReplica: Replication factor of each + * @param replicas Set of existing replicas of the container + * @param expectedCountPerUniqueReplica Replication factor of each * * unique replica * @return Set of replicas to be removed are computed. */ diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java index e485fcc98d93..99fd9c7b431d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java @@ -47,7 +47,8 @@ void addTransactionToDN(UUID dnID, DeletedBlocksTransaction tx) { blocksDeleted += tx.getLocalIDCount(); if (SCMBlockDeletingService.LOG.isDebugEnabled()) { SCMBlockDeletingService.LOG - .debug("Transaction added: {} <- TX({})", dnID, tx.getTxID()); + .debug("Transaction added: {} <- TX({}), DN {} <- blocksDeleted Add {}.", + dnID, tx.getTxID(), dnID, tx.getLocalIDCount()); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java index 45d53c0ef2cd..5ec68c78d74a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java @@ -57,7 +57,7 @@ DatanodeDeletedBlockTransactions getTransactions( * considered to be failed if it has been sent more than MAX_RETRY limit * and its count is reset to -1. * - * @param count Maximum num of returned transactions, if < 0. return all. + * @param count Maximum num of returned transactions, if < 0. return all. * @param startTxId The least transaction id to start with. * @return a list of failed deleted block transactions. 
* @throws IOException diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java index 9d5377b9e3e6..987f293dd14c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler.DeleteBlockStatus; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -200,20 +199,6 @@ private DeletedBlocksTransaction constructNewTransaction( .build(); } - private boolean isTransactionFailed(DeleteBlockTransactionResult result) { - if (LOG.isDebugEnabled()) { - LOG.debug( - "Got block deletion ACK from datanode, TXIDs={}, " + "success={}", - result.getTxID(), result.getSuccess()); - } - if (!result.getSuccess()) { - LOG.warn("Got failed ACK for TXID={}, prepare to resend the " - + "TX in next interval", result.getTxID()); - return true; - } - return false; - } - @Override public int getNumOfValidTransactions() throws IOException { lock.lock(); @@ -300,26 +285,46 @@ private void getTransaction(DeletedBlocksTransaction tx, .setCount(transactionStatusManager.getOrDefaultRetryCount( tx.getTxID(), 0)) .build(); + for (ContainerReplica replica : replicas) { DatanodeDetails details = replica.getDatanodeDetails(); - if (!dnList.contains(details)) { - continue; - } if (!transactionStatusManager.isDuplication( details, updatedTxn.getTxID(), commandStatus)) { transactions.addTransactionToDN(details.getUuid(), updatedTxn); + metrics.incrProcessedTransaction(); } } } private Boolean checkInadequateReplica(Set replicas, - DeletedBlocksTransaction txn) throws ContainerNotFoundException { + DeletedBlocksTransaction txn, + Set dnList) throws ContainerNotFoundException { ContainerInfo containerInfo = containerManager .getContainer(ContainerID.valueOf(txn.getContainerID())); ReplicationManager replicationManager = scmContext.getScm().getReplicationManager(); ContainerHealthResult result = replicationManager .getContainerReplicationHealth(containerInfo, replicas); + + // We have made an improvement here, and we expect that all replicas + // of the Container being sent will be included in the dnList. + // This change benefits ACK confirmation and improves deletion speed. + // The principle behind it is that + // DN can receive the command to delete a certain Container at the same time and provide + // feedback to SCM at roughly the same time. + // This avoids the issue of deletion blocking, + // where some replicas of a Container are deleted while others do not receive the delete command. 
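Stated compactly, the guard described in the comment above requires every replica's datanode to appear in the current dnList before the transaction is handed out; the following is an illustrative equivalent of that check, not the code of this change (which follows below):

    boolean allReplicasCovered = replicas.stream()
        .map(ContainerReplica::getDatanodeDetails)
        .allMatch(dnList::contains);
    // Only when allReplicasCovered is true is the delete transaction dispatched;
    // otherwise the transaction is skipped for this round and retried later.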
+ long containerId = txn.getContainerID(); + for (ContainerReplica replica : replicas) { + DatanodeDetails datanodeDetails = replica.getDatanodeDetails(); + if (!dnList.contains(datanodeDetails)) { + DatanodeDetails dnDetail = replica.getDatanodeDetails(); + LOG.debug("Skip Container = {}, because DN = {} is not in dnList.", + containerId, dnDetail.getUuid()); + return true; + } + } + return result.getHealthState() != ContainerHealthResult.HealthState.HEALTHY; } @@ -345,6 +350,7 @@ public DatanodeDeletedBlockTransactions getTransactions( .getCommandStatusByTxId(dnList.stream(). map(DatanodeDetails::getUuid).collect(Collectors.toSet())); ArrayList txIDs = new ArrayList<>(); + metrics.setNumBlockDeletionTransactionDataNodes(dnList.size()); // Here takes block replica count as the threshold to avoid the case // that part of replicas committed the TXN and recorded in the // SCMDeletedBlockTransactionStatusManager, while they are counted @@ -358,23 +364,25 @@ public DatanodeDeletedBlockTransactions getTransactions( // HDDS-7126. When container is under replicated, it is possible // that container is deleted, but transactions are not deleted. if (containerManager.getContainer(id).isDeleted()) { - LOG.warn("Container: " + id + " was deleted for the " + - "transaction: " + txn); + LOG.warn("Container: {} was deleted for the " + + "transaction: {}.", id, txn); txIDs.add(txn.getTxID()); } else if (txn.getCount() > -1 && txn.getCount() <= maxRetry && !containerManager.getContainer(id).isOpen()) { Set replicas = containerManager .getContainerReplicas( ContainerID.valueOf(txn.getContainerID())); - if (checkInadequateReplica(replicas, txn)) { + if (checkInadequateReplica(replicas, txn, dnList)) { + metrics.incrSkippedTransaction(); continue; } getTransaction( txn, transactions, dnList, replicas, commandStatus); + } else if (txn.getCount() >= maxRetry || containerManager.getContainer(id).isOpen()) { + metrics.incrSkippedTransaction(); } } catch (ContainerNotFoundException ex) { - LOG.warn("Container: " + id + " was not found for the transaction: " - + txn); + LOG.warn("Container: {} was not found for the transaction: {}.", id, txn); txIDs.add(txn.getTxID()); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java index 7271d9dcba68..e6fc45cb5eee 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java @@ -203,9 +203,10 @@ public EmptyTaskResult call() throws Exception { } } LOG.info("Totally added {} blocks to be deleted for" - + " {} datanodes, task elapsed time: {}ms", + + " {} datanodes / {} totalnodes, task elapsed time: {}ms", transactions.getBlocksDeleted(), transactions.getDatanodeTransactionMap().size(), + included.size(), Time.monotonicNow() - startTime); deletedBlockLog.incrementCount(new ArrayList<>(processedTxIDs)); } catch (NotLeaderException nle) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/ScmBlockDeletingServiceMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/ScmBlockDeletingServiceMetrics.java index 2cadca1d92a4..6637bd183293 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/ScmBlockDeletingServiceMetrics.java +++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/ScmBlockDeletingServiceMetrics.java @@ -24,6 +24,7 @@ import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MutableCounterLong; +import org.apache.hadoop.metrics2.lib.MutableGaugeLong; /** * Metrics related to Block Deleting Service running in SCM. @@ -76,6 +77,15 @@ public final class ScmBlockDeletingServiceMetrics { @Metric(about = "The number of created txs which are added into DB.") private MutableCounterLong numBlockDeletionTransactionCreated; + @Metric(about = "The number of skipped transactions") + private MutableCounterLong numSkippedTransactions; + + @Metric(about = "The number of processed transactions") + private MutableCounterLong numProcessedTransactions; + + @Metric(about = "The number of dataNodes of delete transactions.") + private MutableGaugeLong numBlockDeletionTransactionDataNodes; + private ScmBlockDeletingServiceMetrics() { } @@ -130,6 +140,18 @@ public void incrBlockDeletionTransactionCreated(long count) { this.numBlockDeletionTransactionCreated.incr(count); } + public void incrSkippedTransaction() { + this.numSkippedTransactions.incr(); + } + + public void incrProcessedTransaction() { + this.numProcessedTransactions.incr(); + } + + public void setNumBlockDeletionTransactionDataNodes(long dataNodes) { + this.numBlockDeletionTransactionDataNodes.set(dataNodes); + } + public long getNumBlockDeletionCommandSent() { return numBlockDeletionCommandSent.value(); } @@ -162,6 +184,18 @@ public long getNumBlockDeletionTransactionCreated() { return numBlockDeletionTransactionCreated.value(); } + public long getNumSkippedTransactions() { + return numSkippedTransactions.value(); + } + + public long getNumProcessedTransactions() { + return numProcessedTransactions.value(); + } + + public long getNumBlockDeletionTransactionDataNodes() { + return numBlockDeletionTransactionDataNodes.value(); + } + @Override public String toString() { StringBuffer buffer = new StringBuffer(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java index 3eba240533e3..6b6a888f4241 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java @@ -61,9 +61,9 @@ default List getContainers() { * The max size of the searching range cannot exceed the * value of count. * - * @param startID start containerID, >=0, + * @param startID start containerID, >=0, * start searching at the head if 0. - * @param count count must be >= 0 + * @param count count must be >= 0 * Usually the count will be replace with a very big * value instead of being unlimited in case the db is very big. * @@ -85,9 +85,9 @@ default List getContainers() { * The max size of the searching range cannot exceed the * value of count. * - * @param startID start containerID, >=0, + * @param startID start containerID, >=0, * start searching at the head if 0. - * @param count count must be >= 0 + * @param count count must be >= 0 * Usually the count will be replace with a very big * value instead of being unlimited in case the db is very big. 
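A sketch of how a caller might page through containers under the startID/count contract documented above; the accessor names and parameter types (getContainers taking a numeric start id, getContainerID on ContainerInfo) are assumptions based on this javadoc rather than verified signatures:

    long startId = 0;            // 0 starts the search at the head
    final int count = 1000;      // keep count bounded instead of unlimited
    List<ContainerInfo> page;
    do {
      page = containerManager.getContainers(startId, count);
      if (!page.isEmpty()) {
        // resume after the last container returned in this batch
        startId = page.get(page.size() - 1).getContainerID() + 1;
      }
    } while (!page.isEmpty());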
* @param state container state @@ -164,7 +164,6 @@ void updateContainerReplica(ContainerID containerID, ContainerReplica replica) * * @param containerID Container ID * @param replica ContainerReplica - * @return True of dataNode is removed successfully else false. */ void removeContainerReplica(ContainerID containerID, ContainerReplica replica) throws ContainerNotFoundException, ContainerReplicaNotFoundException; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java index 00aee0f62c25..d61f9ee366bd 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java @@ -86,6 +86,8 @@ public class ContainerManagerImpl implements ContainerManager { @SuppressWarnings("java:S2245") // no need for secure random private final Random random = new Random(); + private int maxCountOfContainerList; + /** * */ @@ -115,6 +117,10 @@ public ContainerManagerImpl( .getInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT); + this.maxCountOfContainerList = conf + .getInt(ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT, + ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT); + this.scmContainerManagerMetrics = SCMContainerManagerMetrics.create(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java index 19a2f3c2e621..c3b76dc44975 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java @@ -691,7 +691,7 @@ private void checkIterationMoveResults() { moveSelectionToFutureMap.values(); if (!futures.isEmpty()) { CompletableFuture allFuturesResult = CompletableFuture.allOf( - futures.toArray(new CompletableFuture[futures.size()])); + futures.toArray(new CompletableFuture[0])); try { allFuturesResult.get(config.getMoveTimeout().toMillis(), TimeUnit.MILLISECONDS); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java index 7fec06e7e069..1c2b5a3be395 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java @@ -346,8 +346,7 @@ protected List chooseDatanodesInternalLegacy( return chooseNodes(null, chosenNodes, mutableFavoredNodes, mutableUsedNodes, favorIndex, nodesRequired, mapSizeRequired); } else { - List mutableExcludedNodes = new ArrayList<>(); - mutableExcludedNodes.addAll(excludedNodes); + List mutableExcludedNodes = new ArrayList<>(excludedNodes); // choose node to meet replication requirement // case 1: one excluded node, choose one on the same rack as the excluded // node, choose others on different racks. 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java index 094e535dcbd9..3d113b3d3012 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java @@ -42,8 +42,7 @@ public SCMNodeMetric(SCMNodeStat stat) { * @param capacity in bytes * @param used in bytes * @param remaining in bytes - * @param committed - * @paaram committed in bytes + * @param committed in bytes */ @VisibleForTesting public SCMNodeMetric(long capacity, long used, long remaining, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java index 0abe8f6ea34d..fcfef7de6e6c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java @@ -248,7 +248,6 @@ public void setOfflineIndexesOkAfterPending(boolean val) { /** * Returns true if a container has under-replication caused by offline * indexes, but it is corrected by a pending add. - * @return */ public boolean offlineIndexesOkAfterPending() { return offlineIndexesOkAfterPending; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaPendingOps.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaPendingOps.java index d1890bdf8026..4eef0a8a744c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaPendingOps.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaPendingOps.java @@ -116,7 +116,7 @@ public List getPendingOps(ContainerID containerID) { * Store a ContainerReplicaOp to add a replica for the given ContainerID. * @param containerID ContainerID for which to add a replica * @param target The target datanode - * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) + * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) * @param deadlineEpochMillis The time by which the replica should have been * added and reported by the datanode, or it will * be discarded. @@ -130,7 +130,7 @@ public void scheduleAddReplica(ContainerID containerID, * Store a ContainerReplicaOp to delete a replica for the given ContainerID. * @param containerID ContainerID for which to delete a replica * @param target The target datanode - * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) + * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) * @param deadlineEpochMillis The time by which the replica should have been * deleted and reported by the datanode, or it will * be discarded. @@ -145,7 +145,7 @@ public void scheduleDeleteReplica(ContainerID containerID, * been replicated successfully. 
* @param containerID ContainerID for which to complete the replication * @param target The target Datanode - * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) + * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) * @return True if a pending replica was found and removed, false otherwise. */ public boolean completeAddReplica(ContainerID containerID, @@ -167,7 +167,7 @@ public boolean completeAddReplica(ContainerID containerID, * been deleted successfully. * @param containerID ContainerID for which to complete the deletion * @param target The target Datanode - * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) + * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) * @return True if a pending replica was found and removed, false otherwise. */ public boolean completeDeleteReplica(ContainerID containerID, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java index fe771fac6a4a..4e14798ccdcc 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java @@ -186,9 +186,9 @@ private void countReplicas() { * For example, consider a CLOSED container with the following replicas: * {CLOSED, CLOSING, OPEN, UNHEALTHY} * In this case, healthy replica count equals 3. Calculation: - * 1 CLOSED -> 1 matching replica. - * 1 OPEN, 1 CLOSING -> 2 mismatched replicas. - * 1 UNHEALTHY -> 1 unhealthy replica. Not counted as healthy. + * 1 CLOSED -> 1 matching replica. + * 1 OPEN, 1 CLOSING -> 2 mismatched replicas. + * 1 UNHEALTHY -> 1 unhealthy replica. Not counted as healthy. * Total healthy replicas = 3 = 1 matching + 2 mismatched replicas */ public int getHealthyReplicaCount() { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/HealthCheck.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/HealthCheck.java index a95c0d39945b..f271b8a863c9 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/HealthCheck.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/HealthCheck.java @@ -49,7 +49,6 @@ public interface HealthCheck { * returns false. This allows handlers to be chained together, and each will * be tried in turn until one succeeds. * @param handler - * @return */ HealthCheck addNext(HealthCheck handler); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java index c6f15be5d2cf..1289a0a21ff7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java @@ -37,10 +37,10 @@ *
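The healthy-replica rule documented in RatisContainerReplicaCount above (matching and mismatched replicas both count as healthy, UNHEALTHY replicas do not) can be checked with a self-contained snippet; the enum and method below are illustrative only, not Ozone classes:

    import java.util.Arrays;
    import java.util.List;

    final class HealthyReplicaCountSketch {
      /** Illustrative replica states mirroring the javadoc example; not Ozone's enum. */
      enum State { OPEN, CLOSING, QUASI_CLOSED, CLOSED, UNHEALTHY }

      /** Matching and mismatched replicas are both healthy; UNHEALTHY is excluded. */
      static int healthyCount(List<State> replicas) {
        return (int) replicas.stream().filter(s -> s != State.UNHEALTHY).count();
      }

      public static void main(String[] args) {
        // {CLOSED, CLOSING, OPEN, UNHEALTHY} for a CLOSED container -> 3, as in the javadoc.
        System.out.println(healthyCount(
            Arrays.asList(State.CLOSED, State.CLOSING, State.OPEN, State.UNHEALTHY)));
      }

      private HealthyReplicaCountSketch() { }
    }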

  * Currently we manage the following attributes for a container.
  * <p>
 - * 1. StateMap - LifeCycleState -> Set of ContainerIDs
 - * 2. TypeMap - ReplicationType -> Set of ContainerIDs
 - * 3. OwnerMap - OwnerNames -> Set of ContainerIDs
 - * 4. FactorMap - ReplicationFactor -> Set of ContainerIDs
 + * 1. StateMap - LifeCycleState -&gt; Set of ContainerIDs
 + * 2. TypeMap - ReplicationType -&gt; Set of ContainerIDs
 + * 3. OwnerMap - OwnerNames -&gt; Set of ContainerIDs
 + * 4. FactorMap - ReplicationFactor -&gt; Set of ContainerIDs
  * <p>
    * This means that for a cluster size of 750 PB -- we will have around 150 * Million containers, if we assume 5GB average container size. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java index f0d78b23079a..5eeb489f677e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java @@ -58,7 +58,7 @@ /** * Utilities for SCM HA security. */ -public final class HASecurityUtils { +public final class HASecurityUtils { private HASecurityUtils() { } @@ -150,7 +150,6 @@ public static CertificateServer initializeRootCertificateServer( * * @param conf * @param certificateClient - * @return */ public static GrpcTlsConfig createSCMRatisTLSConfig(SecurityConfig conf, CertificateClient certificateClient) throws IOException { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcService.java index bd4b56cd8c13..ec95ab66bf18 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcService.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcService.java @@ -18,8 +18,7 @@ package org.apache.hadoop.hdds.scm.ha; import java.io.IOException; - -import com.google.common.base.Preconditions; +import java.util.Objects; import org.apache.hadoop.hdds.protocol.scm.proto.InterSCMProtocolProtos.CopyDBCheckpointRequestProto; import org.apache.hadoop.hdds.protocol.scm.proto.InterSCMProtocolProtos.CopyDBCheckpointResponseProto; @@ -52,12 +51,11 @@ public class InterSCMGrpcService extends private final Table transactionInfoTable; InterSCMGrpcService(final StorageContainerManager scm) throws IOException { - Preconditions.checkNotNull(scm); + Objects.requireNonNull(scm, "scm"); this.scm = scm; this.transactionInfoTable = HAUtils.getTransactionInfoTable( - scm.getScmMetadataStore().getStore(), new SCMDBDefinition()); - provider = - new SCMDBCheckpointProvider(scm.getScmMetadataStore().getStore()); + scm.getScmMetadataStore().getStore(), SCMDBDefinition.get()); + this.provider = new SCMDBCheckpointProvider(scm.getScmMetadataStore().getStore()); } @Override @@ -67,7 +65,7 @@ public void download(CopyDBCheckpointRequestProto request, scm.getScmHAManager().asSCMHADBTransactionBuffer().flush(); TransactionInfo transactionInfo = transactionInfoTable.get(TRANSACTION_INFO_KEY); - Preconditions.checkNotNull(transactionInfo); + Objects.requireNonNull(transactionInfo, "transactionInfo"); SCMGrpcOutputStream outputStream = new SCMGrpcOutputStream(responseObserver, scm.getClusterId(), BUFFER_SIZE); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBufferImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBufferImpl.java index f1ee76a198ec..cd0346d72f83 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBufferImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBufferImpl.java @@ -26,6 +26,8 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.ratis.statemachine.SnapshotInfo; +import org.slf4j.Logger; +import 
org.slf4j.LoggerFactory; import java.io.IOException; import java.util.concurrent.atomic.AtomicLong; @@ -41,6 +43,8 @@ * operation in DB. */ public class SCMHADBTransactionBufferImpl implements SCMHADBTransactionBuffer { + + public static final Logger LOG = LoggerFactory.getLogger(SCMHADBTransactionBufferImpl.class); private final StorageContainerManager scm; private SCMMetadataStore metadataStore; private BatchOperation currentBatchOperation; @@ -107,6 +111,8 @@ public SnapshotInfo getLatestSnapshot() { @Override public void setLatestSnapshot(SnapshotInfo latestSnapshot) { + LOG.info("{}: Set latest Snapshot to {}", + scm.getScmHAManager().getRatisServer().getDivision().getId(), latestSnapshot); this.latestSnapshot.set(latestSnapshot); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java index 03f6ae293b2e..92a5140ff2a9 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java @@ -55,7 +55,6 @@ public interface SCMHAManager extends AutoCloseable { /** * Returns the DBTransactionBuffer as SCMHADBTransactionBuffer if its * valid. - * @return */ SCMHADBTransactionBuffer asSCMHADBTransactionBuffer(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java index fc3c1548ba14..5d0ea444ef8a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java @@ -72,6 +72,7 @@ public class SCMHAManagerImpl implements SCMHAManager { private final SCMRatisServer ratisServer; private final ConfigurationSource conf; + private final OzoneConfiguration ozoneConf; private final SecurityConfig securityConfig; private final DBTransactionBuffer transactionBuffer; private final SCMSnapshotProvider scmSnapshotProvider; @@ -89,6 +90,7 @@ public SCMHAManagerImpl(final ConfigurationSource conf, final SecurityConfig securityConfig, final StorageContainerManager scm) throws IOException { this.conf = conf; + this.ozoneConf = OzoneConfiguration.of(conf); this.securityConfig = securityConfig; this.scm = scm; this.exitManager = new ExitManager(); @@ -128,7 +130,7 @@ public void start() throws IOException { // It will first try to add itself to existing ring final SCMNodeDetails nodeDetails = scm.getSCMHANodeDetails().getLocalNodeDetails(); - final boolean success = HAUtils.addSCM(OzoneConfiguration.of(conf), + final boolean success = HAUtils.addSCM(ozoneConf, new AddSCMRequest.Builder().setClusterId(scm.getClusterId()) .setScmId(scm.getScmId()) .setRatisAddr(nodeDetails @@ -221,17 +223,18 @@ public List getSecretKeysFromLeader(String leaderID) } } + private TransactionInfo getTransactionInfoFromCheckpoint(Path checkpointLocation) throws IOException { + return HAUtils.getTrxnInfoFromCheckpoint( + ozoneConf, checkpointLocation, SCMDBDefinition.get()); + } + @Override public TermIndex verifyCheckpointFromLeader(String leaderId, DBCheckpoint checkpoint) { try { Path checkpointLocation = checkpoint.getCheckpointLocation(); - TransactionInfo checkpointTxnInfo = HAUtils - .getTrxnInfoFromCheckpoint(OzoneConfiguration.of(conf), - checkpointLocation, new SCMDBDefinition()); - - LOG.info("Installing 
checkpoint with SCMTransactionInfo {}", - checkpointTxnInfo); + final TransactionInfo checkpointTxnInfo = getTransactionInfoFromCheckpoint(checkpointLocation); + LOG.info("{}: Verify checkpoint {} from leader {}", scm.getScmId(), checkpointTxnInfo, leaderId); TermIndex termIndex = getRatisServer().getSCMStateMachine().getLastAppliedTermIndex(); @@ -281,12 +284,9 @@ public TermIndex installCheckpoint(DBCheckpoint dbCheckpoint) throws Exception { Path checkpointLocation = dbCheckpoint.getCheckpointLocation(); - TransactionInfo checkpointTrxnInfo = HAUtils - .getTrxnInfoFromCheckpoint(OzoneConfiguration.of(conf), - checkpointLocation, new SCMDBDefinition()); + final TransactionInfo checkpointTrxnInfo = getTransactionInfoFromCheckpoint(checkpointLocation); - LOG.info("Installing checkpoint with SCMTransactionInfo {}", - checkpointTrxnInfo); + LOG.info("{}: Install checkpoint {}", scm.getScmId(), checkpointTrxnInfo); return installCheckpoint(checkpointLocation, checkpointTrxnInfo); } @@ -457,7 +457,7 @@ public void startServices() throws IOException { // TODO: Fix the metrics ?? final SCMMetadataStore metadataStore = scm.getScmMetadataStore(); - metadataStore.start(OzoneConfiguration.of(conf)); + metadataStore.start(ozoneConf); scm.getSequenceIdGen().reinitialize(metadataStore.getSequenceIdTable()); scm.getPipelineManager().reinitialize(metadataStore.getPipelineTable()); scm.getContainerManager().reinitialize(metadataStore.getContainerTable()); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java index 1128accd2ff4..5805fe67e493 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java @@ -137,6 +137,7 @@ public void initialize(RaftServer server, RaftGroupId id, getLifeCycle().startAndTransition(() -> { super.initialize(server, id, raftStorage); storage.init(raftStorage); + LOG.info("{}: initialize {}", server.getId(), id); }); } @@ -149,6 +150,9 @@ public CompletableFuture applyTransaction( final SCMRatisRequest request = SCMRatisRequest.decode( Message.valueOf(trx.getStateMachineLogEntry().getLogData())); + if (LOG.isDebugEnabled()) { + LOG.debug("{}: applyTransaction {}", getId(), TermIndex.valueOf(trx.getLogEntry())); + } try { applyTransactionFuture.complete(process(request)); } catch (SCMException ex) { @@ -389,6 +393,7 @@ public void notifyConfigurationChanged(long term, long index, @Override public void pause() { final LifeCycle lc = getLifeCycle(); + LOG.info("{}: Try to pause from current LifeCycle state {}", getId(), lc); if (lc.getCurrentState() != LifeCycle.State.NEW) { lc.transition(LifeCycle.State.PAUSING); lc.transition(LifeCycle.State.PAUSED); @@ -414,6 +419,8 @@ public void reinitialize() throws IOException { throw new IOException(e); } + LOG.info("{}: SCMStateMachine is reinitializing. newTermIndex = {}", getId(), termIndex); + // re-initialize the DBTransactionBuffer and update the lastAppliedIndex. 
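Several of the additions above (SCMHADBTransactionBufferImpl, SCMHAManagerImpl, SCMStateMachine) follow one logging convention: prefix the message with the local node or SCM id so that logs from multiple SCMs in an HA ring can be correlated. A generic sketch of that pattern using plain SLF4J; the class and method names here are illustrative:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    final class PrefixedLoggingSketch {
      private static final Logger LOG = LoggerFactory.getLogger(PrefixedLoggingSketch.class);

      static void logReinitialize(String nodeId, long term, long index) {
        // Parameterized placeholders avoid string concatenation when INFO is disabled.
        LOG.info("{}: reinitializing, newTermIndex = ({}, {})", nodeId, term, index);
      }

      private PrefixedLoggingSketch() { }
    }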
try { transactionBuffer.init(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/BigIntegerCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/BigIntegerCodec.java index 5a7e86e99cc2..ab7530967166 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/BigIntegerCodec.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/BigIntegerCodec.java @@ -38,6 +38,11 @@ private BigIntegerCodec() { // singleton } + @Override + public Class getTypeClass() { + return BigInteger.class; + } + @Override public byte[] toPersistedFormat(BigInteger object) throws IOException { return object.toByteArray(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMDBDefinition.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMDBDefinition.java index 4a280d2103ad..ea86fa154af5 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMDBDefinition.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMDBDefinition.java @@ -41,75 +41,53 @@ * Class defines the structure and types of the scm.db. */ public class SCMDBDefinition extends DBDefinition.WithMap { - public SCMDBDefinition() { - this(COLUMN_FAMILIES); - } - - protected SCMDBDefinition(Map> map) { - super(map); - } - public static final DBColumnFamilyDefinition DELETED_BLOCKS = new DBColumnFamilyDefinition<>( "deletedBlocks", - Long.class, LongCodec.get(), - DeletedBlocksTransaction.class, Proto2Codec.get(DeletedBlocksTransaction.getDefaultInstance())); public static final DBColumnFamilyDefinition VALID_CERTS = new DBColumnFamilyDefinition<>( "validCerts", - BigInteger.class, BigIntegerCodec.get(), - X509Certificate.class, X509CertificateCodec.get()); public static final DBColumnFamilyDefinition VALID_SCM_CERTS = new DBColumnFamilyDefinition<>( "validSCMCerts", - BigInteger.class, BigIntegerCodec.get(), - X509Certificate.class, X509CertificateCodec.get()); public static final DBColumnFamilyDefinition PIPELINES = new DBColumnFamilyDefinition<>( "pipelines", - PipelineID.class, PipelineID.getCodec(), - Pipeline.class, Pipeline.getCodec()); public static final DBColumnFamilyDefinition CONTAINERS = new DBColumnFamilyDefinition<>( "containers", - ContainerID.class, ContainerID.getCodec(), - ContainerInfo.class, ContainerInfo.getCodec()); public static final DBColumnFamilyDefinition TRANSACTIONINFO = new DBColumnFamilyDefinition<>( "scmTransactionInfos", - String.class, StringCodec.get(), - TransactionInfo.class, TransactionInfo.getCodec()); public static final DBColumnFamilyDefinition SEQUENCE_ID = new DBColumnFamilyDefinition<>( "sequenceId", - String.class, StringCodec.get(), - Long.class, LongCodec.get()); public static final DBColumnFamilyDefinition> map) { MOVE = new DBColumnFamilyDefinition<>( "move", - ContainerID.class, ContainerID.getCodec(), - MoveDataNodePair.class, MoveDataNodePair.getCodec()); /** @@ -129,18 +105,14 @@ protected SCMDBDefinition(Map> map) { public static final DBColumnFamilyDefinition META = new DBColumnFamilyDefinition<>( "meta", - String.class, StringCodec.get(), - String.class, StringCodec.get()); public static final DBColumnFamilyDefinition STATEFUL_SERVICE_CONFIG = new DBColumnFamilyDefinition<>( "statefulServiceConfig", - String.class, StringCodec.get(), - ByteString.class, ByteStringCodec.get()); private static final Map> @@ -156,6 +128,16 @@ protected SCMDBDefinition(Map> 
map) { VALID_CERTS, VALID_SCM_CERTS); + private static final SCMDBDefinition INSTANCE = new SCMDBDefinition(COLUMN_FAMILIES); + + public static SCMDBDefinition get() { + return INSTANCE; + } + + protected SCMDBDefinition(Map> map) { + super(map); + } + @Override public String getName() { return "scm.db"; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java index ec63076b4a62..6aa993f60771 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java @@ -104,7 +104,7 @@ public SCMMetadataStoreImpl(OzoneConfiguration config) public void start(OzoneConfiguration config) throws IOException { if (this.store == null) { - SCMDBDefinition scmdbDefinition = new SCMDBDefinition(); + final SCMDBDefinition scmdbDefinition = SCMDBDefinition.get(); File metaDir = HAUtils.getMetaDir(scmdbDefinition, configuration); // Check if there is a DB Inconsistent Marker in the metaDir. This // marker indicates that the DB is in an inconsistent state and hence diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java index 3e7db16c2a0f..e0279e8f2f08 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java @@ -51,6 +51,11 @@ private X509CertificateCodec() { // singleton } + @Override + public Class getTypeClass() { + return X509Certificate.class; + } + @Override public boolean supportCodecBuffer() { return true; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java index 05ed833edbe9..b3350d8a12aa 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java @@ -346,7 +346,7 @@ public void setCommandCounts(CommandQueueReportProto cmds, * Retrieve the number of queued commands of the given type, as reported by * the datanode at the last heartbeat. * @param cmd The command for which to receive the queued command count - * @return -1 if we have no information about the count, or an integer >= 0 + * @return -1 if we have no information about the count, or an integer >= 0 * indicating the command count at the last heartbeat. */ public int getCommandCount(SCMCommandProto.Type cmd) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java index 25be60945a91..992dc82582b5 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java @@ -97,8 +97,6 @@ default RegisteredCommand register( * @param type The type of the SCMCommand. * @param scmCommand A BiConsumer that takes a DatanodeDetails and a * SCMCommand object and performs the necessary actions. 
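The SCMDBDefinition change above replaces per-call construction with a shared instance reached through get(), and the codecs now expose getTypeClass(). The shape of the singleton, reduced to a sketch (the real class keeps its column-family map and extends DBDefinition.WithMap):

    final class DbDefinitionSingletonSketch {
      // Simplified stand-in for SCMDBDefinition.
      static final class ExampleDBDefinition {
        private static final ExampleDBDefinition INSTANCE = new ExampleDBDefinition();

        static ExampleDBDefinition get() {
          return INSTANCE;
        }

        private ExampleDBDefinition() {
          // column family definitions are registered once, here
        }
      }

      private DbDefinitionSingletonSketch() { }
    }

    // Call sites in the patch accordingly change from
    //   DBStoreBuilder.createDBStore(conf, new SCMDBDefinition())
    // to
    //   DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get())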
- * @return whatever the regular register command returns with default - * layout version passed in. */ default void registerSendCommandNotify(SCMCommandProto.Type type, BiConsumer> scmCommand) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java index a66fc0d22fb5..1bd9677a3631 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java @@ -45,7 +45,7 @@ public interface NodeManagerMXBean { /** * @return Get the NodeStatus table information like hostname, - * Commissioned State & Operational State column for dataNode + * Commissioned State and Operational State column for dataNode */ Map> getNodeStatusInfo(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java index 3c3ff8fb8338..78c1801a103d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java @@ -791,7 +791,7 @@ public void run() { * * This method is synchronized to coordinate node state updates between * the upgrade finalization thread which calls this method, and the - * node health processing thread that calls {@link this#checkNodesHealth}. + * node health processing thread that calls {@link #checkNodesHealth}. */ public synchronized void forceNodesToHealthyReadOnly() { try { @@ -817,7 +817,7 @@ public synchronized void forceNodesToHealthyReadOnly() { /** * This method is synchronized to coordinate node state updates between * the upgrade finalization thread which calls - * {@link this#forceNodesToHealthyReadOnly}, and the node health processing + * {@link #forceNodesToHealthyReadOnly}, and the node health processing * thread that calls this method. */ @VisibleForTesting diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index 7121d8f7a9d2..7db0c88e1739 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -1166,7 +1166,6 @@ public Map> getNodeStatusInfo() { * Calculate the storage capacity of the DataNode node. * @param storageReports Calculate the storage capacity corresponding * to the storage collection. - * @return */ public static String calculateStorageCapacity( List storageReports) { @@ -1214,7 +1213,6 @@ private static String convertUnit(double value) { * Calculate the storage usage percentage of a DataNode node. * @param storageReports Calculate the storage percentage corresponding * to the storage collection. 
- * @return */ public static String[] calculateStoragePercentage( List storageReports) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java index 4dd0443a5055..d6058877126b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java @@ -46,7 +46,6 @@ void addPipeline(HddsProtos.Pipeline pipelineProto) /** * Removing pipeline would be replicated to Ratis. * @param pipelineIDProto - * @return Pipeline removed * @throws IOException */ @Replicate diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java index 7432f5f7f2cf..c4d2bfdc642b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java @@ -115,6 +115,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ResetDeletedBlockRetryCountResponseProto; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; @@ -857,21 +858,21 @@ public SCMListContainerResponseProto listContainer( } else if (request.hasFactor()) { factor = request.getFactor(); } - List containerList; + ContainerListResult containerListAndTotalCount; if (factor != null) { // Call from a legacy client - containerList = + containerListAndTotalCount = impl.listContainer(startContainerID, count, state, factor); } else { - containerList = - impl.listContainer(startContainerID, count, state, replicationType, - repConfig); + containerListAndTotalCount = + impl.listContainer(startContainerID, count, state, replicationType, repConfig); } SCMListContainerResponseProto.Builder builder = SCMListContainerResponseProto.newBuilder(); - for (ContainerInfo container : containerList) { + for (ContainerInfo container : containerListAndTotalCount.getContainerInfoList()) { builder.addContainers(container.getProtobuf()); } + builder.setContainerCount(containerListAndTotalCount.getTotalCount()); return builder.build(); } @@ -1001,6 +1002,7 @@ public HddsProtos.GetScmInfoResponseProto getScmInfo( .setClusterId(scmInfo.getClusterId()) .setScmId(scmInfo.getScmId()) .addAllPeerRoles(scmInfo.getRatisPeerRoles()) + .setScmRatisEnabled(scmInfo.getScmRatisEnabled()) .build(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationManager.java index d38a904d09c0..fc7249462c4a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationManager.java +++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationManager.java @@ -118,6 +118,8 @@ public class RootCARotationManager extends StatefulService { * * @param scm the storage container manager * + *

    +   * {@code
        *                         (1)   (3)(4)
        *                   --------------------------->
        *                         (2)                        scm2(Follower)
    @@ -130,8 +132,8 @@ public class RootCARotationManager extends StatefulService {
        *                   --------------------------->
        *                          (2)                       scm3(Follower)
        *                   <---------------------------
    -   *
    -   *
    +   * }
    +   * 
    * (1) Rotation Prepare * (2) Rotation Prepare Ack * (3) Rotation Commit @@ -186,7 +188,7 @@ public void notifyStatusChanged() { waitAckTask.cancel(true); } if (waitAckTimeoutTask != null) { - waitAckTask.cancel(true); + waitAckTimeoutTask.cancel(true); } if (clearPostProcessingTask != null) { clearPostProcessingTask.cancel(true); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index 40d153a6bb41..6fdc81ac9a3a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.FetchMetrics; import org.apache.hadoop.hdds.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; @@ -107,6 +108,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; @@ -417,11 +419,12 @@ private boolean hasRequiredReplicas(ContainerInfo contInfo) { * @param startContainerID start containerID. * @param count count must be {@literal >} 0. * - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ @Override - public List listContainer(long startContainerID, + public ContainerListResult listContainer(long startContainerID, int count) throws IOException { return listContainer(startContainerID, count, null, null, null); } @@ -433,11 +436,12 @@ public List listContainer(long startContainerID, * @param count count must be {@literal >} 0. * @param state Container with this state will be returned. * - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. * @throws IOException */ @Override - public List listContainer(long startContainerID, + public ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException { return listContainer(startContainerID, count, state, null, null); } @@ -449,53 +453,36 @@ public List listContainer(long startContainerID, * @param count count must be {@literal >} 0. * @param state Container with this state will be returned. * @param factor Container factor. - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. 
* @throws IOException */ @Override @Deprecated - public List listContainer(long startContainerID, + public ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) throws IOException { + return listContainerInternal(startContainerID, count, state, factor, null, null); + } + + private ContainerListResult listContainerInternal(long startContainerID, int count, + HddsProtos.LifeCycleState state, + HddsProtos.ReplicationFactor factor, + HddsProtos.ReplicationType replicationType, + ReplicationConfig repConfig) throws IOException { boolean auditSuccess = true; - Map auditMap = Maps.newHashMap(); - auditMap.put("startContainerID", String.valueOf(startContainerID)); - auditMap.put("count", String.valueOf(count)); - if (state != null) { - auditMap.put("state", state.name()); - } - if (factor != null) { - auditMap.put("factor", factor.name()); - } + Map auditMap = buildAuditMap(startContainerID, count, state, factor, replicationType, repConfig); + try { - final ContainerID containerId = ContainerID.valueOf(startContainerID); - if (state != null) { - if (factor != null) { - return scm.getContainerManager().getContainers(state).stream() - .filter(info -> info.containerID().getId() >= startContainerID) - //Filtering EC replication type as EC will not have factor. - .filter(info -> info - .getReplicationType() != HddsProtos.ReplicationType.EC) - .filter(info -> (info.getReplicationFactor() == factor)) - .sorted().limit(count).collect(Collectors.toList()); - } else { - return scm.getContainerManager().getContainers(state).stream() - .filter(info -> info.containerID().getId() >= startContainerID) - .sorted().limit(count).collect(Collectors.toList()); - } - } else { - if (factor != null) { - return scm.getContainerManager().getContainers().stream() - .filter(info -> info.containerID().getId() >= startContainerID) - //Filtering EC replication type as EC will not have factor. - .filter(info -> info - .getReplicationType() != HddsProtos.ReplicationType.EC) - .filter(info -> info.getReplicationFactor() == factor) - .sorted().limit(count).collect(Collectors.toList()); - } else { - return scm.getContainerManager().getContainers(containerId, count); - } - } + Stream containerStream = + buildContainerStream(factor, replicationType, repConfig, getBaseContainerStream(state)); + List containerInfos = + containerStream.filter(info -> info.containerID().getId() >= startContainerID) + .sorted().collect(Collectors.toList()); + List limitedContainers = + containerInfos.stream().limit(count).collect(Collectors.toList()); + long totalCount = (long) containerInfos.size(); + return new ContainerListResult(limitedContainers, totalCount); } catch (Exception ex) { auditSuccess = false; AUDIT.logReadFailure( @@ -509,74 +496,74 @@ public List listContainer(long startContainerID, } } - /** - * Lists a range of containers and get their info. - * - * @param startContainerID start containerID. - * @param count count must be {@literal >} 0. - * @param state Container with this state will be returned. - * @param repConfig Replication Config for the container. - * @return a list of pipeline. 
- * @throws IOException - */ - @Override - public List listContainer(long startContainerID, - int count, HddsProtos.LifeCycleState state, + private Stream buildContainerStream(HddsProtos.ReplicationFactor factor, HddsProtos.ReplicationType replicationType, - ReplicationConfig repConfig) throws IOException { - boolean auditSuccess = true; - Map auditMap = Maps.newHashMap(); + ReplicationConfig repConfig, + Stream containerStream) { + if (factor != null) { + containerStream = containerStream.filter(info -> info.getReplicationType() != HddsProtos.ReplicationType.EC) + .filter(info -> info.getReplicationFactor() == factor); + } else if (repConfig != null) { + // If we have repConfig filter by it, as it includes repType too. + // Otherwise, we may have a filter just for repType, eg all EC containers + // without filtering on their replication scheme + containerStream = containerStream + .filter(info -> info.getReplicationConfig().equals(repConfig)); + } else if (replicationType != null) { + containerStream = containerStream.filter(info -> info.getReplicationType() == replicationType); + } + return containerStream; + } + + private Stream getBaseContainerStream(HddsProtos.LifeCycleState state) { + if (state != null) { + return scm.getContainerManager().getContainers(state).stream(); + } else { + return scm.getContainerManager().getContainers().stream(); + } + } + + private Map buildAuditMap(long startContainerID, int count, + HddsProtos.LifeCycleState state, + HddsProtos.ReplicationFactor factor, + HddsProtos.ReplicationType replicationType, + ReplicationConfig repConfig) { + Map auditMap = new HashMap<>(); auditMap.put("startContainerID", String.valueOf(startContainerID)); auditMap.put("count", String.valueOf(count)); if (state != null) { auditMap.put("state", state.name()); } + if (factor != null) { + auditMap.put("factor", factor.name()); + } if (replicationType != null) { auditMap.put("replicationType", replicationType.toString()); } if (repConfig != null) { auditMap.put("replicationConfig", repConfig.toString()); } - try { - final ContainerID containerId = ContainerID.valueOf(startContainerID); - if (state == null && replicationType == null && repConfig == null) { - // Not filters, so just return everything - return scm.getContainerManager().getContainers(containerId, count); - } - List containerList; - if (state != null) { - containerList = scm.getContainerManager().getContainers(state); - } else { - containerList = scm.getContainerManager().getContainers(); - } + return auditMap; + } - Stream containerStream = containerList.stream() - .filter(info -> info.containerID().getId() >= startContainerID); - // If we have repConfig filter by it, as it includes repType too. - // Otherwise, we may have a filter just for repType, eg all EC containers - // without filtering on their replication scheme - if (repConfig != null) { - containerStream = containerStream - .filter(info -> info.getReplicationConfig().equals(repConfig)); - } else if (replicationType != null) { - containerStream = containerStream - .filter(info -> info.getReplicationType() == replicationType); - } - return containerStream.sorted() - .limit(count) - .collect(Collectors.toList()); - } catch (Exception ex) { - auditSuccess = false; - AUDIT.logReadFailure( - buildAuditMessageForFailure(SCMAction.LIST_CONTAINER, auditMap, ex)); - throw ex; - } finally { - if (auditSuccess) { - AUDIT.logReadSuccess( - buildAuditMessageForSuccess(SCMAction.LIST_CONTAINER, auditMap)); - } - } + /** + * Lists a range of containers and get their info. 
+ * + * @param startContainerID start containerID. + * @param count count must be {@literal >} 0. + * @param state Container with this state will be returned. + * @param repConfig Replication Config for the container. + * @return a list of containers capped by max count allowed + * in "ozone.scm.container.list.max.count" and total number of containers. + * @throws IOException + */ + @Override + public ContainerListResult listContainer(long startContainerID, + int count, HddsProtos.LifeCycleState state, + HddsProtos.ReplicationType replicationType, + ReplicationConfig repConfig) throws IOException { + return listContainerInternal(startContainerID, count, state, null, replicationType, repConfig); } @Override @@ -837,6 +824,7 @@ public ScmInfo getScmInfo() { if (scm.getScmHAManager().getRatisServer() != null) { builder.setRatisPeerRoles( scm.getScmHAManager().getRatisServer().getRatisRoles()); + builder.setScmRatisEnabled(true); } else { // In case, there is no ratis, there is no ratis role. // This will just print the hostname with ratis port as the default @@ -844,6 +832,7 @@ public ScmInfo getScmInfo() { String address = scm.getSCMHANodeDetails().getLocalNodeDetails() .getRatisHostPortStr(); builder.setRatisPeerRoles(Arrays.asList(address)); + builder.setScmRatisEnabled(false); } return builder.build(); } catch (Exception ex) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java index cca2df003742..6f5429a853bd 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java @@ -289,12 +289,12 @@ public interface ContainerReport { public enum ContainerReportType { /** * Incremental container report type - * {@liks IncrementalContainerReportFromDatanode}. + * {@link IncrementalContainerReportFromDatanode}. */ ICR, /** * Full container report type - * {@liks ContainerReportFromDatanode}. + * {@link ContainerReportFromDatanode}. */ FCR } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java index 0cc6ab7ab9db..105e7ac34862 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java @@ -483,7 +483,6 @@ private static String flatten(String input) { /** * Get Key associated with Datanode address for this server. 
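With the refactor above, every listContainer overload returns a ContainerListResult instead of a bare list: the returned containers are capped by ozone.scm.container.list.max.count, while getTotalCount() carries the size of the full match, so callers can tell when a listing was truncated. A hedged consumption sketch; the server reference and the count of 100 are placeholders:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hdds.scm.container.ContainerInfo;
    import org.apache.hadoop.hdds.scm.container.ContainerListResult;
    import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer;

    final class ListContainerResultSketch {
      static void printFirstBatch(SCMClientProtocolServer server) throws IOException {
        ContainerListResult result = server.listContainer(0L, 100);
        List<ContainerInfo> containers = result.getContainerInfoList();
        if (result.getTotalCount() > containers.size()) {
          System.out.println("Listing truncated: " + containers.size()
              + " of " + result.getTotalCount() + " matching containers returned");
        }
        containers.forEach(c -> System.out.println(c.containerID()));
      }

      private ListContainerResultSketch() { }
    }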
- * @return */ protected String getDatanodeAddressKey() { return this.scm.getScmNodeDetails().getDatanodeAddressKey(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java index 2b6fa032b538..5aaf4b7b4852 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java @@ -70,9 +70,10 @@ public String getKerberosKeytab() { * This static class is required to support other classes * that reference the key names and also require attributes. * Example: SCMSecurityProtocol where the KerberosInfo references - * the old configuration with the annotation shown below:- - * @KerberosInfo(serverPrincipal = - * ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY) + * the old configuration with the annotation shown below: + *
    + * {@code KerberosInfo(serverPrincipal = + * ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)} */ public static class ConfigStrings { public static final String HDDS_SCM_HTTP_AUTH_CONFIG_PREFIX = diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java index 88b3c8877460..17318107e3d2 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java @@ -430,7 +430,6 @@ public String getCACertificate() throws IOException { * @param role - node role: OM/SCM/DN. * @param startSerialId - start certificate serial id. * @param count - max number of certificates returned in a batch. - * @return * @throws IOException */ @Override diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 5f69d9fee2bc..ba52b7fcc5f6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -609,7 +609,8 @@ public OzoneConfiguration getConfiguration() { * @param conf HDDS configuration * @param configurator SCM configurator * @return SCM instance - * @throws IOException, AuthenticationException + * @throws IOException on Failure, + * @throws AuthenticationException */ public static StorageContainerManager createSCM( OzoneConfiguration conf, SCMConfigurator configurator) @@ -622,7 +623,8 @@ public static StorageContainerManager createSCM( * * @param conf HDDS configuration * @return SCM instance - * @throws IOException, AuthenticationException + * @throws IOException on Failure, + * @throws AuthenticationException */ public static StorageContainerManager createSCM(OzoneConfiguration conf) throws IOException, AuthenticationException { diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html index 0f233bf4ea18..2748716e67f0 100644 --- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html +++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html @@ -110,6 +110,114 @@

     <h2>Space Statistics</h2>
 
+    <h2>Pipeline Statistics</h2>
+    <table>
+      <tr><th>Pipeline State</th><th>Size</th></tr>
+      <tr><td>Closed</td><td>{{statistics.pipelines.closed}}</td></tr>
+      <tr><td>Allocated</td><td>{{statistics.pipelines.allocated}}</td></tr>
+      <tr><td>Open</td><td>{{statistics.pipelines.open}}</td></tr>
+      <tr><td>Dormant</td><td>{{statistics.pipelines.dormant}}</td></tr>
+    </table>
+
+    <h2>Container Statistics</h2>
+    <table>
+      <tr><th>Operational State</th><th>Size</th></tr>
+      <tr><td>Open</td><td>{{statistics.containers.lifecycle.open}}</td></tr>
+      <tr><td>Closing</td><td>{{statistics.containers.lifecycle.closing}}</td></tr>
+      <tr><td>Quasi Closed</td><td>{{statistics.containers.lifecycle.quasi_closed}}</td></tr>
+      <tr><td>Closed</td><td>{{statistics.containers.lifecycle.closed}}</td></tr>
+      <tr><td>Deleting</td><td>{{statistics.containers.lifecycle.deleting}}</td></tr>
+      <tr><td>Deleted</td><td>{{statistics.containers.lifecycle.deleted}}</td></tr>
+      <tr><td>Recovering</td><td>{{statistics.containers.lifecycle.recovering}}</td></tr>
+    </table>
+    <table>
+      <tr><th>Health</th><th>Size</th></tr>
+      <tr><td>Under Replicated</td><td>{{statistics.containers.health.under_replicated}}</td></tr>
+      <tr><td>Mis Replicated</td><td>{{statistics.containers.health.mis_replicated}}</td></tr>
+      <tr><td>Over Replicated</td><td>{{statistics.containers.health.over_replicated}}</td></tr>
+      <tr><td>Missing</td><td>{{statistics.containers.health.missing}}</td></tr>
+      <tr><td>Unhealthy</td><td>{{statistics.containers.health.unhealthy}}</td></tr>
+      <tr><td>Empty</td><td>{{statistics.containers.health.empty}}</td></tr>
+      <tr><td>Open Unhealthy</td><td>{{statistics.containers.health.open_unhealthy}}</td></tr>
+      <tr><td>Quasi Closed Stuck</td><td>{{statistics.containers.health.quasi_closed_stuck}}</td></tr>
+      <tr><td>Open Without Pipeline</td><td>{{statistics.containers.health.open_without_pipeline}}</td></tr>
+    </table>
+
     <h2>Node Status</h2>

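The scm.js change that follows populates these new tables by polling the SCM web server's JMX JSON servlet. The same data can be pulled outside the browser; the sketch below only reuses the bean name and the jmx?qry= servlet path visible in that change, while the host and port are placeholders for an actual SCM HTTP address, and the JSON is printed raw rather than parsed:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public final class ScmJmxQuerySketch {
      public static void main(String[] args) throws Exception {
        String query = "Hadoop:service=StorageContainerManager,name=ReplicationManagerMetrics";
        URI uri = URI.create("http://scm-host:9876/jmx?qry=" + query);
        HttpResponse<String> response = HttpClient.newHttpClient()
            .send(HttpRequest.newBuilder(uri).GET().build(), HttpResponse.BodyHandlers.ofString());
        // The "beans" array in the response carries fields such as OpenContainers,
        // ClosingContainers and MissingContainers that scm.js maps onto the new tables.
        System.out.println(response.body());
      }

      private ScmJmxQuerySketch() { }
    }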
    diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js index e00f8b8ede8c..fc216c068625 100644 --- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js +++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js @@ -53,6 +53,34 @@ remaining : "N/A", nonscmused : "N/A" } + }, + pipelines : { + closed : "N/A", + allocated : "N/A", + open : "N/A", + dormant : "N/A" + }, + containers : { + lifecycle : { + open : "N/A", + closing : "N/A", + quasi_closed : "N/A", + closed : "N/A", + deleting : "N/A", + deleted : "N/A", + recovering : "N/A" + }, + health : { + under_replicated : "N/A", + mis_replicated : "N/A", + over_replicated : "N/A", + missing : "N/A", + unhealthy : "N/A", + empty : "N/A", + open_unhealthy : "N/A", + quasi_closed_stuck : "N/A", + open_without_pipeline : "N/A" + } } } @@ -142,6 +170,46 @@ } }); }); + + $http.get("jmx?qry=Hadoop:service=SCMPipelineManager,name=SCMPipelineManagerInfo") + .then(function (result) { + const URLScheme = location.protocol.replace(":" , ""); + ctrl.scmpipelinemanager = result.data.beans[0]; + ctrl.scmpipelinemanager.PipelineInfo.forEach(({key, value}) => { + if(key == "CLOSED") { + $scope.statistics.pipelines.closed = value; + } else if(key == "ALLOCATED") { + $scope.statistics.pipelines.allocated = value; + } else if(key == "OPEN") { + $scope.statistics.pipelines.open = value; + } else if(key == "DORMANT") { + $scope.statistics.pipelines.dormant = value; + } + }); + }); + + $http.get("jmx?qry=Hadoop:service=StorageContainerManager,name=ReplicationManagerMetrics") + .then(function (result) { + const URLScheme = location.protocol.replace(":" , ""); + ctrl.scmcontainermanager = result.data.beans[0]; + $scope.statistics.containers.lifecycle.open = ctrl.scmcontainermanager.OpenContainers; + $scope.statistics.containers.lifecycle.closing = ctrl.scmcontainermanager.ClosingContainers; + $scope.statistics.containers.lifecycle.quasi_closed = ctrl.scmcontainermanager.QuasiClosedContainers; + $scope.statistics.containers.lifecycle.closed = ctrl.scmcontainermanager.ClosedContainers; + $scope.statistics.containers.lifecycle.deleting = ctrl.scmcontainermanager.DeletingContainers; + $scope.statistics.containers.lifecycle.deleted = ctrl.scmcontainermanager.DeletedContainers; + $scope.statistics.containers.lifecycle.recovering = ctrl.scmcontainermanager.RecoveringContainers; + $scope.statistics.containers.health.under_replicated = ctrl.scmcontainermanager.UnderReplicatedContainers; + $scope.statistics.containers.health.mis_replicated = ctrl.scmcontainermanager.MisReplicatedContainers; + $scope.statistics.containers.health.over_replicated = ctrl.scmcontainermanager.OverReplicatedContainers; + $scope.statistics.containers.health.missing = ctrl.scmcontainermanager.MissingContainers; + $scope.statistics.containers.health.unhealthy = ctrl.scmcontainermanager.UnhealthyContainers; + $scope.statistics.containers.health.empty = ctrl.scmcontainermanager.EmptyContainers; + $scope.statistics.containers.health.open_unhealthy = ctrl.scmcontainermanager.OpenUnhealthyContainers; + $scope.statistics.containers.health.quasi_closed_stuck = ctrl.scmcontainermanager.StuckQuasiClosedContainers; + $scope.statistics.containers.health.open_without_pipeline = ctrl.scmcontainermanager.OpenContainersWithoutPipeline; + }); + /*if option is 'All' display all records else display specified record on page*/ $scope.UpdateRecordsToShow = () => { if($scope.RecordsToDisplay == 'All') { diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java index 6438b6f8d493..621c9297e7e5 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java @@ -22,15 +22,15 @@ import java.time.Clock; import java.time.ZoneId; import java.time.ZoneOffset; +import java.util.ArrayList; import java.util.List; import java.util.Map; -import java.util.ArrayList; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutorService; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -39,30 +39,30 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.HddsTestUtils; -import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaPendingOps; -import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; -import org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator; -import org.apache.hadoop.hdds.scm.ha.SCMServiceManager; -import org.apache.hadoop.hdds.scm.ha.SCMContext; -import org.apache.hadoop.hdds.scm.ha.SCMHAManager; -import org.apache.hadoop.hdds.scm.node.NodeStatus; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler; import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerManagerImpl; import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.container.ContainerManagerImpl; import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; +import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaPendingOps; import org.apache.hadoop.hdds.scm.events.SCMEvents; +import org.apache.hadoop.hdds.scm.ha.SCMContext; +import org.apache.hadoop.hdds.scm.ha.SCMHAManager; +import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; +import org.apache.hadoop.hdds.scm.ha.SCMServiceManager; +import org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreImpl; +import org.apache.hadoop.hdds.scm.node.NodeStatus; import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider; import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerImpl; +import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider; import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus; import 
org.apache.hadoop.hdds.scm.server.SCMConfigurator; @@ -76,21 +76,19 @@ import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; import org.apache.hadoop.ozone.protocol.commands.CreatePipelineCommand; import org.apache.ozone.test.GenericTestUtils; - -import static org.apache.hadoop.ozone.OzoneConsts.GB; -import static org.apache.hadoop.ozone.OzoneConsts.MB; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertThrows; - import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; +import static org.apache.hadoop.ozone.OzoneConsts.GB; +import static org.apache.hadoop.ozone.OzoneConsts.MB; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; /** * Tests for SCM Block Manager. @@ -273,7 +271,7 @@ void testAllocateBlockInParallel() throws Exception { } CompletableFuture - .allOf(futureList.toArray(new CompletableFuture[futureList.size()])) + .allOf(futureList.toArray(new CompletableFuture[0])) .get(); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java index 03500529ff9d..c8e2f267aff3 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java @@ -265,7 +265,7 @@ private void commitTransactions( List transactionResults) throws IOException { commitTransactions(transactionResults, - dnList.toArray(new DatanodeDetails[3])); + dnList.toArray(new DatanodeDetails[0])); } private void commitTransactions( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java index 83791c3257d8..5e951a6d6809 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java @@ -77,8 +77,7 @@ public class TestContainerManagerImpl { @BeforeEach void setUp() throws Exception { final OzoneConfiguration conf = SCMTestUtils.getConf(testDir); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); scmhaManager = SCMHAManagerStub.getInstance(true); nodeManager = new MockNodeManager(true, 10); sequenceIdGen = new SequenceIdGenerator( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java index 7c3666ad6179..0a863bc8165a 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java +++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java @@ -98,8 +98,7 @@ void setup() throws IOException, InvalidStateTransitionException { final OzoneConfiguration conf = SCMTestUtils.getConf(testDir); nodeManager = new MockNodeManager(true, 10); containerManager = mock(ContainerManager.class); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); scmhaManager = SCMHAManagerStub.getInstance(true); pipelineManager = new MockPipelineManager(dbStore, scmhaManager, nodeManager); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java index a7043d026427..157a65c70148 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java @@ -75,8 +75,7 @@ public void init() throws IOException, TimeoutException { OzoneConfiguration conf = new OzoneConfiguration(); scmhaManager = SCMHAManagerStub.getInstance(true); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); pipelineManager = mock(PipelineManager.class); pipeline = Pipeline.newBuilder().setState(Pipeline.PipelineState.CLOSED) .setId(PipelineID.randomId()) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java index dbcccce598c9..679c0f368f1d 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java @@ -126,8 +126,7 @@ public void setup() throws IOException, InvalidStateTransitionException, new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap, scmContext, versionManager); scmhaManager = SCMHAManagerStub.getInstance(true); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); pipelineManager = new MockPipelineManager(dbStore, scmhaManager, nodeManager); @@ -644,7 +643,7 @@ public void testECReplicaIndexValidation() throws NodeNotFoundException, IOException, TimeoutException { List dns = IntStream.range(0, 5) .mapToObj(i -> randomDatanodeDetails()).collect(Collectors.toList()); - dns.stream().forEach(dn -> nodeManager.register(dn, null, null)); + dns.forEach(dn -> nodeManager.register(dn, null, null)); ECReplicationConfig replicationConfig = new ECReplicationConfig(3, 2); final ContainerInfo container = getECContainer(LifeCycleState.CLOSED, PipelineID.randomId(), replicationConfig); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java index 9ea4ea45b563..a573573a67b9 100644 --- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java @@ -78,8 +78,7 @@ public void setup() throws IOException { final OzoneConfiguration conf = SCMTestUtils.getConf(testDir); this.nodeManager = new MockNodeManager(true, 10); this.containerManager = mock(ContainerManager.class); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); scmhaManager = SCMHAManagerStub.getInstance(true); pipelineManager = new MockPipelineManager(dbStore, scmhaManager, nodeManager); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java index 8aac64de702b..7f36279ba08f 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java @@ -179,8 +179,7 @@ void setup(@TempDir File testDir) throws IOException, InterruptedException, nodeManager = new SimpleMockNodeManager(); eventQueue = new EventQueue(); SCMHAManager scmhaManager = SCMHAManagerStub.getInstance(true); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); PipelineManager pipelineManager = mock(PipelineManager.class); when(pipelineManager.containsPipeline(any(PipelineID.class))) .thenReturn(true); @@ -277,8 +276,7 @@ private void createReplicationManager(ReplicationManagerConfiguration rmConf, SCMHAManager scmHAManager = SCMHAManagerStub .getInstance(true, new SCMDBTransactionBufferImpl()); - dbStore = DBStoreBuilder.createDBStore( - config, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(config, SCMDBDefinition.get()); LegacyReplicationManager legacyRM = new LegacyReplicationManager( config, containerManager, ratisContainerPlacementPolicy, eventQueue, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java index 92509d22685b..4e69f46b6e93 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java @@ -49,7 +49,7 @@ public class TestStatefulServiceStateManagerImpl { void setup(@TempDir File testDir) throws IOException { conf = SCMTestUtils.getConf(testDir); conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - dbStore = DBStoreBuilder.createDBStore(conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); statefulServiceConfig = SCMDBDefinition.STATEFUL_SERVICE_CONFIG.getTable(dbStore); scmhaManager = SCMHAManagerStub.getInstance(true, dbStore); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/OldPipelineIDCodecForTesting.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/OldPipelineIDCodecForTesting.java index fb80fbbee781..f09bb43d4cf7 100644 --- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/OldPipelineIDCodecForTesting.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/OldPipelineIDCodecForTesting.java @@ -30,6 +30,10 @@ * Codec to serialize / deserialize PipelineID. */ public class OldPipelineIDCodecForTesting implements Codec { + @Override + public Class getTypeClass() { + return PipelineID.class; + } @Override public byte[] toPersistedFormat(PipelineID object) throws IOException { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/OldX509CertificateCodecForTesting.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/OldX509CertificateCodecForTesting.java index 67593dc77782..3a8fc9a96328 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/OldX509CertificateCodecForTesting.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/OldX509CertificateCodecForTesting.java @@ -45,6 +45,11 @@ private OldX509CertificateCodecForTesting() { // singleton } + @Override + public Class getTypeClass() { + return X509Certificate.class; + } + @Override public byte[] toPersistedFormat(X509Certificate object) throws IOException { try { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java index f3a303cad738..0862c46e8388 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java @@ -100,8 +100,7 @@ public class TestContainerPlacement { public void setUp() throws Exception { conf = getConf(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); scmhaManager = SCMHAManagerStub.getInstance(true); sequenceIdGen = new SequenceIdGenerator( conf, scmhaManager, SCMDBDefinition.SEQUENCE_ID.getTable(dbStore)); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java index cc9133cf684b..6d11cb5fe587 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java @@ -123,6 +123,7 @@ import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.EnumSource; import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; import org.mockito.ArgumentCaptor; @@ -850,15 +851,12 @@ void testScmHandleJvmPause() throws Exception { } } - @Test - public void testProcessLayoutVersion() throws IOException { - // TODO: Refactor this class to use org.junit.jupiter so test - // parameterization can be used. 
- for (FinalizationCheckpoint checkpoint: FinalizationCheckpoint.values()) { - LOG.info("Testing with SCM finalization checkpoint {}", checkpoint); - testProcessLayoutVersionLowerMlv(checkpoint); - testProcessLayoutVersionReportHigherMlv(checkpoint); - } + @ParameterizedTest + @EnumSource(FinalizationCheckpoint.class) + public void testProcessLayoutVersion(FinalizationCheckpoint checkpoint) throws IOException { + LOG.info("Testing with SCM finalization checkpoint {}", checkpoint); + testProcessLayoutVersionLowerMlv(checkpoint); + testProcessLayoutVersionReportHigherMlv(checkpoint); } // Currently invoked by testProcessLayoutVersion. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java index 385e1c653168..9908210e0747 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java @@ -66,8 +66,7 @@ public class TestPipelineDatanodesIntersection { public void initialize() throws IOException { conf = SCMTestUtils.getConf(testDir); end = false; - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); } @AfterEach diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java index e9407d6a9419..dd994f35b64d 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java @@ -136,7 +136,7 @@ void init(@TempDir File testDir, @TempDir File dbDir) throws Exception { // placement policy (Rack Scatter), so just use the random one. 
conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_EC_IMPL_KEY, SCMContainerPlacementRandom.class.getName()); - dbStore = DBStoreBuilder.createDBStore(conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); nodeManager = new MockNodeManager(true, 20); maxPipelineCount = nodeManager.getNodeCount( HddsProtos.NodeOperationalState.IN_SERVICE, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementFactory.java index 96f62432b315..82fcc01d7eeb 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementFactory.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementFactory.java @@ -135,8 +135,7 @@ private void setupRacks(int datanodeCount, int nodesPerRack, .thenReturn(dn); } - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); scmhaManager = SCMHAManagerStub.getInstance(true); stateManager = PipelineStateManagerImpl.newBuilder() diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java index 0f9ec84f0332..2a4d2de9010e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java @@ -112,8 +112,7 @@ public void init() throws Exception { conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, 10, StorageUnit.MB); nodeManager.setNumPipelinePerDatanode(PIPELINE_LOAD_LIMIT); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); scmhaManager = SCMHAManagerStub.getInstance(true); stateManager = PipelineStateManagerImpl.newBuilder() .setPipelineStore(SCMDBDefinition.PIPELINES.getTable(dbStore)) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java index 9feb9e1f0a93..4a0baa2daca9 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java @@ -68,8 +68,7 @@ public class TestPipelineStateManagerImpl { @BeforeEach public void init() throws Exception { final OzoneConfiguration conf = SCMTestUtils.getConf(testDir); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); SCMHAManager scmhaManager = SCMHAManagerStub.getInstance(true); NodeManager nodeManager = new MockNodeManager(true, 10); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java index 5350c0da86e8..94c0d45276c5 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java +++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java @@ -93,8 +93,7 @@ public void init(int maxPipelinePerNode, OzoneConfiguration conf) public void init(int maxPipelinePerNode, OzoneConfiguration conf, File dir) throws Exception { conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.getAbsolutePath()); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); nodeManager = new MockNodeManager(true, 10); nodeManager.setNumPipelinePerDatanode(maxPipelinePerNode); SCMHAManager scmhaManager = SCMHAManagerStub.getInstance(true); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java index b69ebedb04d8..7fb31d2c7689 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java @@ -60,8 +60,7 @@ public class TestSimplePipelineProvider { public void init() throws Exception { nodeManager = new MockNodeManager(true, 10); final OzoneConfiguration conf = SCMTestUtils.getConf(testDir); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); SCMHAManager scmhaManager = SCMHAManagerStub.getInstance(true); stateManager = PipelineStateManagerImpl.newBuilder() .setPipelineStore(SCMDBDefinition.PIPELINES.getTable(dbStore)) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java index 4f86450d03e7..78aab4843cf4 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java @@ -128,8 +128,7 @@ void setup(@TempDir File testDir) throws IOException { containers = new HashMap<>(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - dbStore = DBStoreBuilder.createDBStore( - conf, new SCMDBDefinition()); + dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); scmhaManager = SCMHAManagerStub.getInstance(true); pipelineManager = new MockPipelineManager(dbStore, scmhaManager, nodeManager); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java index 319caabe40a8..eedef0794459 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java @@ -28,6 +28,7 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Stream; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.HddsConfigKeys; @@ -63,7 +64,10 @@ import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; +import 
org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -108,13 +112,10 @@ public void destroyDbStore() throws Exception { } } - @Test - public void testSafeModeState() throws Exception { - // Test 1: test for 0 containers - testSafeMode(0); - - // Test 2: test for 20 containers - testSafeMode(20); + @ParameterizedTest + @ValueSource(ints = {0, 20}) + public void testSafeModeState(int numContainers) throws Exception { + testSafeMode(numContainers); } @Test @@ -215,36 +216,6 @@ private OzoneConfiguration createConf(double healthyPercent, return conf; } - @Test - public void testSafeModeExitRuleWithPipelineAvailabilityCheck1() - throws Exception { - testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 30, 8, 0.90, 1); - } - - @Test - public void testSafeModeExitRuleWithPipelineAvailabilityCheck2() - throws Exception { - testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 90, 22, 0.10, 0.9); - } - - @Test - public void testSafeModeExitRuleWithPipelineAvailabilityCheck3() - throws Exception { - testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 30, 8, 0, 0.9); - } - - @Test - public void testSafeModeExitRuleWithPipelineAvailabilityCheck4() - throws Exception { - testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 90, 22, 0, 0); - } - - @Test - public void testSafeModeExitRuleWithPipelineAvailabilityCheck5() - throws Exception { - testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 90, 22, 0, 0.5); - } - @ParameterizedTest @CsvSource(value = {"100,0.9,false", "0.9,200,false", "0.9,0.1,true"}) public void testHealthyPipelinePercentWithIncorrectValue(double healthyPercent, @@ -269,6 +240,18 @@ public void testHealthyPipelinePercentWithIncorrectValue(double healthyPercent, assertThat(exception).hasMessageEndingWith("value should be >= 0.0 and <= 1.0"); } + private static Stream testCaseForSafeModeExitRuleWithPipelineAvailabilityCheck() { + return Stream.of( + Arguments.of(100, 30, 8, 0.90, 1), + Arguments.of(100, 90, 22, 0.10, 0.9), + Arguments.of(100, 30, 8, 0, 0.9), + Arguments.of(100, 90, 22, 0, 0), + Arguments.of(100, 90, 22, 0, 0.5) + ); + } + + @ParameterizedTest + @MethodSource("testCaseForSafeModeExitRuleWithPipelineAvailabilityCheck") public void testSafeModeExitRuleWithPipelineAvailabilityCheck( int containerCount, int nodeCount, int pipelineCount, double healthyPipelinePercent, double oneReplicaPercent) @@ -455,12 +438,11 @@ public void testDisableSafeMode() { assertFalse(scmSafeModeManager.getInSafeMode()); } - @Test - public void testSafeModeDataNodeExitRule() throws Exception { + @ParameterizedTest + @ValueSource(ints = {0, 3, 5}) + public void testSafeModeDataNodeExitRule(int numberOfDns) throws Exception { containers = new ArrayList<>(); - testSafeModeDataNodes(0); - testSafeModeDataNodes(3); - testSafeModeDataNodes(5); + testSafeModeDataNodes(numberOfDns); } /** diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java index 7c06b79a2ffb..8e21eef930e3 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java +++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java @@ -17,12 +17,19 @@ */ package org.apache.hadoop.hdds.scm.server; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.ReconfigurationHandler; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmResponseProto; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerManagerImpl; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; +import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocolServerSideTranslatorPB; import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics; import org.apache.hadoop.ozone.container.common.SCMTestUtils; @@ -35,9 +42,13 @@ import java.io.File; import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_READONLY_ADMINISTRATORS; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -112,4 +123,47 @@ public void testReadOnlyAdmins() throws IOException { UserGroupInformation.reset(); } } + + /** + * Tests listContainer of scm. 
+ */ + @Test + public void testScmListContainer() throws Exception { + SCMClientProtocolServer scmServer = + new SCMClientProtocolServer(new OzoneConfiguration(), + mockStorageContainerManager(), mock(ReconfigurationHandler.class)); + + assertEquals(10, scmServer.listContainer(1, 10, + null, HddsProtos.ReplicationType.RATIS, null).getContainerInfoList().size()); + // Test call from a legacy client, which uses a different method of listContainer + assertEquals(10, scmServer.listContainer(1, 10, null, + HddsProtos.ReplicationFactor.THREE).getContainerInfoList().size()); + } + + private StorageContainerManager mockStorageContainerManager() { + List infos = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + infos.add(newContainerInfoForTest()); + } + ContainerManagerImpl containerManager = mock(ContainerManagerImpl.class); + when(containerManager.getContainers()).thenReturn(infos); + StorageContainerManager storageContainerManager = mock(StorageContainerManager.class); + when(storageContainerManager.getContainerManager()).thenReturn(containerManager); + + SCMNodeDetails scmNodeDetails = mock(SCMNodeDetails.class); + when(scmNodeDetails.getClientProtocolServerAddress()).thenReturn(new InetSocketAddress("localhost", 9876)); + when(scmNodeDetails.getClientProtocolServerAddressKey()).thenReturn("test"); + when(storageContainerManager.getScmNodeDetails()).thenReturn(scmNodeDetails); + return storageContainerManager; + } + + private ContainerInfo newContainerInfoForTest() { + return new ContainerInfo.Builder() + .setContainerID(1) + .setPipelineID(PipelineID.randomId()) + .setReplicationConfig( + RatisReplicationConfig + .getInstance(HddsProtos.ReplicationFactor.THREE)) + .build(); + } } diff --git a/hadoop-hdds/test-utils/pom.xml b/hadoop-hdds/test-utils/pom.xml index f720d65bdf50..6ff87083c03d 100644 --- a/hadoop-hdds/test-utils/pom.xml +++ b/hadoop-hdds/test-utils/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-test-utils - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Test Utils Apache Ozone HDDS Test Utils jar diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java index c9fa668445dd..48abd5e986ef 100644 --- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java +++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java @@ -29,6 +29,7 @@ import java.time.Instant; import java.util.List; import java.util.Map; +import java.util.concurrent.Callable; import java.util.concurrent.TimeoutException; import com.google.common.base.Preconditions; @@ -205,6 +206,20 @@ public static void waitFor(BooleanSupplier check, int checkEveryMillis, } } + public static T assertThrows( + Class expectedType, + Callable func) { + return Assertions.assertThrows(expectedType, () -> { + final AutoCloseable closeable = func.call(); + try { + if (closeable != null) { + closeable.close(); + } + } catch (Exception ignored) { + } + }); + } + /** * @deprecated use sl4fj based version */ @@ -335,11 +350,11 @@ private static long monotonicNow() { * *

   * TODO: Add lambda support once Java 8 is common.
-  * <pre>
+  * {@code
   *   SystemErrCapturer.withCapture(capture -> {
   *     ...
   *   })
-  * </pre>
    + * } */ public static class SystemErrCapturer implements AutoCloseable { private final ByteArrayOutputStream bytes; @@ -376,11 +391,11 @@ public void close() throws Exception { * *

   * TODO: Add lambda support once Java 8 is common.
-  * <pre>
+  * {@code
   *   SystemOutCapturer.withCapture(capture -> {
   *     ...
   *   })
-  * </pre>
    + * } */ public static class SystemOutCapturer implements AutoCloseable { private final ByteArrayOutputStream bytes; @@ -475,8 +490,8 @@ public static final class ReflectionUtils { * This method provides the modifiers field using reflection approach which is compatible * for both pre Java 9 and post java 9 versions. * @return modifiers field - * @throws IllegalAccessException - * @throws NoSuchFieldException + * @throws IllegalAccessException illegalAccessException, + * @throws NoSuchFieldException noSuchFieldException. */ public static Field getModifiersField() throws IllegalAccessException, NoSuchFieldException { Field modifiersField = null; diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java index 661989dade17..d6b028c815f7 100644 --- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java +++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java @@ -77,11 +77,13 @@ public interface TimeoutHandler { * is called. This returns the exception passed in (if any), * or generates a new one. *
+   * {@code
    * await(
    *   30 * 1000,
    *   () -> { return 0 == filesystem.listFiles(new Path("/")).length); },
    *   () -> 500),
    *   (timeout, ex) -> ex != null ? ex : new TimeoutException("timeout"));
+   * }
    * </pre>
    *
    * @param timeoutMillis timeout in milliseconds.
@@ -160,9 +162,11 @@ public static int await(int timeoutMillis,
    * <p>
    * Example: await for probe to succeed:
    * <pre>
+   * {@code
    * await(
    *   30 * 1000, 500,
    *   () -> { return 0 == filesystem.listFiles(new Path("/")).length); });
+   * }
    * </pre>
    * * @param timeoutMillis timeout in milliseconds. diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml index daf6f3d40f42..583c801bcd42 100644 --- a/hadoop-hdds/tools/pom.xml +++ b/hadoop-hdds/tools/pom.xml @@ -20,11 +20,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT hdds-tools - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Distributed Data Store Tools Apache Ozone HDDS Tools jar diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index 76334d124ea5..884d7f939a68 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.client.ClientTrustManager; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.security.x509.certificate.client.CACertificateProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.ContainerID; @@ -83,6 +84,7 @@ public class ContainerOperationClient implements ScmClient { private final boolean containerTokenEnabled; private final OzoneConfiguration configuration; private XceiverClientManager xceiverClientManager; + private int maxCountOfContainerList; public synchronized XceiverClientManager getXceiverClientManager() throws IOException { @@ -110,6 +112,9 @@ public ContainerOperationClient(OzoneConfiguration conf) throws IOException { } containerTokenEnabled = conf.getBoolean(HDDS_CONTAINER_TOKEN_ENABLED, HDDS_CONTAINER_TOKEN_ENABLED_DEFAULT); + maxCountOfContainerList = conf + .getInt(ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT, + ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT); } private XceiverClientManager newXCeiverClientManager(ConfigurationSource conf) @@ -339,17 +344,29 @@ public void deleteContainer(long containerID, boolean force) } @Override - public List listContainer(long startContainerID, + public ContainerListResult listContainer(long startContainerID, int count) throws IOException { + if (count > maxCountOfContainerList) { + LOG.warn("Attempting to list {} containers. However, this exceeds" + + " the cluster's current limit of {}. The results will be capped at the" + + " maximum allowed count.", count, maxCountOfContainerList); + count = maxCountOfContainerList; + } return storageContainerLocationClient.listContainer( startContainerID, count); } @Override - public List listContainer(long startContainerID, + public ContainerListResult listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType repType, ReplicationConfig replicationConfig) throws IOException { + if (count > maxCountOfContainerList) { + LOG.warn("Attempting to list {} containers. However, this exceeds" + + " the cluster's current limit of {}. 
The results will be capped at the" + + " maximum allowed count.", count, maxCountOfContainerList); + count = maxCountOfContainerList; + } return storageContainerLocationClient.listContainer( startContainerID, count, state, repType, replicationConfig); } @@ -519,6 +536,11 @@ public List getScmRatisRoles() throws IOException { return storageContainerLocationClient.getScmInfo().getRatisPeerRoles(); } + @Override + public boolean isScmRatisEnable() throws IOException { + return storageContainerLocationClient.getScmInfo().getScmRatisEnabled(); + } + @Override public boolean rotateSecretKeys(boolean force) throws IOException { return secretKeyClient.checkAndRotate(force); diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java index 54c69273f0bc..15dd873491cc 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java @@ -26,6 +26,7 @@ import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; +import picocli.CommandLine.ParentCommand; import picocli.CommandLine.Model.CommandSpec; import picocli.CommandLine.Spec; @@ -51,6 +52,9 @@ public class ContainerCommands implements Callable, SubcommandWithParent { @Spec private CommandSpec spec; + @ParentCommand + private OzoneAdmin parent; + @Override public Void call() throws Exception { GenericCli.missingSubcommand(spec); @@ -61,4 +65,8 @@ public Void call() throws Exception { public Class getParentType() { return OzoneAdmin.class; } + + public OzoneAdmin getParent() { + return parent; + } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java index ecc43d04087a..88ccef702b3e 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdds.scm.cli.container; import java.io.IOException; -import java.util.List; import com.google.common.base.Strings; import org.apache.hadoop.hdds.cli.HddsVersionProvider; @@ -26,7 +25,9 @@ import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; +import org.apache.hadoop.hdds.scm.container.ContainerListResult; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -37,6 +38,7 @@ import com.fasterxml.jackson.databind.SerializationFeature; import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; import picocli.CommandLine.Command; +import picocli.CommandLine.ParentCommand; import picocli.CommandLine.Help.Visibility; import picocli.CommandLine.Option; @@ -55,10 +57,15 @@ public class ListSubcommand extends ScmSubcommand { private long startId; @Option(names = {"-c", "--count"}, - description = "Maximum number of containers to list", + description = "Maximum number of containers to list.", defaultValue = "20", showDefaultValue = Visibility.ALWAYS) private int count; + @Option(names = {"-a", "--all"}, + description = 
"List all containers.", + defaultValue = "false") + private boolean all; + @Option(names = {"--state"}, description = "Container state(OPEN, CLOSING, QUASI_CLOSED, CLOSED, " + "DELETING, DELETED)") @@ -75,6 +82,9 @@ public class ListSubcommand extends ScmSubcommand { private static final ObjectWriter WRITER; + @ParentCommand + private ContainerCommands parent; + static { ObjectMapper mapper = new ObjectMapper() .registerModule(new JavaTimeModule()) @@ -105,12 +115,49 @@ public void execute(ScmClient scmClient) throws IOException { ReplicationType.fromProto(type), replication, new OzoneConfiguration()); } - List containerList = - scmClient.listContainer(startId, count, state, type, repConfig); - // Output data list - for (ContainerInfo container : containerList) { - outputContainerInfo(container); + int maxCountAllowed = parent.getParent().getOzoneConf() + .getInt(ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT, + ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT); + + ContainerListResult containerListAndTotalCount; + + if (!all) { + if (count > maxCountAllowed) { + System.err.printf("Attempting to list the first %d records of containers." + + " However it exceeds the cluster's current limit of %d. The results will be capped at the" + + " maximum allowed count.%n", count, ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT); + count = maxCountAllowed; + } + containerListAndTotalCount = scmClient.listContainer(startId, count, state, type, repConfig); + for (ContainerInfo container : containerListAndTotalCount.getContainerInfoList()) { + outputContainerInfo(container); + } + + if (containerListAndTotalCount.getTotalCount() > count) { + System.err.printf("Displaying %d out of %d containers. " + + "Container list has more containers.%n", + count, containerListAndTotalCount.getTotalCount()); + } + } else { + // Batch size is either count passed through cli or maxCountAllowed + int batchSize = (count > 0) ? 
count : maxCountAllowed; + long currentStartId = startId; + int fetchedCount; + + do { + // Fetch containers in batches of 'batchSize' + containerListAndTotalCount = scmClient.listContainer(currentStartId, batchSize, state, type, repConfig); + fetchedCount = containerListAndTotalCount.getContainerInfoList().size(); + + for (ContainerInfo container : containerListAndTotalCount.getContainerInfoList()) { + outputContainerInfo(container); + } + + if (fetchedCount > 0) { + currentStartId = containerListAndTotalCount.getContainerInfoList().get(fetchedCount - 1).getContainerID() + 1; + } + } while (fetchedCount > 0); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java index 7c70456995b4..e5392ef618d8 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java @@ -59,12 +59,22 @@ public void execute(ScmClient scmClient) throws IOException { List pipelineList = new ArrayList<>(); Predicate predicate = replicationFilter.orElse(null); - for (Pipeline pipeline : scmClient.listPipelines()) { - boolean filterPassed = (predicate != null) && predicate.test(pipeline); - if (pipeline.getPipelineState() != Pipeline.PipelineState.CLOSED && filterPassed) { - pipelineList.add(pipeline); + List pipelines = scmClient.listPipelines(); + if (predicate == null) { + for (Pipeline pipeline : pipelines) { + if (pipeline.getPipelineState() != Pipeline.PipelineState.CLOSED) { + pipelineList.add(pipeline); + } + } + } else { + for (Pipeline pipeline : pipelines) { + boolean filterPassed = predicate.test(pipeline); + if (pipeline.getPipelineState() != Pipeline.PipelineState.CLOSED && filterPassed) { + pipelineList.add(pipeline); + } } } + System.out.println("Sending close command for " + pipelineList.size() + " pipelines..."); pipelineList.forEach(pipeline -> { try { diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/pipeline/TestClosePipelinesSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/pipeline/TestClosePipelinesSubCommand.java new file mode 100644 index 000000000000..013350fe871d --- /dev/null +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/pipeline/TestClosePipelinesSubCommand.java @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdds.scm.cli.pipeline; + +import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import picocli.CommandLine; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.PrintStream; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Stream; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.params.provider.Arguments.arguments; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Tests for the ClosePipelineSubcommand class. + */ +class TestClosePipelinesSubCommand { + + private static final String DEFAULT_ENCODING = StandardCharsets.UTF_8.name(); + private final ByteArrayOutputStream outContent = new ByteArrayOutputStream(); + private final ByteArrayOutputStream errContent = new ByteArrayOutputStream(); + private final PrintStream originalOut = System.out; + private final PrintStream originalErr = System.err; + private ClosePipelineSubcommand cmd; + private ScmClient scmClient; + + public static Stream values() { + return Stream.of( + arguments( + new String[]{"--all"}, + "Sending close command for 2 pipelines...\n", + "with empty parameters" + ), + arguments( + new String[]{"--all", "-ffc", "THREE"}, + "Sending close command for 1 pipelines...\n", + "by filter factor, opened" + ), + arguments( + new String[]{"--all", "-ffc", "ONE"}, + "Sending close command for 0 pipelines...\n", + "by filter factor, closed" + ), + arguments( + new String[]{"--all", "-r", "rs-3-2-1024k", "-t", "EC"}, + "Sending close command for 1 pipelines...\n", + "by replication and type, opened" + ), + arguments( + new String[]{"--all", "-r", "rs-6-3-1024k", "-t", "EC"}, + "Sending close command for 0 pipelines...\n", + "by replication and type, closed" + ), + arguments( + new String[]{"--all", "-t", "EC"}, + "Sending close command for 1 pipelines...\n", + "by type, opened" + ), + arguments( + new String[]{"--all", "-t", "RS"}, + "Sending close command for 0 pipelines...\n", + "by type, closed" + ) + ); + } + + @BeforeEach + public void setup() throws IOException { + cmd = new ClosePipelineSubcommand(); + System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); + System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); + + scmClient = mock(ScmClient.class); + when(scmClient.listPipelines()).thenAnswer(invocation -> createPipelines()); + } + + @AfterEach + public void tearDown() { + System.setOut(originalOut); + System.setErr(originalErr); + } + + @ParameterizedTest(name = "{index}. 
{2}") + @MethodSource("values") + void testCloseAllPipelines(String[] commands, String expectedOutput, String testName) throws IOException { + CommandLine c = new CommandLine(cmd); + c.parseArgs(commands); + cmd.execute(scmClient); + assertEquals(expectedOutput, outContent.toString(DEFAULT_ENCODING)); + } + + private List createPipelines() { + List pipelines = new ArrayList<>(); + pipelines.add(createPipeline(StandaloneReplicationConfig.getInstance(ONE), + Pipeline.PipelineState.CLOSED)); + pipelines.add(createPipeline(RatisReplicationConfig.getInstance(THREE), + Pipeline.PipelineState.OPEN)); + pipelines.add(createPipeline(RatisReplicationConfig.getInstance(THREE), + Pipeline.PipelineState.CLOSED)); + + pipelines.add(createPipeline( + new ECReplicationConfig(3, 2), Pipeline.PipelineState.OPEN)); + pipelines.add(createPipeline( + new ECReplicationConfig(3, 2), Pipeline.PipelineState.CLOSED)); + pipelines.add(createPipeline( + new ECReplicationConfig(6, 3), Pipeline.PipelineState.CLOSED)); + pipelines.add(createPipeline( + RatisReplicationConfig.getInstance(THREE), Pipeline.PipelineState.CLOSED)); + return pipelines; + } + + private Pipeline createPipeline(ReplicationConfig repConfig, + Pipeline.PipelineState state) { + return new Pipeline.Builder() + .setId(PipelineID.randomId()) + .setCreateTimestamp(System.currentTimeMillis()) + .setState(state) + .setReplicationConfig(repConfig) + .setNodes(createDatanodeDetails(1)) + .build(); + } + + private List createDatanodeDetails(int count) { + List dns = new ArrayList<>(); + for (int i = 0; i < count; i++) { + HddsProtos.DatanodeDetailsProto dnd = + HddsProtos.DatanodeDetailsProto.newBuilder() + .setHostName("host" + i) + .setIpAddress("1.2.3." + i + 1) + .setNetworkLocation("/default") + .setNetworkName("host" + i) + .addPorts(HddsProtos.Port.newBuilder() + .setName("ratis").setValue(5678).build()) + .setUuid(UUID.randomUUID().toString()) + .build(); + dns.add(DatanodeDetails.getFromProtoBuf(dnd)); + } + return dns; + } +} diff --git a/hadoop-ozone/client/pom.xml b/hadoop-ozone/client/pom.xml index 545faba51ac1..6b5a1ac0c8b7 100644 --- a/hadoop-ozone/client/pom.xml +++ b/hadoop-ozone/client/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-client - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Client Apache Ozone Client jar diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java index 65dce09cba16..56ca8798f223 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java @@ -217,7 +217,7 @@ public S3SecretValue getS3Secret(String kerberosID, boolean createIfNotExist) * Set secretKey for accessId. 
* @param accessId * @param secretKey - * @return S3SecretValue pair + * @return {@code S3SecretValue } pair * @throws IOException */ public S3SecretValue setS3Secret(String accessId, String secretKey) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java index 3a63a5934692..8bd648545d46 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java @@ -26,6 +26,7 @@ import java.io.IOException; import com.google.common.annotations.VisibleForTesting; +import org.apache.ratis.util.UncheckedAutoCloseable; /** * OzoneClient connects to Ozone Cluster and @@ -76,6 +77,7 @@ public class OzoneClient implements Closeable { private final ClientProtocol proxy; private final ObjectStore objectStore; private ConfigurationSource conf; + private final UncheckedAutoCloseable leakTracker = OzoneClientFactory.track(this); /** * Creates a new OzoneClient object, generally constructed @@ -119,7 +121,11 @@ public ConfigurationSource getConfiguration() { */ @Override public void close() throws IOException { - proxy.close(); + try { + proxy.close(); + } finally { + leakTracker.close(); + } } /** diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java index 44239aafcebb..1c673618d075 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java @@ -23,9 +23,11 @@ import java.io.IOException; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.MutableConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.LeakDetector; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; @@ -34,13 +36,17 @@ import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; import org.apache.hadoop.security.token.Token; -import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; +import org.apache.ratis.util.UncheckedAutoCloseable; + +import com.google.common.base.Preconditions; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; + /** * Factory class to create OzoneClients. 
*/ @@ -54,6 +60,21 @@ public final class OzoneClientFactory { */ private OzoneClientFactory() { } + private static final LeakDetector OZONE_CLIENT_LEAK_DETECTOR = + new LeakDetector("OzoneClientObject"); + + public static UncheckedAutoCloseable track(AutoCloseable object) { + final Class clazz = object.getClass(); + final StackTraceElement[] stackTrace = HddsUtils.getStackTrace(LOG); + return OZONE_CLIENT_LEAK_DETECTOR.track(object, + () -> HddsUtils.reportLeak(clazz, + HddsUtils.formatStackTrace(stackTrace, 4), LOG)); + } + + public static Logger getLogger() { + return LOG; + } + /** * Constructs and return an OzoneClient with default configuration. @@ -170,7 +191,7 @@ private static OzoneClient getRpcClient(ClientProtocol clientProtocol, * Create OzoneClient for token renew/cancel operations. * @param conf Configuration to be used for OzoneCient creation * @param token ozone token is involved - * @return + * @return OzoneClient * @throws IOException */ public static OzoneClient getOzoneClient(Configuration conf, diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java index 2d83d88ed5e8..76baefd71dd6 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java @@ -20,6 +20,8 @@ import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum; import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum; +import org.apache.hadoop.fs.PathIOException; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.XceiverClientFactory; @@ -38,6 +40,7 @@ import org.slf4j.LoggerFactory; import java.io.IOException; +import java.nio.ByteBuffer; import java.util.List; /** @@ -150,6 +153,90 @@ protected void setChecksumType(ContainerProtos.ChecksumType type) { checksumType = type; } + protected abstract AbstractBlockChecksumComputer getBlockChecksumComputer(List chunkInfos, + long blockLength); + + protected abstract String populateBlockChecksumBuf(ByteBuffer blockChecksumByteBuffer) throws IOException; + + protected abstract List getChunkInfos( + OmKeyLocationInfo keyLocationInfo) throws IOException; + + protected ByteBuffer getBlockChecksumFromChunkChecksums(AbstractBlockChecksumComputer blockChecksumComputer) + throws IOException { + blockChecksumComputer.compute(getCombineMode()); + return blockChecksumComputer.getOutByteBuffer(); + } + + /** + * Compute block checksums block by block and append the raw bytes of the + * block checksums into getBlockChecksumBuf(). 
+ * + * @throws IOException + */ + protected void checksumBlocks() throws IOException { + long currentLength = 0; + for (int blockIdx = 0; + blockIdx < getKeyLocationInfoList().size() && getRemaining() >= 0; + blockIdx++) { + OmKeyLocationInfo keyLocationInfo = + getKeyLocationInfoList().get(blockIdx); + if (currentLength > getLength()) { + return; + } + + if (!checksumBlock(keyLocationInfo)) { + throw new PathIOException(getSrc(), + "Fail to get block checksum for " + keyLocationInfo + + ", checksum combine mode: " + getCombineMode()); + } + + currentLength += keyLocationInfo.getLength(); + } + } + + /** + * Return true when sounds good to continue or retry, false when severe + * condition or totally failed. + */ + protected boolean checksumBlock(OmKeyLocationInfo keyLocationInfo) + throws IOException { + // for each block, send request + List chunkInfos = + getChunkInfos(keyLocationInfo); + if (chunkInfos.isEmpty()) { + return false; + } + + long blockNumBytes = keyLocationInfo.getLength(); + + if (getRemaining() < blockNumBytes) { + blockNumBytes = getRemaining(); + } + setRemaining(getRemaining() - blockNumBytes); + + ContainerProtos.ChecksumData checksumData = + chunkInfos.get(0).getChecksumData(); + setChecksumType(checksumData.getType()); + int bytesPerChecksum = checksumData.getBytesPerChecksum(); + setBytesPerCRC(bytesPerChecksum); + + AbstractBlockChecksumComputer blockChecksumComputer = getBlockChecksumComputer(chunkInfos, + keyLocationInfo.getLength()); + ByteBuffer blockChecksumByteBuffer = + getBlockChecksumFromChunkChecksums(blockChecksumComputer); + String blockChecksumForDebug = + populateBlockChecksumBuf(blockChecksumByteBuffer); + + LOG.debug("Got reply from {} {} for block {}: blockChecksum={}, " + + "blockChecksumType={}", + keyInfo.getReplicationConfig().getReplicationType() == HddsProtos.ReplicationType.EC + ? "EC pipeline" : "pipeline", + keyLocationInfo.getPipeline(), keyLocationInfo.getBlockID(), + blockChecksumForDebug, checksumData.getType()); + + return true; + } + /** * Request the blocks created in the most recent version from Ozone Manager. * @@ -219,14 +306,6 @@ public void compute() throws IOException { } } - /** - * Compute block checksums block by block and append the raw bytes of the - * block checksums into getBlockChecksumBuf(). - * - * @throws IOException - */ - protected abstract void checksumBlocks() throws IOException; - /** * Make final file checksum result given the per-block or per-block-group * checksums collected into getBlockChecksumBuf(). 
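The BaseFileChecksumHelper hunk above pulls the per-block loop (checksumBlocks/checksumBlock) up into the abstract helper and leaves subclasses to supply only the block-level pieces: fetching chunk checksum metadata and combining it into a block checksum. The sketch below shows that template-method shape with simplified, hypothetical types (ChecksumHelperSketch, XorChecksumHelperSketch, plain byte[] chunk checksums); it is not the actual Ozone API, which additionally tracks remaining length, checksum type, and replication-specific pipelines.

import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;

// Base class owns the per-block iteration, mirroring checksumBlocks()/checksumBlock().
abstract class ChecksumHelperSketch {
  private final StringBuilder blockChecksumBuf = new StringBuilder();

  // Hook 1: fetch per-chunk checksum data for one block (replicated vs. EC helpers differ here).
  protected abstract List<byte[]> getChunkChecksums(long blockId) throws Exception;

  // Hook 2: combine one block's chunk checksums into a block checksum.
  protected abstract ByteBuffer computeBlockChecksum(List<byte[]> chunkChecksums) throws Exception;

  public final String checksumBlocks(List<Long> blockIds) throws Exception {
    for (long blockId : blockIds) {
      List<byte[]> chunkChecksums = getChunkChecksums(blockId);
      if (chunkChecksums.isEmpty()) {
        throw new IllegalStateException("No chunk checksums for block " + blockId);
      }
      ByteBuffer blockChecksum = computeBlockChecksum(chunkChecksums);
      blockChecksumBuf.append(Integer.toHexString(blockChecksum.getInt())).append(' ');
    }
    return blockChecksumBuf.toString().trim();
  }
}

// A toy subclass: XORs the chunk checksums, just to exercise the hooks.
class XorChecksumHelperSketch extends ChecksumHelperSketch {
  @Override
  protected List<byte[]> getChunkChecksums(long blockId) {
    // Pretend each block carries two 4-byte chunk checksums derived from its id.
    return Arrays.asList(intToBytes(blockId), intToBytes(blockId + 1));
  }

  @Override
  protected ByteBuffer computeBlockChecksum(List<byte[]> chunkChecksums) {
    int combined = 0;
    for (byte[] c : chunkChecksums) {
      combined ^= ByteBuffer.wrap(c).getInt();
    }
    return (ByteBuffer) ByteBuffer.allocate(4).putInt(combined).flip();
  }

  private static byte[] intToBytes(long v) {
    return ByteBuffer.allocate(4).putInt((int) v).array();
  }

  public static void main(String[] args) throws Exception {
    System.out.println(new XorChecksumHelperSketch().checksumBlocks(Arrays.asList(1L, 2L, 3L)));
  }
}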
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java index b2c30ed9e08f..a4c24768cddc 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java @@ -45,12 +45,14 @@ public class ECBlockChecksumComputer extends AbstractBlockChecksumComputer { private final List chunkInfoList; private final OmKeyInfo keyInfo; + private final long blockLength; public ECBlockChecksumComputer( - List chunkInfoList, OmKeyInfo keyInfo) { + List chunkInfoList, OmKeyInfo keyInfo, long blockLength) { this.chunkInfoList = chunkInfoList; this.keyInfo = keyInfo; + this.blockLength = blockLength; } @Override @@ -72,15 +74,13 @@ public void compute(OzoneClientConfig.ChecksumCombineMode combineMode) private void computeMd5Crc() { Preconditions.checkArgument(chunkInfoList.size() > 0); - final ContainerProtos.ChunkInfo firstChunkInfo = chunkInfoList.get(0); - long chunkSize = firstChunkInfo.getLen(); - long bytesPerCrc = firstChunkInfo.getChecksumData().getBytesPerChecksum(); - // Total parity checksum bytes per stripe to remove - int parityBytes = getParityBytes(chunkSize, bytesPerCrc); - final MessageDigest digester = MD5Hash.getDigester(); for (ContainerProtos.ChunkInfo chunkInfo : chunkInfoList) { + long chunkSize = chunkInfo.getLen(); + long bytesPerCrc = chunkInfo.getChecksumData().getBytesPerChecksum(); + // Total parity checksum bytes per stripe to remove + int parityBytes = getParityBytes(chunkSize, bytesPerCrc); ByteString stripeChecksum = chunkInfo.getStripeChecksum(); Preconditions.checkNotNull(stripeChecksum); @@ -121,66 +121,40 @@ private void computeCompositeCrc() throws IOException { // Bytes required to create a CRC long bytesPerCrc = firstChunkInfo.getChecksumData().getBytesPerChecksum(); - long chunkSize = firstChunkInfo.getLen(); - - //When EC chunk size is not a multiple of ozone.client.bytes.per.checksum - // (default = 16KB) the last checksum in an EC chunk is only generated for - // offset. - long bytesPerCrcOffset = chunkSize % bytesPerCrc; - - long keySize = keyInfo.getDataSize(); - // Total parity checksum bytes per stripe to remove - int parityBytes = getParityBytes(chunkSize, bytesPerCrc); - - // Number of checksum per chunk, Eg: 2MB EC chunk will - // have 2 checksum per chunk. 
- int numChecksumPerChunk = (int) - (Math.ceil((double) chunkSize / bytesPerCrc)); + long blockSize = blockLength; CrcComposer blockCrcComposer = CrcComposer.newCrcComposer(dataChecksumType, bytesPerCrc); for (ContainerProtos.ChunkInfo chunkInfo : chunkInfoList) { ByteString stripeChecksum = chunkInfo.getStripeChecksum(); + long chunkSize = chunkInfo.getLen(); + + // Total parity checksum bytes per stripe to remove + int parityBytes = getParityBytes(chunkSize, bytesPerCrc); Preconditions.checkNotNull(stripeChecksum); final int checksumSize = stripeChecksum.size(); Preconditions.checkArgument(checksumSize % 4 == 0, "Checksum Bytes size does not match"); - CrcComposer chunkCrcComposer = - CrcComposer.newCrcComposer(dataChecksumType, bytesPerCrc); // Limit parity bytes as they do not contribute to fileChecksum final ByteBuffer byteWrap = stripeChecksum.asReadOnlyByteBuffer(); byteWrap.limit(checksumSize - parityBytes); - long chunkOffsetIndex = 1; while (byteWrap.hasRemaining()) { - - /* - When chunk size is not a multiple of bytes.per.crc we get an offset. - For eg, RS-3-2-1524k is not a multiple of 1MB. So two checksums are - generated 1st checksum for 1024k bytes and 2nd checksum for 500k bytes. - When we reach the 2nd Checksum we need to modify the bytesPerCrc as in - this case 500k is the bytes for which the checksum is generated. - */ - long currentChunkOffset = Long.MAX_VALUE; - if ((chunkOffsetIndex % numChecksumPerChunk == 0) - && (bytesPerCrcOffset > 0)) { - currentChunkOffset = bytesPerCrcOffset; + // Here Math.min is mainly required for the last stripe's last chunk. The last chunk of the last stripe can be + less than the chunkSize; chunkSize is only calculated from each stripe's first chunk. This would be fine + for the rest of the stripe because all the chunks are of the same size. But for the last stripe we don't know + the exact size of the last chunk. So we calculate it with the help of blockSize. If the block size is smaller + than the chunk size, then we know it is the last stripe's last chunk. + long remainingChunkSize = Math.min(blockSize, chunkSize); + while (byteWrap.hasRemaining() && remainingChunkSize > 0) { + final int checksumData = byteWrap.getInt(); + blockCrcComposer.update(checksumData, Math.min(bytesPerCrc, remainingChunkSize)); + remainingChunkSize -= bytesPerCrc; + } - - final int checksumDataCrc = byteWrap.getInt(); - //To handle last chunk when it size is lower than 1524K in the case - // of rs-3-2-1524k. 
- long chunkSizePerChecksum = Math.min(Math.min(keySize, bytesPerCrc), - currentChunkOffset); - chunkCrcComposer.update(checksumDataCrc, chunkSizePerChecksum); - - int chunkChecksumCrc = CrcUtil.readInt(chunkCrcComposer.digest(), 0); - blockCrcComposer.update(chunkChecksumCrc, chunkSizePerChecksum); - keySize -= Math.min(bytesPerCrc, currentChunkOffset); - ++chunkOffsetIndex; + blockSize -= chunkSize; } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECFileChecksumHelper.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECFileChecksumHelper.java index 13ba57169878..db36b9837ad4 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECFileChecksumHelper.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECFileChecksumHelper.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.ozone.client.checksum; -import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; @@ -46,7 +45,6 @@ * The helper class to compute file checksum for EC files. */ public class ECFileChecksumHelper extends BaseFileChecksumHelper { - private int blockIdx; public ECFileChecksumHelper(OzoneVolume volume, OzoneBucket bucket, String keyName, long length, OzoneClientConfig.ChecksumCombineMode @@ -57,63 +55,13 @@ public ECFileChecksumHelper(OzoneVolume volume, OzoneBucket bucket, } @Override - protected void checksumBlocks() throws IOException { - long currentLength = 0; - for (blockIdx = 0; - blockIdx < getKeyLocationInfoList().size() && getRemaining() >= 0; - blockIdx++) { - OmKeyLocationInfo keyLocationInfo = - getKeyLocationInfoList().get(blockIdx); - - if (currentLength > getLength()) { - return; - } - - if (!checksumBlock(keyLocationInfo)) { - throw new PathIOException(getSrc(), - "Fail to get block checksum for " + keyLocationInfo - + ", checksum combine mode: " + getCombineMode()); - } - - currentLength += keyLocationInfo.getLength(); - } - } - - private boolean checksumBlock(OmKeyLocationInfo keyLocationInfo) - throws IOException { - // for each block, send request - List chunkInfos = - getChunkInfos(keyLocationInfo); - if (chunkInfos.size() == 0) { - return false; - } - - long blockNumBytes = keyLocationInfo.getLength(); - - if (getRemaining() < blockNumBytes) { - blockNumBytes = getRemaining(); - } - setRemaining(getRemaining() - blockNumBytes); - - ContainerProtos.ChecksumData checksumData = - chunkInfos.get(0).getChecksumData(); - setChecksumType(checksumData.getType()); - int bytesPerChecksum = checksumData.getBytesPerChecksum(); - setBytesPerCRC(bytesPerChecksum); - - ByteBuffer blockChecksumByteBuffer = - getBlockChecksumFromChunkChecksums(chunkInfos); - String blockChecksumForDebug = - populateBlockChecksumBuf(blockChecksumByteBuffer); - - LOG.debug("Got reply from EC pipeline {} for block {}: blockChecksum={}, " + - "blockChecksumType={}", - keyLocationInfo.getPipeline(), keyLocationInfo.getBlockID(), - blockChecksumForDebug, checksumData.getType()); - return true; + protected AbstractBlockChecksumComputer getBlockChecksumComputer(List chunkInfos, + long blockLength) { + return new ECBlockChecksumComputer(chunkInfos, getKeyInfo(), blockLength); } - private String populateBlockChecksumBuf( + @Override + protected String populateBlockChecksumBuf( ByteBuffer blockChecksumByteBuffer) throws IOException { String 
blockChecksumForDebug = null; switch (getCombineMode()) { @@ -139,18 +87,9 @@ private String populateBlockChecksumBuf( return blockChecksumForDebug; } - private ByteBuffer getBlockChecksumFromChunkChecksums( - List chunkInfos) throws IOException { - - AbstractBlockChecksumComputer blockChecksumComputer = - new ECBlockChecksumComputer(chunkInfos, getKeyInfo()); - blockChecksumComputer.compute(getCombineMode()); - - return blockChecksumComputer.getOutByteBuffer(); - } - - private List getChunkInfos(OmKeyLocationInfo - keyLocationInfo) throws IOException { + @Override + protected List getChunkInfos(OmKeyLocationInfo + keyLocationInfo) throws IOException { // To read an EC block, we create a STANDALONE pipeline that contains the // single location for the block index we want to read. The EC blocks are // indexed from 1 to N, however the data locations are stored in the diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java index 016121ce1a9b..9c2df0fdb47f 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.ozone.client.checksum; -import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; @@ -44,7 +43,6 @@ * The helper class to compute file checksum for replicated files. */ public class ReplicatedFileChecksumHelper extends BaseFileChecksumHelper { - private int blockIdx; public ReplicatedFileChecksumHelper( OzoneVolume volume, OzoneBucket bucket, String keyName, long length, @@ -61,65 +59,10 @@ public ReplicatedFileChecksumHelper(OzoneVolume volume, OzoneBucket bucket, keyInfo); } - @Override - protected void checksumBlocks() throws IOException { - long currentLength = 0; - for (blockIdx = 0; - blockIdx < getKeyLocationInfoList().size() && getRemaining() >= 0; - blockIdx++) { - OmKeyLocationInfo keyLocationInfo = - getKeyLocationInfoList().get(blockIdx); - if (currentLength > getLength()) { - return; - } - - if (!checksumBlock(keyLocationInfo)) { - throw new PathIOException(getSrc(), - "Fail to get block checksum for " + keyLocationInfo - + ", checksum combine mode: " + getCombineMode()); - } - - currentLength += keyLocationInfo.getLength(); - } - } - - /** - * Return true when sounds good to continue or retry, false when severe - * condition or totally failed. 
- */ - private boolean checksumBlock(OmKeyLocationInfo keyLocationInfo) - throws IOException { - // for each block, send request - List chunkInfos = - getChunkInfos(keyLocationInfo); - if (chunkInfos.size() == 0) { - return false; - } - - long blockNumBytes = keyLocationInfo.getLength(); - - if (getRemaining() < blockNumBytes) { - blockNumBytes = getRemaining(); - } - setRemaining(getRemaining() - blockNumBytes); - - ContainerProtos.ChecksumData checksumData = - chunkInfos.get(0).getChecksumData(); - setChecksumType(checksumData.getType()); - int bytesPerChecksum = checksumData.getBytesPerChecksum(); - setBytesPerCRC(bytesPerChecksum); - - ByteBuffer blockChecksumByteBuffer = getBlockChecksumFromChunkChecksums( - keyLocationInfo, chunkInfos); - String blockChecksumForDebug = - populateBlockChecksumBuf(blockChecksumByteBuffer); - - LOG.debug("got reply from pipeline {} for block {}: blockChecksum={}, " + - "blockChecksumType={}", - keyLocationInfo.getPipeline(), keyLocationInfo.getBlockID(), - blockChecksumForDebug, checksumData.getType()); - return true; + protected AbstractBlockChecksumComputer getBlockChecksumComputer(List chunkInfos, + long blockLength) { + return new ReplicatedBlockChecksumComputer(chunkInfos); } // copied from BlockInputStream @@ -127,6 +70,7 @@ private boolean checksumBlock(OmKeyLocationInfo keyLocationInfo) * Send RPC call to get the block info from the container. * @return List of chunks in this block. */ + @Override protected List getChunkInfos( OmKeyLocationInfo keyLocationInfo) throws IOException { // irrespective of the container state, we will always read via Standalone @@ -164,18 +108,6 @@ protected List getChunkInfos( return chunks; } - // TODO: copy BlockChecksumHelper here - ByteBuffer getBlockChecksumFromChunkChecksums( - OmKeyLocationInfo keyLocationInfo, - List chunkInfoList) - throws IOException { - AbstractBlockChecksumComputer blockChecksumComputer = - new ReplicatedBlockChecksumComputer(chunkInfoList); - blockChecksumComputer.compute(getCombineMode()); - - return blockChecksumComputer.getOutByteBuffer(); - } - /** * Parses out the raw blockChecksum bytes from {@code checksumData} byte * buffer according to the blockChecksumType and populates the cumulative @@ -184,7 +116,8 @@ ByteBuffer getBlockChecksumFromChunkChecksums( * @return a debug-string representation of the parsed checksum if * debug is enabled, otherwise null. 
*/ - String populateBlockChecksumBuf(ByteBuffer checksumData) + @Override + protected String populateBlockChecksumBuf(ByteBuffer checksumData) throws IOException { String blockChecksumForDebug = null; switch (getCombineMode()) { diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 9dc11637f3cd..fe9866401765 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -2590,17 +2590,27 @@ public OzoneFsServerDefaults getServerDefaults() throws IOException { long now = Time.monotonicNow(); if ((serverDefaults == null) || (now - serverDefaultsLastUpdate > serverDefaultsValidityPeriod)) { - serverDefaults = ozoneManagerClient.getServerDefaults(); - serverDefaultsLastUpdate = now; + try { + for (ServiceInfo si : ozoneManagerClient.getServiceInfo() + .getServiceInfoList()) { + if (si.getServerDefaults() != null) { + serverDefaults = si.getServerDefaults(); + serverDefaultsLastUpdate = now; + break; + } + } + } catch (Exception e) { + LOG.warn("Could not get server defaults from OM.", e); + } } - assert serverDefaults != null; return serverDefaults; } @Override public URI getKeyProviderUri() throws IOException { - return OzoneKMSUtil.getKeyProviderUri(ugi, - null, getServerDefaults().getKeyProviderUri(), conf); + String keyProviderUri = (getServerDefaults() != null) ? + serverDefaults.getKeyProviderUri() : null; + return OzoneKMSUtil.getKeyProviderUri(ugi, null, keyProviderUri, conf); } @Override diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java index 25a3ad2d9c89..1b67f024bbe7 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java @@ -153,7 +153,7 @@ public void testPutECKeyAndCheckDNStoredData() throws IOException { Map storages = factoryStub.getStorages(); DatanodeDetails[] dnDetails = - storages.keySet().toArray(new DatanodeDetails[storages.size()]); + storages.keySet().toArray(new DatanodeDetails[0]); Arrays.sort(dnDetails); for (int i = 0; i < inputChunks.length; i++) { MockDatanodeStorage datanodeStorage = storages.get(dnDetails[i]); @@ -182,7 +182,7 @@ public void testPutECKeyAndCheckParityData() throws IOException { Map storages = factoryStub.getStorages(); DatanodeDetails[] dnDetails = - storages.keySet().toArray(new DatanodeDetails[storages.size()]); + storages.keySet().toArray(new DatanodeDetails[0]); Arrays.sort(dnDetails); for (int i = dataBlocks; i < parityBlocks + dataBlocks; i++) { diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestFileChecksumHelper.java similarity index 74% rename from hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java rename to hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestFileChecksumHelper.java index 702a450ee75e..83feb378c568 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java +++ 
b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestFileChecksumHelper.java @@ -21,7 +21,9 @@ import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum; import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.InMemoryConfiguration; @@ -56,10 +58,11 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -77,9 +80,10 @@ import static org.mockito.Mockito.mock; /** - * Unit tests for ReplicatedFileChecksumHelper class. + * Unit tests for Replicated and EC FileChecksumHelper class. */ -public class TestReplicatedFileChecksumHelper { +public class TestFileChecksumHelper { + private final FileChecksum noCachedChecksum = null; private OzoneClient client; private ObjectStore store; private OzoneVolume volume; @@ -119,128 +123,126 @@ public void close() throws IOException { client.close(); } + private OmKeyInfo omKeyInfo(ReplicationType type, FileChecksum cachedChecksum, List locationInfo) { + ReplicationConfig config = type == ReplicationType.EC ? new ECReplicationConfig(6, 3) + : RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE); - @Test - public void testEmptyBlock() throws IOException { - // test the file checksum of a file with an empty block. - RpcClient mockRpcClient = mock(RpcClient.class); - - OzoneManagerProtocol om = mock(OzoneManagerProtocol.class); - when(mockRpcClient.getOzoneManagerClient()).thenReturn(om); - - OmKeyInfo omKeyInfo = new OmKeyInfo.Builder() + return new OmKeyInfo.Builder() .setVolumeName(null) .setBucketName(null) .setKeyName(null) .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, new ArrayList<>()))) + new OmKeyLocationInfoGroup(0, locationInfo))) .setCreationTime(Time.now()) .setModificationTime(Time.now()) .setDataSize(0) - .setReplicationConfig(RatisReplicationConfig.getInstance( - HddsProtos.ReplicationFactor.ONE)) + .setReplicationConfig(config) .setFileEncryptionInfo(null) + .setFileChecksum(cachedChecksum) .setAcls(null) .build(); + } - when(om.lookupKey(any())).thenReturn(omKeyInfo); + private BaseFileChecksumHelper checksumHelper(ReplicationType type, OzoneVolume mockVolume, OzoneBucket mockBucket, + int length, OzoneClientConfig.ChecksumCombineMode combineMode, RpcClient mockRpcClient, OmKeyInfo keyInfo) + throws IOException { + return type == ReplicationType.RATIS ? 
new ReplicatedFileChecksumHelper( + mockVolume, mockBucket, "dummy", length, combineMode, mockRpcClient) + : new ECFileChecksumHelper( + mockVolume, mockBucket, "dummy", length, combineMode, mockRpcClient, keyInfo); + } - OzoneVolume mockVolume = mock(OzoneVolume.class); - when(mockVolume.getName()).thenReturn("vol1"); - OzoneBucket bucket = mock(OzoneBucket.class); - when(bucket.getName()).thenReturn("bucket1"); + private Pipeline pipeline(ReplicationType type, List datanodeDetails) { + ReplicationConfig config = type == ReplicationType.RATIS ? RatisReplicationConfig + .getInstance(HddsProtos.ReplicationFactor.THREE) + : new ECReplicationConfig(6, 3); + + return Pipeline.newBuilder() + .setId(PipelineID.randomId()) + .setReplicationConfig(config) + .setState(Pipeline.PipelineState.CLOSED) + .setNodes(datanodeDetails) + .build(); + } + @ParameterizedTest + @EnumSource(names = {"EC", "RATIS"}) + public void testEmptyBlock(ReplicationType helperType) throws IOException { + // test the file checksum of a file with an empty block. + RpcClient mockRpcClient = mock(RpcClient.class); + OmKeyInfo omKeyInfo = omKeyInfo(helperType, noCachedChecksum, new ArrayList<>()); + OzoneVolume mockVolume = mock(OzoneVolume.class); + OzoneBucket mockBucket = mock(OzoneBucket.class); OzoneClientConfig.ChecksumCombineMode combineMode = OzoneClientConfig.ChecksumCombineMode.MD5MD5CRC; - ReplicatedFileChecksumHelper helper = new ReplicatedFileChecksumHelper( - mockVolume, bucket, "dummy", 10, combineMode, mockRpcClient); + OzoneManagerProtocol om = mock(OzoneManagerProtocol.class); + when(mockRpcClient.getOzoneManagerClient()).thenReturn(om); + when(om.lookupKey(any())).thenReturn(omKeyInfo); + when(mockVolume.getName()).thenReturn("vol1"); + when(mockBucket.getName()).thenReturn("bucket1"); + + + BaseFileChecksumHelper helper = + checksumHelper(helperType, mockVolume, mockBucket, 10, combineMode, mockRpcClient, omKeyInfo); helper.compute(); FileChecksum fileChecksum = helper.getFileChecksum(); assertInstanceOf(MD5MD5CRC32GzipFileChecksum.class, fileChecksum); assertEquals(DataChecksum.Type.CRC32, - ((MD5MD5CRC32GzipFileChecksum)fileChecksum).getCrcType()); + ((MD5MD5CRC32GzipFileChecksum) fileChecksum).getCrcType()); // test negative length - helper = new ReplicatedFileChecksumHelper( - mockVolume, bucket, "dummy", -1, combineMode, mockRpcClient); + helper = + checksumHelper(helperType, mockVolume, mockBucket, -1, combineMode, mockRpcClient, omKeyInfo); helper.compute(); assertNull(helper.getKeyLocationInfoList()); } - @Test - public void testOneBlock() throws IOException { + @ParameterizedTest + @EnumSource(names = {"EC", "RATIS"}) + public void testOneBlock(ReplicationType helperType) throws IOException { // test the file checksum of a file with one block. 
OzoneConfiguration conf = new OzoneConfiguration(); - RpcClient mockRpcClient = mock(RpcClient.class); - - List dns = Arrays.asList( + List dns = Collections.singletonList( DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()).build()); - Pipeline pipeline; - pipeline = Pipeline.newBuilder() - .setId(PipelineID.randomId()) - .setReplicationConfig( - RatisReplicationConfig - .getInstance(HddsProtos.ReplicationFactor.THREE)) - .setState(Pipeline.PipelineState.CLOSED) - .setNodes(dns) - .build(); - + Pipeline pipeline = pipeline(helperType, dns); + BlockID blockID = new BlockID(1, 1); + OmKeyLocationInfo omKeyLocationInfo = + new OmKeyLocationInfo.Builder() + .setPipeline(pipeline) + .setBlockID(blockID) + .build(); + List omKeyLocationInfoList = + Collections.singletonList(omKeyLocationInfo); + OmKeyInfo omKeyInfo = omKeyInfo(helperType, noCachedChecksum, omKeyLocationInfoList); XceiverClientGrpc xceiverClientGrpc = new XceiverClientGrpc(pipeline, conf) { @Override public XceiverClientReply sendCommandAsync( ContainerProtos.ContainerCommandRequestProto request, DatanodeDetails dn) { - return buildValidResponse(); + return buildValidResponse(helperType); } }; XceiverClientFactory factory = mock(XceiverClientFactory.class); + OzoneManagerProtocol om = mock(OzoneManagerProtocol.class); when(factory.acquireClientForReadData(any())). thenReturn(xceiverClientGrpc); - when(mockRpcClient.getXceiverClientManager()).thenReturn(factory); - - OzoneManagerProtocol om = mock(OzoneManagerProtocol.class); when(mockRpcClient.getOzoneManagerClient()).thenReturn(om); - - BlockID blockID = new BlockID(1, 1); - OmKeyLocationInfo omKeyLocationInfo = - new OmKeyLocationInfo.Builder().setPipeline(pipeline) - .setBlockID(blockID) - .build(); - - List omKeyLocationInfoList = - Arrays.asList(omKeyLocationInfo); - - OmKeyInfo omKeyInfo = new OmKeyInfo.Builder() - .setVolumeName(null) - .setBucketName(null) - .setKeyName(null) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, omKeyLocationInfoList))) - .setCreationTime(Time.now()) - .setModificationTime(Time.now()) - .setDataSize(0) - .setReplicationConfig(RatisReplicationConfig.getInstance( - HddsProtos.ReplicationFactor.ONE)) - .setFileEncryptionInfo(null) - .setAcls(null) - .build(); - when(om.lookupKey(any())).thenReturn(omKeyInfo); OzoneVolume mockVolume = mock(OzoneVolume.class); when(mockVolume.getName()).thenReturn("vol1"); - OzoneBucket bucket = mock(OzoneBucket.class); - when(bucket.getName()).thenReturn("bucket1"); + OzoneBucket mockBucket = mock(OzoneBucket.class); + when(mockBucket.getName()).thenReturn("bucket1"); OzoneClientConfig.ChecksumCombineMode combineMode = OzoneClientConfig.ChecksumCombineMode.MD5MD5CRC; - ReplicatedFileChecksumHelper helper = new ReplicatedFileChecksumHelper( - mockVolume, bucket, "dummy", 10, combineMode, mockRpcClient); + BaseFileChecksumHelper helper = checksumHelper( + helperType, mockVolume, mockBucket, 10, combineMode, mockRpcClient, omKeyInfo); helper.compute(); FileChecksum fileChecksum = helper.getFileChecksum(); @@ -249,28 +251,12 @@ public XceiverClientReply sendCommandAsync( FileChecksum cachedChecksum = new MD5MD5CRC32GzipFileChecksum(); /// test cached checksum - OmKeyInfo omKeyInfoWithChecksum = new OmKeyInfo.Builder() - .setVolumeName(null) - .setBucketName(null) - .setKeyName(null) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, omKeyLocationInfoList))) - .setCreationTime(Time.now()) - .setModificationTime(Time.now()) - .setDataSize(0) - 
.setReplicationConfig( - RatisReplicationConfig.getInstance( - HddsProtos.ReplicationFactor.ONE)) - .setFileEncryptionInfo(null) - .setAcls(null) - .setFileChecksum(cachedChecksum) - .build(); + OmKeyInfo omKeyInfoWithChecksum = omKeyInfo(helperType, cachedChecksum, omKeyLocationInfoList); when(om.lookupKey(any())). thenReturn(omKeyInfoWithChecksum); - helper = new ReplicatedFileChecksumHelper( - mockVolume, bucket, "dummy", 10, combineMode, - mockRpcClient); + helper = checksumHelper( + helperType, mockVolume, mockBucket, 10, combineMode, mockRpcClient, omKeyInfo); helper.compute(); fileChecksum = helper.getFileChecksum(); @@ -278,7 +264,7 @@ public XceiverClientReply sendCommandAsync( assertEquals(1, helper.getKeyLocationInfoList().size()); } - private XceiverClientReply buildValidResponse() { + private XceiverClientReply buildValidResponse(ReplicationType type) { // return a GetBlockResponse message of a block and its chunk checksums. ContainerProtos.DatanodeBlockID blockID = ContainerProtos.DatanodeBlockID.newBuilder() @@ -286,7 +272,7 @@ private XceiverClientReply buildValidResponse() { .setLocalID(1) .setBlockCommitSequenceId(1).build(); - byte[] byteArray = new byte[10]; + byte[] byteArray = new byte[12]; ByteString byteString = ByteString.copyFrom(byteArray); ContainerProtos.ChecksumData checksumData = @@ -296,13 +282,17 @@ private XceiverClientReply buildValidResponse() { .addChecksums(byteString) .build(); - ContainerProtos.ChunkInfo chunkInfo = - ContainerProtos.ChunkInfo.newBuilder() + ContainerProtos.ChunkInfo.Builder chunkInfoBuilder = ContainerProtos.ChunkInfo.newBuilder() .setChunkName("dummy_chunk") .setOffset(1) .setLen(10) - .setChecksumData(checksumData) - .build(); + .setChecksumData(checksumData); + + if (type == ReplicationType.EC) { + chunkInfoBuilder.setStripeChecksum(byteString); + } + + ContainerProtos.ChunkInfo chunkInfo = chunkInfoBuilder.build(); ContainerProtos.BlockData blockData = ContainerProtos.BlockData.newBuilder() @@ -337,6 +327,7 @@ private OzoneBucket getOzoneBucket() throws IOException { /** * Write a real key and compute file checksum of it. + * * @throws IOException */ @Test diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml index bd16a0a5dfe5..f7f60dcd1d13 100644 --- a/hadoop-ozone/common/pom.xml +++ b/hadoop-ozone/common/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-common - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Common Apache Ozone Common jar diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java index 61ae0879f786..c5985f820933 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java @@ -214,7 +214,7 @@ public String toString() { } /** - * Get the volume & bucket or mount name (non-key path). + * Get the volume and bucket or mount name (non-key path). * @return String of path excluding key in bucket. */ // Prepend a delimiter at beginning. e.g. 
/vol1/buc1 diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index 8fa8921cc9a9..d6320061253c 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -119,7 +119,7 @@ public static InetSocketAddress getOmAddress(ConfigurationSource conf) { * Return list of OM addresses by service ids - when HA is enabled. * * @param conf {@link ConfigurationSource} - * @return {service.id -> [{@link InetSocketAddress}]} + * @return {service.id -> [{@link InetSocketAddress}]} */ public static Map> getOmHAAddressesById( ConfigurationSource conf) { @@ -278,7 +278,6 @@ public static boolean isReadOnly( case SetSafeMode: case PrintCompactionLogDag: case GetSnapshotInfo: - case GetServerDefaults: case GetQuotaRepairStatus: case StartQuotaRepair: return true; @@ -333,6 +332,7 @@ public static boolean isReadOnly( case DeleteSnapshot: case RenameSnapshot: case SnapshotMoveDeletedKeys: + case SnapshotMoveTableKeys: case SnapshotPurge: case RecoverLease: case SetTimes: @@ -707,7 +707,7 @@ public static void verifyKeyNameWithSnapshotReservedWordForDeletion(String keyNa * Look at 'ozone.om.internal.service.id' first. If configured, return that. * If the above is not configured, look at 'ozone.om.service.ids'. * If count(ozone.om.service.ids) == 1, return that id. - * If count(ozone.om.service.ids) > 1 throw exception + * If count(ozone.om.service.ids) > 1 throw exception * If 'ozone.om.service.ids' is not configured, return null. (Non HA) * @param conf configuration * @return OM service ID. diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java index 8ffa3c45c09e..c7e20fb7e8b2 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java @@ -27,7 +27,7 @@ /** * An {@link OutputStream} first write data to a buffer up to the capacity. - * Then, select {@link Underlying} by the number of bytes written. + * Then, select {@code Underlying} by the number of bytes written. * When {@link #flush()}, {@link #hflush()}, {@link #hsync()} * or {@link #close()} is invoked, * it will force flushing the buffer and {@link OutputStream} selection. diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index 46becc9e64b5..a77bc4f53048 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -403,6 +403,8 @@ private OMConfigKeys() { /** * Configuration properties for Snapshot Directory Service. 
*/ + public static final String OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED = "ozone.snapshot.deep.cleaning.enabled"; + public static final boolean OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED_DEFAULT = false; public static final String OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL = "ozone.snapshot.directory.service.interval"; public static final String OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL_DEFAULT diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java index ae238f1b45ae..db00917daccb 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java @@ -80,7 +80,7 @@ T doUnderLock(String lockId, S3SecretFunction action) /** * Default implementation of secret check method. * @param kerberosId kerberos principal. - * @return true if exist associated s3 secret for given {@param kerberosId}, + * @return true if exist associated s3 secret for given {@code kerberosId}, * false if not. */ default boolean hasS3Secret(String kerberosId) throws IOException { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMLeaderNotReadyException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMLeaderNotReadyException.java index 0bfd6922fee0..8c3943d0fabd 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMLeaderNotReadyException.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMLeaderNotReadyException.java @@ -24,7 +24,7 @@ * Exception thrown by * {@link org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB} when * OM leader is not ready to serve requests. This error is thrown when Raft - * Server returns {@link org.apache.ratis.protocol.LeaderNotReadyException}. + * Server returns {@link org.apache.ratis.protocol.exceptions.LeaderNotReadyException}. 
*/ public class OMLeaderNotReadyException extends IOException { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java index 5a83f6dbba62..42c97211c973 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java @@ -48,7 +48,8 @@ public final class OmBucketInfo extends WithObjectID implements Auditable, CopyO private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(BucketInfo.getDefaultInstance()), OmBucketInfo::getFromProtobuf, - OmBucketInfo::getProtobuf); + OmBucketInfo::getProtobuf, + OmBucketInfo.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBAccessIdInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBAccessIdInfo.java index 8ca0054b3474..862cce21fe99 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBAccessIdInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBAccessIdInfo.java @@ -34,6 +34,7 @@ public final class OmDBAccessIdInfo { Proto2Codec.get(ExtendedUserAccessIdInfo.getDefaultInstance()), OmDBAccessIdInfo::getFromProtobuf, OmDBAccessIdInfo::getProtobuf, + OmDBAccessIdInfo.class, DelegatedCodec.CopyType.SHALLOW); public static Codec getCodec() { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBTenantState.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBTenantState.java index bb356eafdd9d..a1d51cab3614 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBTenantState.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBTenantState.java @@ -34,6 +34,7 @@ public final class OmDBTenantState implements Comparable { Proto2Codec.get(TenantState.getDefaultInstance()), OmDBTenantState::getFromProtobuf, OmDBTenantState::getProtobuf, + OmDBTenantState.class, DelegatedCodec.CopyType.SHALLOW); public static Codec getCodec() { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBUserPrincipalInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBUserPrincipalInfo.java index 75b01a04171b..4d93fc54134a 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBUserPrincipalInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDBUserPrincipalInfo.java @@ -33,11 +33,11 @@ * principal. 
*/ public final class OmDBUserPrincipalInfo { - private static final Codec CODEC - = new DelegatedCodec<>( - Proto2Codec.get(TenantUserPrincipalInfo.getDefaultInstance()), - OmDBUserPrincipalInfo::getFromProtobuf, - OmDBUserPrincipalInfo::getProtobuf); + private static final Codec CODEC = new DelegatedCodec<>( + Proto2Codec.get(TenantUserPrincipalInfo.getDefaultInstance()), + OmDBUserPrincipalInfo::getFromProtobuf, + OmDBUserPrincipalInfo::getProtobuf, + OmDBUserPrincipalInfo.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java index 55e138dbd105..69ed1b613bd9 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java @@ -40,7 +40,8 @@ public class OmDirectoryInfo extends WithParentObjectId private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(DirectoryInfo.getDefaultInstance()), OmDirectoryInfo::getFromProtobuf, - OmDirectoryInfo::getProtobuf); + OmDirectoryInfo::getProtobuf, + OmDirectoryInfo.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java index f52a142239b4..5c480860d2b2 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java @@ -65,7 +65,8 @@ private static Codec newCodec(boolean ignorePipeline) { return new DelegatedCodec<>( Proto2Codec.get(KeyInfo.getDefaultInstance()), OmKeyInfo::getFromProtobuf, - k -> k.getProtobuf(ignorePipeline, ClientVersion.CURRENT_VERSION)); + k -> k.getProtobuf(ignorePipeline, ClientVersion.CURRENT_VERSION), + OmKeyInfo.class); } public static Codec getCodec(boolean ignorePipeline) { @@ -360,7 +361,6 @@ public synchronized void appendNewBlocks( * @param updateTime if true, updates modification time. * @param keepOldVersions if false, old blocks won't be kept * and the new block versions will always be 0 - * @throws IOException */ public synchronized long addNewVersion( List newLocationList, boolean updateTime, @@ -628,7 +628,7 @@ public OmKeyInfo build() { /** * For network transmit. - * @return + * @return KeyInfo */ public KeyInfo getProtobuf(int clientVersion) { return getProtobuf(false, clientVersion); @@ -660,7 +660,7 @@ public KeyInfo getNetworkProtobuf(String fullKeyName, int clientVersion, /** * * @param ignorePipeline true for persist to DB, false for network transmit. 
- * @return + * @return KeyInfo */ public KeyInfo getProtobuf(boolean ignorePipeline, int clientVersion) { return getProtobuf(ignorePipeline, null, clientVersion, false); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java index 61402ee28e6c..7c1e01d2ae5c 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java @@ -42,7 +42,8 @@ public final class OmMultipartKeyInfo extends WithObjectID implements CopyObject private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(MultipartKeyInfo.getDefaultInstance()), OmMultipartKeyInfo::getFromProto, - OmMultipartKeyInfo::getProto); + OmMultipartKeyInfo::getProto, + OmMultipartKeyInfo.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java index 499b4878362d..65182a860d9e 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java @@ -46,7 +46,8 @@ public final class OmVolumeArgs extends WithObjectID private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(VolumeInfo.getDefaultInstance()), OmVolumeArgs::getFromProtobuf, - OmVolumeArgs::getProtobuf); + OmVolumeArgs::getProtobuf, + OmVolumeArgs.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneIdentityProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneIdentityProvider.java index 6bab1025b13e..ed3d3ee25c2a 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneIdentityProvider.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneIdentityProvider.java @@ -21,7 +21,6 @@ import org.apache.hadoop.ipc.CallerContext; import org.apache.hadoop.ipc.IdentityProvider; import org.apache.hadoop.ipc.Schedulable; -import org.apache.hadoop.ipc.Server; import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -43,7 +42,7 @@ public OzoneIdentityProvider() { } /** - * If schedulable isn't instance of {@link Server.Call}, + * If schedulable isn't instance of {@link org.apache.hadoop.ipc.Server.Call}, * then trying to access getCallerContext() method, will * result in an exception. * diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java index 24c172ef8fd2..2d0f92a1f0cb 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java @@ -34,7 +34,7 @@ /** * Args for deleted keys. This is written to om metadata deletedTable. * Once a key is deleted, it is moved to om metadata deletedTable. 
Having a - * {label: List} ensures that if users create & delete keys with + * label: {@code List} ensures that if users create and delete keys with * exact same uri multiple times, all the delete instances are bundled under * the same key name. This is useful as part of GDPR compliance where an * admin wants to confirm if a given key is deleted from deletedTable metadata. @@ -47,7 +47,8 @@ private static Codec newCodec(boolean ignorePipeline) { return new DelegatedCodec<>( Proto2Codec.get(RepeatedKeyInfo.getDefaultInstance()), RepeatedOmKeyInfo::getFromProto, - k -> k.getProto(ignorePipeline, ClientVersion.CURRENT_VERSION)); + k -> k.getProto(ignorePipeline, ClientVersion.CURRENT_VERSION), + RepeatedOmKeyInfo.class); } public static Codec getCodec(boolean ignorePipeline) { @@ -110,9 +111,7 @@ public static RepeatedOmKeyInfo getFromProto(RepeatedKeyInfo } /** - * - * @param compact, true for persistence, false for network transmit - * @return + * @param compact true for persistence, false for network transmit */ public RepeatedKeyInfo getProto(boolean compact, int clientVersion) { List list = new ArrayList<>(); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java index 20c145bd0c06..7ea932c5716d 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java @@ -31,7 +31,8 @@ public final class S3SecretValue { private static final Codec CODEC = new DelegatedCodec<>( Proto2Codec.get(S3Secret.getDefaultInstance()), S3SecretValue::fromProtobuf, - S3SecretValue::getProtobuf); + S3SecretValue::getProtobuf, + S3SecretValue.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java index c8bdbf43c429..5dbe3487e19c 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java @@ -25,6 +25,7 @@ import java.util.Map; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; +import org.apache.hadoop.ozone.OzoneFsServerDefaults; import org.apache.hadoop.ozone.OzoneManagerVersion; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRoleInfo; @@ -59,6 +60,7 @@ public final class ServiceInfo { private Map ports; private OMRoleInfo omRoleInfo; + private OzoneFsServerDefaults serverDefaults; /** * Default constructor for JSON deserialization. @@ -76,6 +78,24 @@ private ServiceInfo(NodeType nodeType, List portList, OzoneManagerVersion omVersion, OMRoleInfo omRole) { + this(nodeType, hostname, portList, omVersion, omRole, null); + } + + /** + * Constructs the ServiceInfo for the {@code nodeType}. 
+ * @param nodeType type of node/service + * @param hostname hostname of the service + * @param portList list of ports the service listens to + * @param omVersion Om Version + * @param omRole OM role Info + * @param serverDefaults server default configuration + */ + private ServiceInfo(NodeType nodeType, + String hostname, + List portList, + OzoneManagerVersion omVersion, + OMRoleInfo omRole, + OzoneFsServerDefaults serverDefaults) { Preconditions.checkNotNull(nodeType); Preconditions.checkNotNull(hostname); this.nodeType = nodeType; @@ -86,6 +106,7 @@ private ServiceInfo(NodeType nodeType, ports.put(port.getType(), port.getValue()); } this.omRoleInfo = omRole; + this.serverDefaults = serverDefaults; } /** @@ -143,6 +164,15 @@ public OMRoleInfo getOmRoleInfo() { return omRoleInfo; } + /** + * Returns the Ozone Server default configuration. + * @return OzoneFsServerDefaults + */ + @JsonIgnore + public OzoneFsServerDefaults getServerDefaults() { + return serverDefaults; + } + /** * Converts {@link ServiceInfo} to OzoneManagerProtocolProtos.ServiceInfo. * @@ -170,6 +200,9 @@ public OzoneManagerProtocolProtos.ServiceInfo getProtobuf() { if (nodeType == NodeType.OM && omRoleInfo != null) { builder.setOmRole(omRoleInfo); } + if (serverDefaults != null) { + builder.setServerDefaults(serverDefaults.getProtobuf()); + } return builder.build(); } @@ -185,7 +218,9 @@ public static ServiceInfo getFromProtobuf( serviceInfo.getHostname(), serviceInfo.getServicePortsList(), OzoneManagerVersion.fromProtoValue(serviceInfo.getOMVersion()), - serviceInfo.hasOmRole() ? serviceInfo.getOmRole() : null); + serviceInfo.hasOmRole() ? serviceInfo.getOmRole() : null, + serviceInfo.hasServerDefaults() ? OzoneFsServerDefaults.getFromProtobuf( + serviceInfo.getServerDefaults()) : null); } /** @@ -206,6 +241,7 @@ public static class Builder { private List portList = new ArrayList<>(); private OMRoleInfo omRoleInfo; private OzoneManagerVersion omVersion; + private OzoneFsServerDefaults serverDefaults; /** * Gets the Om Client Protocol Version. @@ -259,6 +295,11 @@ public Builder setOmRoleInfo(OMRoleInfo omRole) { return this; } + public Builder setServerDefaults(OzoneFsServerDefaults defaults) { + serverDefaults = defaults; + return this; + } + /** * Builds and returns {@link ServiceInfo} with the set values. 
* @return {@link ServiceInfo} @@ -268,7 +309,8 @@ public ServiceInfo build() { host, portList, omVersion, - omRoleInfo); + omRoleInfo, + serverDefaults); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotDiffJob.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotDiffJob.java index 0d221dc1cd4b..c3c8efc11ad6 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotDiffJob.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotDiffJob.java @@ -261,6 +261,11 @@ private static final class SnapshotDiffJobCodec .setSerializationInclusion(JsonInclude.Include.NON_NULL) .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + @Override + public Class getTypeClass() { + return SnapshotDiffJob.class; + } + @Override public byte[] toPersistedFormat(SnapshotDiffJob object) throws IOException { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java index 8584796c2e95..cf0a60dd3530 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java @@ -52,14 +52,14 @@ * Each snapshot created has an associated SnapshotInfo entry * containing the snapshotId, snapshot path, * snapshot checkpoint directory, previous snapshotId - * for the snapshot path & global amongst other necessary fields. + * for the snapshot path and global amongst other necessary fields. */ public final class SnapshotInfo implements Auditable, CopyObject { private static final Codec CODEC = new DelegatedCodec<>( - Proto2Codec.get( - OzoneManagerProtocolProtos.SnapshotInfo.getDefaultInstance()), + Proto2Codec.get(OzoneManagerProtocolProtos.SnapshotInfo.getDefaultInstance()), SnapshotInfo::getFromProtobuf, - SnapshotInfo::getProtobuf); + SnapshotInfo::getProtobuf, + SnapshotInfo.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccountNameSpace.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccountNameSpace.java index 753d528cb05a..a715bfbc1538 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccountNameSpace.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccountNameSpace.java @@ -57,7 +57,7 @@ public interface AccountNameSpace { * Get Space Usage Information for this AccountNameSpace. This can be * used for billing purpose. Such Aggregation can also be done lazily * by a Recon job. Implementations can decide. - * @return + * @return SpaceUsage */ SpaceUsageSource getSpaceUsage(); @@ -71,7 +71,7 @@ public interface AccountNameSpace { /** * Get Quota Information for this AccountNameSpace. 
- * @return + * @return OzoneQuota */ OzoneQuota getQuota(); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/BucketNameSpace.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/BucketNameSpace.java index 1481f1b466b7..d5ecf7bba800 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/BucketNameSpace.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/BucketNameSpace.java @@ -74,7 +74,7 @@ public interface BucketNameSpace { * Get Space Usage Information for this BucketNameSpace. This can be * used for billing purpose. Such Aggregation can also be done lazily * by a Recon job. Implementations can decide. - * @return + * @return SpaceUsageSource */ SpaceUsageSource getSpaceUsage(); @@ -88,7 +88,7 @@ public interface BucketNameSpace { /** * Get Quota Information for this BucketNameSpace. - * @return + * @return OzoneQuota */ OzoneQuota getQuota(); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java index 37481b00ea28..94822630f8e4 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java @@ -28,7 +28,6 @@ import org.apache.hadoop.fs.SafeModeAction; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneFsServerDefaults; import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -1062,7 +1061,7 @@ DBUpdates getDBUpdates( * @param txnApplyCheckIntervalSeconds Time in SECONDS to wait between * successive checks for all transactions * to be applied to the OM DB. - * @return + * @return {@code long} */ default long prepareOzoneManager( long txnApplyWaitTimeoutSeconds, long txnApplyCheckIntervalSeconds) @@ -1146,14 +1145,6 @@ void setTimes(OmKeyArgs keyArgs, long mtime, long atime) boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOException; - /** - * Get server default configurations. - * - * @return OzoneFsServerDefaults some default configurations from server. - * @throws IOException - */ - OzoneFsServerDefaults getServerDefaults() throws IOException; - /** * Get status of last triggered quota repair in OM. 
* @return String diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index 432b55051dac..b140cf95e693 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -41,7 +41,6 @@ import org.apache.hadoop.ipc.CallerContext; import org.apache.hadoop.ozone.ClientVersion; import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneFsServerDefaults; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BasicOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.ErrorInfo; @@ -193,8 +192,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3Authentication; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3Secret; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SafeMode; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServerDefaultsRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServerDefaultsResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclRequest; @@ -2559,22 +2556,6 @@ public boolean setSafeMode(SafeModeAction action, boolean isChecked) return setSafeModeResponse.getResponse(); } - @Override - public OzoneFsServerDefaults getServerDefaults() - throws IOException { - ServerDefaultsRequest serverDefaultsRequest = - ServerDefaultsRequest.newBuilder().build(); - - OMRequest omRequest = createOMRequest(Type.GetServerDefaults) - .setServerDefaultsRequest(serverDefaultsRequest).build(); - - ServerDefaultsResponse serverDefaultsResponse = - handleError(submitRequest(omRequest)).getServerDefaultsResponse(); - - return OzoneFsServerDefaults.getFromProtobuf( - serverDefaultsResponse.getServerDefaults()); - } - @Override public String getQuotaRepairStatus() throws IOException { OzoneManagerProtocolProtos.GetQuotaRepairStatusRequest quotaRepairStatusRequest = diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAccessAuthorizer.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAccessAuthorizer.java index 1f105a03ad46..abd4cd6f6d25 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAccessAuthorizer.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAccessAuthorizer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with this * work for additional information regarding copyright ownership. The ASF @@ -19,7 +19,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; /** - * Default implementation for {@link IAccessAuthorizer}. + * No-op implementation for {@link IAccessAuthorizer}, allows everything. 
* */ public class OzoneAccessAuthorizer implements IAccessAuthorizer { @@ -35,4 +35,9 @@ public boolean checkAccess(IOzoneObj ozoneObject, RequestContext context) throws OMException { return true; } + + @Override + public boolean isNative() { + return true; + } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java index ca32c96855dd..e1f1f3a8c1ef 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java @@ -24,7 +24,7 @@ /** * Class representing an ozone object. - * It can be a volume with non-null volumeName (bucketName=null & name=null) + * It can be a volume with non-null volumeName {@literal (bucketName=null & name=null)} * or a bucket with non-null volumeName and bucketName (name=null) * or a key with non-null volumeName, bucketName and key name * (via getKeyName) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffReportOzone.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffReportOzone.java index a9e89033129c..83300d5689ad 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffReportOzone.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffReportOzone.java @@ -47,6 +47,7 @@ public class SnapshotDiffReportOzone Proto2Codec.get(DiffReportEntryProto.getDefaultInstance()), SnapshotDiffReportOzone::fromProtobufDiffReportEntry, SnapshotDiffReportOzone::toProtobufDiffReportEntry, + DiffReportEntry.class, DelegatedCodec.CopyType.SHALLOW); public static Codec getDiffReportEntryCodec() { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java index b5a15db39cd3..289fc42b4eda 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java @@ -69,8 +69,6 @@ public static void main(String[] args) { System.out.println( "Source code repository " + OZONE_VERSION_INFO.getUrl() + " -r " + OZONE_VERSION_INFO.getRevision()); - System.out.println("Compiled by " + OZONE_VERSION_INFO.getUser() + " on " - + OZONE_VERSION_INFO.getDate()); System.out.println( "Compiled with protoc " + OZONE_VERSION_INFO.getHadoopProtoc2Version() + ", " + OZONE_VERSION_INFO.getGrpcProtocVersion() + diff --git a/hadoop-ozone/common/src/main/resources/ozone-version-info.properties b/hadoop-ozone/common/src/main/resources/ozone-version-info.properties index 1a6e3b615192..73f02760d6fa 100644 --- a/hadoop-ozone/common/src/main/resources/ozone-version-info.properties +++ b/hadoop-ozone/common/src/main/resources/ozone-version-info.properties @@ -19,9 +19,6 @@ version=${declared.ozone.version} release=${ozone.release} revision=${version-info.scm.commit} -branch=${version-info.scm.branch} -user=${user.name} -date=${version-info.build.time} url=${version-info.scm.uri} srcChecksum=${version-info.source.md5} hadoopProtoc2Version=${proto2.hadooprpc.protobuf.version} diff --git a/hadoop-ozone/csi/pom.xml b/hadoop-ozone/csi/pom.xml index a0565d7e8909..cd4d4eef0419 100644 --- a/hadoop-ozone/csi/pom.xml +++ b/hadoop-ozone/csi/pom.xml @@ -20,10 +20,10 @@ 
http://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-csi - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone CSI service Apache Ozone CSI service jar diff --git a/hadoop-ozone/datanode/pom.xml b/hadoop-ozone/datanode/pom.xml index 29c10671d9da..87664b26614c 100644 --- a/hadoop-ozone/datanode/pom.xml +++ b/hadoop-ozone/datanode/pom.xml @@ -19,12 +19,12 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-datanode Apache Ozone Datanode jar - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT true diff --git a/hadoop-ozone/dev-support/checks/_build.sh b/hadoop-ozone/dev-support/checks/_build.sh new file mode 100755 index 000000000000..b1f23a9ba8a2 --- /dev/null +++ b/hadoop-ozone/dev-support/checks/_build.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +cd "$DIR/../../.." || exit 1 + +: ${OZONE_WITH_COVERAGE:="false"} + +MAVEN_OPTIONS='-V -B -DskipTests -DskipDocs --no-transfer-progress' + +if [[ "${OZONE_WITH_COVERAGE}" == "true" ]]; then + MAVEN_OPTIONS="${MAVEN_OPTIONS} -Pcoverage" +else + MAVEN_OPTIONS="${MAVEN_OPTIONS} -Djacoco.skip" +fi + +export MAVEN_OPTS="-Xmx4096m ${MAVEN_OPTS:-}" +mvn ${MAVEN_OPTIONS} clean "$@" +rc=$? diff --git a/hadoop-ozone/dev-support/checks/_diffoscope.sh b/hadoop-ozone/dev-support/checks/_diffoscope.sh new file mode 100755 index 000000000000..cc7cc700c823 --- /dev/null +++ b/hadoop-ozone/dev-support/checks/_diffoscope.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Helper script to compare jars reported by maven-artifact-plugin + +set -e -u -o pipefail + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +cd "$DIR/../../.." || exit 1 + +BASE_DIR="$(pwd -P)" +: ${OUTPUT_LOG:="${BASE_DIR}/target/repro/output.log"} + +for jar in $(grep -o "investigate with diffoscope [^ ]*\.jar [^ ]*\.jar" "${OUTPUT_LOG}" | awk '{ print $NF }'); do + jarname=$(basename "$jar") + if [[ ! 
-e "$jar" ]]; then + echo "$jar does not exist" + continue + fi + + ref=$(find target/reference -name "$jarname") + if [[ -z "$ref" ]]; then + ref=$(find ~/.m2/repository -name "$jarname") + fi + + if [[ ! -e "$ref" ]]; then + echo "Reference not found for: $jarname" + continue + fi + + diffoscope "$ref" "$jar" +done diff --git a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh index 36205c69bb64..0249c7a498d9 100755 --- a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh +++ b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh @@ -29,16 +29,20 @@ _realpath() { tempfile="${REPORT_DIR}/summary.tmp" ## generate summary txt file +failures=${REPORT_DIR}/failures.txt find "." -not -path '*/iteration*' -name 'TEST*.xml' -print0 \ | xargs -n1 -0 "grep" -l -E " "${tempfile}" + > "${failures}" +cat ${failures} > "${tempfile}" +leaks=${REPORT_DIR}/leaks.txt if [[ "${CHECK:-unit}" == "integration" ]]; then find hadoop-ozone/integration-test -not -path '*/iteration*' -name '*-output.txt' -print0 \ | xargs -n1 -0 "grep" -l -E "not closed properly|was not shutdown properly" \ | awk -F/ '{sub("-output.txt",""); print $NF}' \ - >> "${tempfile}" + > "${leaks}" + cat ${leaks} >> "${tempfile}" fi #Copy heap dump and dump leftovers @@ -50,11 +54,13 @@ find "." -not -path '*/iteration*' \ -exec mv {} "$REPORT_DIR/" \; ## Add the tests where the JVM is crashed +crashes=${REPORT_DIR}/crashes.txt grep -A1 'Crashed tests' "${REPORT_DIR}/output.log" \ | grep -v -e 'Crashed tests' -e '--' \ | cut -f2- -d' ' \ | sort -u \ - >> "${tempfile}" + > "${crashes}" +cat "${crashes}" >> "${tempfile}" # Check for tests that started but were not finished if grep -q 'There was a timeout.*in the fork' "${REPORT_DIR}/output.log"; then @@ -93,20 +99,24 @@ fi ## generate summary markdown file export SUMMARY_FILE="$REPORT_DIR/summary.md" -for TEST_RESULT_FILE in $(find "$REPORT_DIR" -name "*.txt" | grep -v output); do - - FAILURES=$(grep FAILURE "$TEST_RESULT_FILE" | grep "Tests run" | awk '{print $18}' | sort | uniq) +echo -n > "$SUMMARY_FILE" +if [ -s "${failures}" ]; then + printf "# Failed Tests\n\n" >> "$SUMMARY_FILE" + cat "${failures}" | sed 's/^/ * /' >> "$SUMMARY_FILE" +fi +rm -f "${failures}" - for FAILURE in $FAILURES; do - TEST_RESULT_LOCATION="$(_realpath --relative-to="$REPORT_DIR" "$TEST_RESULT_FILE")" - TEST_OUTPUT_LOCATION="${TEST_RESULT_LOCATION//.txt/-output.txt}" - printf " * [%s](%s) ([output](%s))\n" "$FAILURE" "$TEST_RESULT_LOCATION" "$TEST_OUTPUT_LOCATION" >> "$SUMMARY_FILE" - done -done +if [[ -s "${leaks}" ]]; then + printf "# Leaks Detected\n\n" >> "$SUMMARY_FILE" + cat "${leaks}" | sed 's/^/ * /' >> "$SUMMARY_FILE" +fi +rm -f "${leaks}" -if [ -s "$SUMMARY_FILE" ]; then - printf "# Failing tests: \n\n" | cat - "$SUMMARY_FILE" > temp && mv temp "$SUMMARY_FILE" +if [[ -s "${crashes}" ]]; then + printf "# Crashed Tests\n\n" >> "$SUMMARY_FILE" + cat "${crashes}" | sed 's/^/ * /' >> "$SUMMARY_FILE" fi +rm -f "${crashes}" ## generate counter wc -l "$REPORT_DIR/summary.txt" | awk '{print $1}'> "$REPORT_DIR/failures" diff --git a/hadoop-ozone/dev-support/checks/build.sh b/hadoop-ozone/dev-support/checks/build.sh index f9938e703874..7e9a9d5cbf27 100755 --- a/hadoop-ozone/dev-support/checks/build.sh +++ b/hadoop-ozone/dev-support/checks/build.sh @@ -13,20 +13,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -cd "$DIR/../../.." || exit 1 - -: ${OZONE_WITH_COVERAGE:="false"} - -MAVEN_OPTIONS='-V -B -DskipTests -DskipDocs --no-transfer-progress' -if [[ "${OZONE_WITH_COVERAGE}" == "true" ]]; then - MAVEN_OPTIONS="${MAVEN_OPTIONS} -Pcoverage" -else - MAVEN_OPTIONS="${MAVEN_OPTIONS} -Djacoco.skip" -fi - -export MAVEN_OPTS="-Xmx4096m $MAVEN_OPTS" -echo "${MAVEN_OPTIONS}" -mvn ${MAVEN_OPTIONS} clean install "$@" -exit $? +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +source "${DIR}"/_build.sh install "$@" diff --git a/hadoop-ozone/dev-support/checks/coverage.sh b/hadoop-ozone/dev-support/checks/coverage.sh index 04961921d968..67161d01a052 100755 --- a/hadoop-ozone/dev-support/checks/coverage.sh +++ b/hadoop-ozone/dev-support/checks/coverage.sh @@ -53,4 +53,5 @@ find target/coverage-classes -type d \( -name proto -or -name proto3 -or -name g | xargs rm -rf #generate the reports -jacoco report "$REPORT_DIR/jacoco-all.exec" --classfiles target/coverage-classes --html "$REPORT_DIR/all" --xml "$REPORT_DIR/all.xml" +src=$(find hadoop-* -path '*/src/main/java' | sed 's/^/--sourcefiles /g' | xargs echo) +jacoco report "$REPORT_DIR/jacoco-all.exec" $src --classfiles target/coverage-classes --html "$REPORT_DIR/all" --xml "$REPORT_DIR/all.xml" diff --git a/hadoop-ozone/dev-support/checks/junit.sh b/hadoop-ozone/dev-support/checks/junit.sh index bb7088f0cd54..f0002d4eedca 100755 --- a/hadoop-ozone/dev-support/checks/junit.sh +++ b/hadoop-ozone/dev-support/checks/junit.sh @@ -30,7 +30,7 @@ if [[ ${ITERATIONS} -le 0 ]]; then ITERATIONS=1 fi -export MAVEN_OPTS="-Xmx4096m $MAVEN_OPTS" +export MAVEN_OPTS="-Xmx4096m ${MAVEN_OPTS:-}" MAVEN_OPTIONS="-B -V -Dskip.npx -Dskip.installnpx -Dnative.lib.tmp.dir=/tmp --no-transfer-progress" if [[ "${OZONE_WITH_COVERAGE}" != "true" ]]; then diff --git a/hadoop-ozone/dev-support/checks/repro.sh b/hadoop-ozone/dev-support/checks/repro.sh new file mode 100755 index 000000000000..1b74ec113370 --- /dev/null +++ b/hadoop-ozone/dev-support/checks/repro.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This check verifies build reproducibility. + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +cd "$DIR/../../.." 
|| exit 1 + +BASE_DIR="$(pwd -P)" +REPORT_DIR=${OUTPUT_DIR:-"${BASE_DIR}/target/repro"} + +rc=0 +source "${DIR}"/_build.sh verify artifact:compare "$@" | tee output.log + +mkdir -p "$REPORT_DIR" +mv output.log "$REPORT_DIR"/ + +REPORT_FILE="$REPORT_DIR/summary.txt" +grep 'ERROR.*mismatch' "${REPORT_DIR}/output.log" > "${REPORT_FILE}" + +wc -l "${REPORT_FILE}" | awk '{ print $1 }' > "${REPORT_DIR}/failures" + +if [[ -s "${REPORT_FILE}" ]]; then + exit 1 +fi + +exit $rc # result of mvn diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml index f2c2ba365c0d..e288520accad 100644 --- a/hadoop-ozone/dist/pom.xml +++ b/hadoop-ozone/dist/pom.xml @@ -19,12 +19,12 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-dist Apache Ozone Distribution jar - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT UTF-8 true diff --git a/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh b/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh index 554b22b5a394..e8032068465a 100644 --- a/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh +++ b/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh @@ -94,10 +94,9 @@ EOF # Some tests are skipped due to known issues. # - ITestS3AContractDistCp: HDDS-10616 - # - ITestS3AContractGetFileStatusV1List: HDDS-10617 # - ITestS3AContractRename: HDDS-10665 mvn -B -V --fail-never --no-transfer-progress \ - -Dtest='ITestS3AContract*, ITestS3ACommitterMRJob, !ITestS3AContractDistCp, !ITestS3AContractGetFileStatusV1List, !ITestS3AContractRename' \ + -Dtest='ITestS3AContract*, ITestS3ACommitterMRJob, !ITestS3AContractDistCp, !ITestS3AContractRename' \ clean test local target="${RESULT_DIR}/junit/${bucket}/target" diff --git a/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-compose.yaml index 5220d71669df..e2d7272b0300 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-compose.yaml @@ -39,6 +39,8 @@ services: volumes: - tmpfs1:/data - ../..:/opt/hadoop + deploy: + replicas: ${DATANODE1_REPLICA:-1} datanode2: <<: *common-config ports: @@ -50,6 +52,8 @@ services: volumes: - tmpfs2:/data - ../..:/opt/hadoop + deploy: + replicas: ${DATANODE2_REPLICA:-1} datanode3: <<: *common-config ports: @@ -61,6 +65,8 @@ services: volumes: - tmpfs3:/data - ../..:/opt/hadoop + deploy: + replicas: ${DATANODE3_REPLICA:-1} datanode4: <<: *common-config ports: @@ -72,6 +78,34 @@ services: volumes: - tmpfs4:/data - ../..:/opt/hadoop + deploy: + replicas: ${DATANODE4_REPLICA:-1} + datanode5: + <<: *common-config + ports: + - 19864 + - 9882 + environment: + <<: *replication + command: [ "ozone","datanode" ] + volumes: + - tmpfs5:/data + - ../..:/opt/hadoop + deploy: + replicas: ${DATANODE5_REPLICA:-1} + datanode6: + <<: *common-config + ports: + - 19864 + - 9882 + environment: + <<: *replication + command: [ "ozone","datanode" ] + volumes: + - tmpfs6:/data + - ../..:/opt/hadoop + deploy: + replicas: ${DATANODE6_REPLICA:-1} om1: <<: *common-config environment: @@ -175,3 +209,15 @@ volumes: o: "size=1g,uid=4000" device: tmpfs type: tmpfs + tmpfs5: + driver: local + driver_opts: + o: "size=1g,uid=5000" + device: tmpfs + type: tmpfs + tmpfs6: + driver: local + driver_opts: + o: "size=1g,uid=6000" + device: tmpfs + type: tmpfs \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozone-balancer/test-ec.sh b/hadoop-ozone/dist/src/main/compose/ozone-balancer/test-ec.sh new file mode 100644 index 
000000000000..7365d104fab1 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozone-balancer/test-ec.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#suite:balancer + +COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +export COMPOSE_DIR +export OM_SERVICE_ID="om" +export OM=om1 +export SCM=scm1 +export OZONE_REPLICATION_FACTOR=3 + +# shellcheck source=/dev/null +source "$COMPOSE_DIR/../testlib.sh" + +start_docker_env +execute_robot_test ${OM} -v REPLICATION:rs-3-2-1024k -v TYPE:EC -v KEYS:7 -v LOWER_LIMIT:1.5 -v UPPER_LIMIT:2.5 -N ozone-balancer-EC balancer/testBalancer.robot diff --git a/hadoop-ozone/dist/src/main/compose/ozone-balancer/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-balancer/test-ratis.sh similarity index 82% rename from hadoop-ozone/dist/src/main/compose/ozone-balancer/test.sh rename to hadoop-ozone/dist/src/main/compose/ozone-balancer/test-ratis.sh index e79979877ba3..a358aea79ad2 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-balancer/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozone-balancer/test-ratis.sh @@ -24,10 +24,12 @@ export OM=om1 export SCM=scm1 export OZONE_REPLICATION_FACTOR=3 +export DATANODE2_REPLICA=0 +export DATANODE5_REPLICA=0 + # shellcheck source=/dev/null source "$COMPOSE_DIR/../testlib.sh" -# We need 4 dataNodes in this tests -start_docker_env 4 +start_docker_env -execute_robot_test ${OM} balancer/testBalancer.robot +execute_robot_test ${OM} -v REPLICATION:THREE -v TYPE:RATIS -v KEYS:3 -v LOWER_LIMIT:3 -v UPPER_LIMIT:3.5 -N ozone-balancer-RATIS balancer/testBalancer.robot diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh b/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh index 69af73f50c94..d2718d04b7d5 100755 --- a/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh +++ b/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh @@ -51,12 +51,12 @@ create_data_dirs() { # be used. ## Else, a binary image will be used.
prepare_for_image() { - local image_version="$1" + local image_version="${1}" if [[ "$image_version" = "$OZONE_CURRENT_VERSION" ]]; then prepare_for_runner_image else - prepare_for_binary_image "$image_version" + prepare_for_binary_image "${image_version}-rocky" fi } diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/.env b/hadoop-ozone/dist/src/main/compose/xcompat/.env index 140975d4bd0e..a673b7f46550 100644 --- a/hadoop-ozone/dist/src/main/compose/xcompat/.env +++ b/hadoop-ozone/dist/src/main/compose/xcompat/.env @@ -17,3 +17,5 @@ HDDS_VERSION=${hdds.version} OZONE_RUNNER_VERSION=${docker.ozone-runner.version} OZONE_RUNNER_IMAGE=apache/ozone-runner +HADOOP_VERSION=${hadoop.version} +OZONE_TESTKRB5_IMAGE=${docker.ozone-testkr5b.image} diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml b/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml index 0bf0f619bd7c..eda143536883 100644 --- a/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml +++ b/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml @@ -16,39 +16,49 @@ services: old_client_1_0_0: - image: apache/ozone:1.0.0 + image: apache/ozone:1.0.0-rocky env_file: - docker-config volumes: - ../..:/opt/ozone + - ../_keytabs:/etc/security/keytabs + - ./krb5.conf:/etc/krb5.conf command: ["sleep","1000000"] old_client_1_1_0: - image: apache/ozone:1.1.0 + image: apache/ozone:1.1.0-rocky env_file: - docker-config volumes: - ../..:/opt/ozone + - ../_keytabs:/etc/security/keytabs + - ./krb5.conf:/etc/krb5.conf command: ["sleep","1000000"] old_client_1_2_1: - image: apache/ozone:1.2.1 + image: apache/ozone:1.2.1-rocky env_file: - docker-config volumes: - ../..:/opt/ozone + - ../_keytabs:/etc/security/keytabs + - ./krb5.conf:/etc/krb5.conf command: ["sleep","1000000"] old_client_1_3_0: - image: apache/ozone:1.3.0 + image: apache/ozone:1.3.0-rocky env_file: - docker-config volumes: - ../..:/opt/ozone + - ../_keytabs:/etc/security/keytabs + - ./krb5.conf:/etc/krb5.conf command: ["sleep","1000000"] old_client_1_4_0: - image: apache/ozone:1.4.0 + image: apache/ozone:1.4.0-rocky env_file: - docker-config volumes: - ../..:/opt/ozone + - ../_keytabs:/etc/security/keytabs + - ./krb5.conf:/etc/krb5.conf command: ["sleep","1000000"] new_client: image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION} @@ -56,6 +66,8 @@ services: - docker-config volumes: - ../..:/opt/hadoop + - ../_keytabs:/etc/security/keytabs + - ./krb5.conf:/etc/krb5.conf environment: OZONE_OPTS: command: ["sleep","1000000"] diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/docker-config b/hadoop-ozone/dist/src/main/compose/xcompat/docker-config index 85099f902d39..1a61aaf4f7e9 100644 --- a/hadoop-ozone/dist/src/main/compose/xcompat/docker-config +++ b/hadoop-ozone/dist/src/main/compose/xcompat/docker-config @@ -14,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+CORE-SITE.XML_fs.defaultFS=ofs://om +CORE-SITE.XML_fs.trash.interval=1 CORE-SITE.XML_fs.ofs.impl=org.apache.hadoop.fs.ozone.RootedOzoneFileSystem OZONE-SITE.XML_hdds.datanode.dir=/data/hdds @@ -22,6 +24,7 @@ OZONE-SITE.XML_hdds.scm.safemode.min.datanode=3 OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.om.address=om OZONE-SITE.XML_ozone.om.http-address=om:9874 +OZONE-SITE.XML_ozone.scm.http-address=scm:9876 OZONE-SITE.XML_ozone.recon.address=recon:9891 OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon OZONE-SITE.XML_ozone.server.default.replication=3 @@ -31,9 +34,98 @@ OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data OZONE-SITE.XML_ozone.scm.names=scm +OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 +OZONE-SITE.XML_ozone.datanode.pipeline.limit=1 OZONE-SITE.XML_recon.om.snapshot.task.interval.delay=1m OZONE-SITE.XML_hdds.scmclient.max.retry.timeout=30s OZONE-SITE.XML_ozone.default.bucket.layout=LEGACY OZONE-SITE.XML_ozone.http.basedir=/tmp/ozone_http -no_proxy=om,recon,scm,s3g,kdc,localhost,127.0.0.1 + +OZONE-SITE.XML_hdds.block.token.enabled=true +OZONE-SITE.XML_hdds.container.token.enabled=true +OZONE-SITE.XML_hdds.grpc.tls.enabled=true + +OZONE-SITE.XML_ozone.security.enabled=true +OZONE-SITE.XML_ozone.acl.enabled=true +OZONE-SITE.XML_ozone.acl.authorizer.class=org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer +OZONE-SITE.XML_ozone.administrators="testuser,recon,om" +OZONE-SITE.XML_ozone.s3.administrators="testuser,recon,om" +OZONE-SITE.XML_ozone.recon.administrators="testuser2" +OZONE-SITE.XML_ozone.s3.administrators="testuser,s3g" + +HDFS-SITE.XML_dfs.datanode.address=0.0.0.0:1019 +HDFS-SITE.XML_dfs.datanode.http.address=0.0.0.0:1012 +CORE-SITE.XML_dfs.data.transfer.protection=authentication +CORE-SITE.XML_hadoop.security.authentication=kerberos +CORE-SITE.XML_hadoop.security.auth_to_local="DEFAULT" +CORE-SITE.XML_hadoop.security.key.provider.path=kms://http@kms:9600/kms + +OZONE-SITE.XML_hdds.scm.kerberos.principal=scm/scm@EXAMPLE.COM +OZONE-SITE.XML_hdds.scm.kerberos.keytab.file=/etc/security/keytabs/scm.keytab +OZONE-SITE.XML_ozone.om.kerberos.principal=om/om@EXAMPLE.COM +OZONE-SITE.XML_ozone.om.kerberos.keytab.file=/etc/security/keytabs/om.keytab +OZONE-SITE.XML_ozone.recon.kerberos.keytab.file=/etc/security/keytabs/recon.keytab +OZONE-SITE.XML_ozone.recon.kerberos.principal=recon/recon@EXAMPLE.COM + +OZONE-SITE.XML_ozone.s3g.kerberos.keytab.file=/etc/security/keytabs/s3g.keytab +OZONE-SITE.XML_ozone.s3g.kerberos.principal=s3g/s3g@EXAMPLE.COM + +OZONE-SITE.XML_ozone.httpfs.kerberos.keytab.file=/etc/security/keytabs/httpfs.keytab +OZONE-SITE.XML_ozone.httpfs.kerberos.principal=httpfs/httpfs@EXAMPLE.COM + +HDFS-SITE.XML_dfs.datanode.kerberos.principal=dn/dn@EXAMPLE.COM +HDFS-SITE.XML_dfs.datanode.keytab.file=/etc/security/keytabs/dn.keytab +HDFS-SITE.XML_dfs.datanode.kerberos.keytab.file=/etc/security/keytabs/dn.keytab +HDFS-SITE.XML_dfs.web.authentication.kerberos.principal=HTTP/ozone@EXAMPLE.COM +HDFS-SITE.XML_dfs.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab + +OZONE-SITE.XML_ozone.security.http.kerberos.enabled=true +OZONE-SITE.XML_ozone.s3g.secret.http.enabled=true +OZONE-SITE.XML_ozone.http.filter.initializers=org.apache.hadoop.security.AuthenticationFilterInitializer + +OZONE-SITE.XML_ozone.om.http.auth.type=kerberos 
+OZONE-SITE.XML_hdds.scm.http.auth.type=kerberos +OZONE-SITE.XML_hdds.datanode.http.auth.type=kerberos +OZONE-SITE.XML_ozone.s3g.http.auth.type=kerberos +OZONE-SITE.XML_ozone.s3g.secret.http.auth.type=kerberos +OZONE-SITE.XML_ozone.httpfs.http.auth.type=kerberos +OZONE-SITE.XML_ozone.recon.http.auth.type=kerberos + +OZONE-SITE.XML_hdds.scm.http.auth.kerberos.principal=HTTP/scm@EXAMPLE.COM +OZONE-SITE.XML_hdds.scm.http.auth.kerberos.keytab=/etc/security/keytabs/scm.keytab +OZONE-SITE.XML_ozone.om.http.auth.kerberos.principal=HTTP/om@EXAMPLE.COM +OZONE-SITE.XML_ozone.om.http.auth.kerberos.keytab=/etc/security/keytabs/om.keytab +OZONE-SITE.XML_hdds.datanode.http.auth.kerberos.principal=HTTP/dn@EXAMPLE.COM +OZONE-SITE.XML_hdds.datanode.http.auth.kerberos.keytab=/etc/security/keytabs/dn.keytab +OZONE-SITE.XML_ozone.s3g.http.auth.kerberos.keytab=/etc/security/keytabs/s3g.keytab +OZONE-SITE.XML_ozone.s3g.http.auth.kerberos.principal=HTTP/s3g@EXAMPLE.COM +OZONE-SITE.XML_ozone.httpfs.http.auth.kerberos.keytab=/etc/security/keytabs/httpfs.keytab +OZONE-SITE.XML_ozone.httpfs.http.auth.kerberos.principal=HTTP/httpfs@EXAMPLE.COM +OZONE-SITE.XML_ozone.recon.http.auth.kerberos.principal=* +OZONE-SITE.XML_ozone.recon.http.auth.kerberos.keytab=/etc/security/keytabs/recon.keytab + +CORE-SITE.XML_hadoop.http.authentication.simple.anonymous.allowed=false +CORE-SITE.XML_hadoop.http.authentication.signature.secret.file=/etc/security/http_secret +CORE-SITE.XML_hadoop.http.authentication.type=kerberos +CORE-SITE.XML_hadoop.http.authentication.kerberos.principal=HTTP/ozone@EXAMPLE.COM +CORE-SITE.XML_hadoop.http.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab + +CORE-SITE.XML_hadoop.security.authorization=true +HADOOP-POLICY.XML_ozone.om.security.client.protocol.acl=* +HADOOP-POLICY.XML_hdds.security.client.datanode.container.protocol.acl=* +HADOOP-POLICY.XML_hdds.security.client.scm.container.protocol.acl=* +HADOOP-POLICY.XML_hdds.security.client.scm.block.protocol.acl=* +HADOOP-POLICY.XML_hdds.security.client.scm.certificate.protocol.acl=* +HADOOP-POLICY.XML_ozone.security.reconfigure.protocol.acl=* + +KMS-SITE.XML_hadoop.kms.proxyuser.s3g.users=* +KMS-SITE.XML_hadoop.kms.proxyuser.s3g.groups=* +KMS-SITE.XML_hadoop.kms.proxyuser.s3g.hosts=* + +OZONE_DATANODE_SECURE_USER=root +JSVC_HOME=/usr/bin + +OZONE_LOG_DIR=/var/log/hadoop + +no_proxy=om,scm,recon,s3g,kdc,localhost,127.0.0.1 diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/krb5.conf b/hadoop-ozone/dist/src/main/compose/xcompat/krb5.conf new file mode 100644 index 000000000000..eefc5b9c6858 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/xcompat/krb5.conf @@ -0,0 +1,41 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +[logging] +default = FILE:/var/log/krb5libs.log +kdc = FILE:/var/log/krb5kdc.log +admin_server = FILE:/var/log/kadmind.log + +[libdefaults] + dns_canonicalize_hostname = false + dns_lookup_realm = false + ticket_lifetime = 24h + renew_lifetime = 7d + forwardable = true + rdns = false + default_realm = EXAMPLE.COM + +[realms] + EXAMPLE.COM = { + kdc = kdc + admin_server = kdc + max_renewable_life = 7d + } + +[domain_realm] + .example.com = EXAMPLE.COM + example.com = EXAMPLE.COM + diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/new-cluster.yaml b/hadoop-ozone/dist/src/main/compose/xcompat/new-cluster.yaml index 6e3ff6cfbc9c..32059140ce91 100644 --- a/hadoop-ozone/dist/src/main/compose/xcompat/new-cluster.yaml +++ b/hadoop-ozone/dist/src/main/compose/xcompat/new-cluster.yaml @@ -18,14 +18,39 @@ x-new-config: &new-config image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION} + dns_search: . env_file: - docker-config volumes: - ../..:/opt/hadoop + - ../_keytabs:/etc/security/keytabs + - ./krb5.conf:/etc/krb5.conf services: + kdc: + image: ${OZONE_TESTKRB5_IMAGE} + hostname: kdc + dns_search: . + volumes: + - ../..:/opt/hadoop + - ../_keytabs:/etc/security/keytabs + command: [ "krb5kdc","-n" ] + kms: + image: apache/hadoop:${HADOOP_VERSION} + hostname: kms + dns_search: . + ports: + - 9600:9600 + env_file: + - ./docker-config + environment: + HADOOP_CONF_DIR: /opt/hadoop/etc/hadoop + volumes: + - ../../libexec/transformation.py:/opt/transformation.py + command: [ "hadoop", "kms" ] datanode: <<: *new-config + hostname: dn ports: - 19864 - 9882 @@ -34,15 +59,17 @@ services: command: ["ozone","datanode"] om: <<: *new-config + hostname: om environment: ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION - OZONE_OPTS: + OZONE_OPTS: -Dcom.sun.net.ssl.checkRevocation=false ports: - 9874:9874 - 9862:9862 command: ["ozone","om"] recon: <<: *new-config + hostname: recon ports: - 9888:9888 environment: @@ -50,6 +77,7 @@ services: command: ["ozone","recon"] s3g: <<: *new-config + hostname: s3g environment: OZONE_OPTS: ports: @@ -57,9 +85,12 @@ services: command: ["ozone","s3g"] scm: <<: *new-config + hostname: scm ports: - 9876:9876 + - 9860:9860 environment: ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "${OZONE_SAFEMODE_MIN_DATANODES:-1}" OZONE_OPTS: command: ["ozone","scm"] diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/old-cluster.yaml b/hadoop-ozone/dist/src/main/compose/xcompat/old-cluster.yaml index c603bb51df32..d1b6e56a0847 100644 --- a/hadoop-ozone/dist/src/main/compose/xcompat/old-cluster.yaml +++ b/hadoop-ozone/dist/src/main/compose/xcompat/old-cluster.yaml @@ -17,15 +17,40 @@ # reusable fragments (see https://docs.docker.com/compose/compose-file/#extension-fields) x-old-config: &old-config - image: apache/ozone:${OZONE_VERSION} + image: apache/ozone:${OZONE_VERSION}-rocky + dns_search: . env_file: - docker-config volumes: - ../..:/opt/ozone + - ../_keytabs:/etc/security/keytabs + - ./krb5.conf:/etc/krb5.conf services: + kdc: + image: ${OZONE_TESTKRB5_IMAGE} + hostname: kdc + dns_search: . + volumes: + - ../..:/opt/ozone + - ../_keytabs:/etc/security/keytabs + command: [ "krb5kdc","-n" ] + kms: + image: apache/hadoop:${HADOOP_VERSION} + hostname: kms + dns_search: . 
+ ports: + - 9600:9600 + env_file: + - ./docker-config + environment: + HADOOP_CONF_DIR: /opt/hadoop/etc/hadoop + volumes: + - ../../libexec/transformation.py:/opt/transformation.py + command: [ "hadoop", "kms" ] datanode: <<: *old-config + hostname: dn ports: - 19864 - 9882 @@ -34,8 +59,10 @@ services: command: ["ozone","datanode"] om: <<: *old-config + hostname: om environment: ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION + OZONE_OPTS: -Dcom.sun.net.ssl.checkRevocation=false HADOOP_OPTS: ports: - 9874:9874 @@ -43,6 +70,7 @@ services: command: ["ozone","om"] recon: <<: *old-config + hostname: recon ports: - 9888:9888 environment: @@ -50,6 +78,7 @@ services: command: ["ozone","recon"] s3g: <<: *old-config + hostname: s3g environment: HADOOP_OPTS: ports: @@ -57,9 +86,11 @@ services: command: ["ozone","s3g"] scm: <<: *old-config + hostname: scm ports: - 9876:9876 environment: ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "${OZONE_SAFEMODE_MIN_DATANODES:-1}" HADOOP_OPTS: command: ["ozone","scm"] diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/test.sh b/hadoop-ozone/dist/src/main/compose/xcompat/test.sh index 695d8bf06abc..8774cf2f6322 100755 --- a/hadoop-ozone/dist/src/main/compose/xcompat/test.sh +++ b/hadoop-ozone/dist/src/main/compose/xcompat/test.sh @@ -22,11 +22,15 @@ export COMPOSE_DIR basename=$(basename ${COMPOSE_DIR}) current_version="${ozone.version}" -old_versions="1.0.0 1.1.0 1.2.1 1.3.0 1.4.0" # container is needed for each version in clients.yaml +# TODO: debug acceptance test failures for client versions 1.0.0 on secure clusters +old_versions="1.1.0 1.2.1 1.3.0 1.4.0" # container is needed for each version in clients.yaml # shellcheck source=hadoop-ozone/dist/src/main/compose/testlib.sh source "${COMPOSE_DIR}/../testlib.sh" +export SECURITY_ENABLED=true +: ${OZONE_BUCKET_KEY_NAME:=key1} + old_client() { OZONE_DIR=/opt/ozone container=${client} @@ -40,24 +44,40 @@ new_client() { "$@" } +_kinit() { + execute_command_in_container ${container} kinit -k -t /etc/security/keytabs/testuser.keytab testuser/scm@EXAMPLE.COM +} + _init() { + _kinit execute_command_in_container ${container} ozone freon ockg -n1 -t1 -p warmup } _write() { + _kinit execute_robot_test ${container} -N "xcompat-cluster-${cluster_version}-client-${client_version}-write" -v SUFFIX:${client_version} compatibility/write.robot } _read() { + _kinit local data_version="$1" execute_robot_test ${container} -N "xcompat-cluster-${cluster_version}-client-${client_version}-read-${data_version}" -v SUFFIX:${data_version} compatibility/read.robot } +test_bucket_encryption() { + _kinit + execute_robot_test ${container} -N "xcompat-cluster-${cluster_version}-client-${client_version}" -v SUFFIX:${client_version} security/bucket-encryption.robot +} + test_cross_compatibility() { echo "Starting cluster with COMPOSE_FILE=${COMPOSE_FILE}" OZONE_KEEP_RESULTS=true start_docker_env + execute_command_in_container kms hadoop key create ${OZONE_BUCKET_KEY_NAME} + new_client test_bucket_encryption + + container=scm _kinit execute_command_in_container scm ozone freon ockg -n1 -t1 -p warmup new_client _write new_client _read ${current_version} @@ -65,6 +85,8 @@ test_cross_compatibility() { for client_version in "$@"; do client="old_client_${client_version//./_}" + old_client test_bucket_encryption + old_client _write old_client _read ${client_version} @@ -79,7 +101,8 @@ test_ec_cross_compatibility() { echo "Running Erasure Coded storage backward compatibility 
tests." # local cluster_versions_with_ec="1.3.0 1.4.0 ${current_version}" local cluster_versions_with_ec="${current_version}" # until HDDS-11334 - local non_ec_client_versions="1.0.0 1.1.0 1.2.1" + # TODO: debug acceptance test failures for client versions 1.0.0 on secure clusters + local non_ec_client_versions="1.1.0 1.2.1" for cluster_version in ${cluster_versions_with_ec}; do export COMPOSE_FILE=new-cluster.yaml:clients.yaml cluster_version=${cluster_version} @@ -102,12 +125,14 @@ test_ec_cross_compatibility() { local prefix=$(LC_CTYPE=C tr -dc '[:alnum:]' < /dev/urandom | head -c 5 | tr '[:upper:]' '[:lower:]') OZONE_DIR=/opt/hadoop + new_client _kinit execute_robot_test new_client --include setup-ec-data -N "xcompat-cluster-${cluster_version}-setup-data" -v prefix:"${prefix}" ec/backward-compat.robot OZONE_DIR=/opt/ozone for client_version in ${non_ec_client_versions}; do client="old_client_${client_version//./_}" unset OUTPUT_PATH + container="${client}" _kinit execute_robot_test "${client}" --include test-ec-compat -N "xcompat-cluster-${cluster_version}-client-${client_version}-read-${cluster_version}" -v prefix:"${prefix}" ec/backward-compat.robot done diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt index c28483c67356..9cb9202be0b0 100644 --- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt +++ b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt @@ -407,6 +407,7 @@ Apache License 2.0 org.apache.ratis:ratis-proto org.apache.ratis:ratis-server org.apache.ratis:ratis-server-api + org.apache.ratis:ratis-shell org.apache.ratis:ratis-thirdparty-misc org.apache.ratis:ratis-tools org.apache.thrift:libthrift diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt index 042c9380e4af..1e07ec1a2c23 100644 --- a/hadoop-ozone/dist/src/main/license/jar-report.txt +++ b/hadoop-ozone/dist/src/main/license/jar-report.txt @@ -252,6 +252,7 @@ share/ozone/lib/ratis-netty.jar share/ozone/lib/ratis-proto.jar share/ozone/lib/ratis-server-api.jar share/ozone/lib/ratis-server.jar +share/ozone/lib/ratis-shell.jar share/ozone/lib/ratis-thirdparty-misc.jar share/ozone/lib/ratis-tools.jar share/ozone/lib/re2j.jar diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot index c50daa724dad..83c0731ff76c 100644 --- a/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot @@ -84,6 +84,18 @@ Report containers as JSON Should contain ${output} stats Should contain ${output} samples +List all containers + ${output} = Execute ozone admin container list --all + Should contain ${output} OPEN + +List all containers according to count (batchSize) + ${output} = Execute ozone admin container list --all --count 10 + Should contain ${output} OPEN + +List all containers from a particular container ID + ${output} = Execute ozone admin container list --all --start 1 + Should contain ${output} OPEN + Close container ${container} = Execute ozone admin container list --state OPEN | jq -r 'select(.replicationConfig.replicationFactor == "THREE") | .containerID' | head -1 Execute ozone admin container close "${container}" diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/scmrole.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/scmrole.robot index 297275485612..f16289394517 100644 --- 
a/hadoop-ozone/dist/src/main/smoketest/admincli/scmrole.robot +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/scmrole.robot @@ -30,4 +30,8 @@ Run scm roles List scm roles as JSON ${output} = Execute ozone admin scm roles --json ${leader} = Execute echo '${output}' | jq -r '.[] | select(.raftPeerRole == "LEADER")' - Should Not Be Equal ${leader} ${EMPTY} \ No newline at end of file + Should Not Be Equal ${leader} ${EMPTY} + +List scm roles as TABLE + ${output} = Execute ozone admin scm roles --table + Should Match Regexp ${output} \\|.*LEADER.* \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/balancer/testBalancer.robot b/hadoop-ozone/dist/src/main/smoketest/balancer/testBalancer.robot index 4299afe5f2d1..06d8a3416f00 100644 --- a/hadoop-ozone/dist/src/main/smoketest/balancer/testBalancer.robot +++ b/hadoop-ozone/dist/src/main/smoketest/balancer/testBalancer.robot @@ -16,6 +16,7 @@ *** Settings *** Documentation Smoketest ozone cluster startup Library OperatingSystem +Library String Library Collections Resource ../commonlib.robot Resource ../ozone-lib/shell.robot @@ -35,7 +36,7 @@ Prepare For Tests Execute dd if=/dev/urandom of=/tmp/100mb bs=1048576 count=100 Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab Execute ozone sh volume create /${VOLUME} - Execute ozone sh bucket create /${VOLUME}/${BUCKET} + Execute ozone sh bucket create --replication ${REPLICATION} --type ${TYPE} /${VOLUME}/${BUCKET} Datanode In Maintenance Mode @@ -67,7 +68,7 @@ Run Container Balancer Wait Finish Of Balancing ${result} = Execute ozone admin containerbalancer status Should Contain ${result} ContainerBalancer is Running. - Wait Until Keyword Succeeds 3min 10sec ContainerBalancer is Not Running + Wait Until Keyword Succeeds 6min 10sec ContainerBalancer is Not Running Sleep 60000ms Verify Verbose Balancer Status @@ -111,7 +112,7 @@ Create Multiple Keys ${fileName} = Set Variable file-${INDEX}.txt ${key} = Set Variable /${VOLUME}/${BUCKET}/${fileName} LOG ${fileName} - Create Key ${key} ${file} + Create Key ${key} ${file} --replication=${REPLICATION} --type=${TYPE} Key Should Match Local File ${key} ${file} END @@ -126,14 +127,14 @@ Get Uuid Close All Containers FOR ${INDEX} IN RANGE 15 - ${container} = Execute ozone admin container list --state OPEN | jq -r 'select(.replicationConfig.replicationFactor == "THREE") | .containerID' | head -1 + ${container} = Execute ozone admin container list --state OPEN | jq -r 'select(.replicationConfig.data == 3) | .containerID' | head -1 EXIT FOR LOOP IF "${container}" == "${EMPTY}" ${message} = Execute And Ignore Error ozone admin container close "${container}" Run Keyword If '${message}' != '${EMPTY}' Should Contain ${message} is in closing state ${output} = Execute ozone admin container info "${container}" Should contain ${output} CLOS END - Wait until keyword succeeds 3min 10sec All container is closed + Wait until keyword succeeds 4min 10sec All container is closed All container is closed ${output} = Execute ozone admin container list --state OPEN @@ -146,7 +147,7 @@ Get Datanode Ozone Used Bytes Info [return] ${result} ** Test Cases *** -Verify Container Balancer for RATIS containers +Verify Container Balancer for RATIS/EC containers Prepare For Tests Datanode In Maintenance Mode @@ -154,7 +155,7 @@ Verify Container Balancer for RATIS containers ${uuid} = Get Uuid Datanode Usageinfo ${uuid} - Create Multiple Keys 3 + Create Multiple Keys ${KEYS} Close All Containers @@ -175,8 +176,10 @@ Verify 
Container Balancer for RATIS containers ${datanodeOzoneUsedBytesInfoAfterContainerBalancing} = Get Datanode Ozone Used Bytes Info ${uuid} Should Not Be Equal As Integers ${datanodeOzoneUsedBytesInfo} ${datanodeOzoneUsedBytesInfoAfterContainerBalancing} - Should Be True ${datanodeOzoneUsedBytesInfoAfterContainerBalancing} < ${SIZE} * 3.5 - Should Be True ${datanodeOzoneUsedBytesInfoAfterContainerBalancing} > ${SIZE} * 3 + #We need to ensure that after balancing, the amount of data recorded on each datanode falls within the following ranges: + #{SIZE}*3 < used < {SIZE}*3.5 for RATIS containers, and {SIZE}*1.5 < used < {SIZE}*2.5 for EC containers. + Should Be True ${datanodeOzoneUsedBytesInfoAfterContainerBalancing} < ${SIZE} * ${UPPER_LIMIT} + Should Be True ${datanodeOzoneUsedBytesInfoAfterContainerBalancing} > ${SIZE} * ${LOWER_LIMIT} diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot index e006e154af1b..7157c232d1a9 100644 --- a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot +++ b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot @@ -32,10 +32,12 @@ Write keys Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab Execute ozone sh volume create ${VOLUME} Execute ozone sh bucket create ${VOLUME}/${BUCKET} -l OBJECT_STORE - Execute dd if=/dev/urandom of=${TEMP_DIR}/${TESTFILE} bs=100000 count=15 - Execute ozone sh key put ${VOLUME}/${BUCKET}/${TESTFILE}1 ${TEMP_DIR}/${TESTFILE} - Execute ozone sh key put ${VOLUME}/${BUCKET}/${TESTFILE}2 ${TEMP_DIR}/${TESTFILE} - Execute ozone sh key put ${VOLUME}/${BUCKET}/${TESTFILE}3 ${TEMP_DIR}/${TESTFILE} + Execute dd if=/dev/urandom of=${TEMP_DIR}/${TESTFILE}1 bs=100 count=10 + Execute ozone sh key put ${VOLUME}/${BUCKET}/${TESTFILE}1 ${TEMP_DIR}/${TESTFILE}1 + Execute dd if=/dev/urandom of=${TEMP_DIR}/${TESTFILE}2 bs=100 count=15 + Execute ozone sh key put ${VOLUME}/${BUCKET}/${TESTFILE}2 ${TEMP_DIR}/${TESTFILE}2 + Execute dd if=/dev/urandom of=${TEMP_DIR}/${TESTFILE}3 bs=100 count=20 + Execute ozone sh key put ${VOLUME}/${BUCKET}/${TESTFILE}3 ${TEMP_DIR}/${TESTFILE}3 Execute ozone sh key addacl -a user:systest:a ${VOLUME}/${BUCKET}/${TESTFILE}3 *** Test Cases *** @@ -71,6 +73,8 @@ Test ozone debug ldb scan Should not contain ${output} objectID Should not contain ${output} dataSize Should not contain ${output} keyLocationVersions + +Test ozone debug ldb scan with filter option success # test filter option with one filter ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="keyName:equals:testfile2" Should not contain ${output} testfile1 @@ -91,3 +95,42 @@ Test ozone debug ldb scan Should not contain ${output} testfile1 Should not contain ${output} testfile2 Should not contain ${output} testfile3 + # test filter option for size > 1200 + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="dataSize:greater:1200" + Should not contain ${output} testfile1 + Should contain ${output} testfile2 + Should contain ${output} testfile3 + # test filter option for size < 1200 + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="dataSize:lesser:1200" + Should contain ${output} testfile1 + Should not contain ${output} testfile2 + Should not contain ${output} testfile3 + # test filter option with no records match both filters + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan 
--cf=keyTable --filter="dataSize:lesser:1200,keyName:equals:testfile2" + Should not contain ${output} testfile1 + Should not contain ${output} testfile2 + Should not contain ${output} testfile3 + # test filter option with regex matching numbers + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="dataSize:regex:^1[0-2]{3}$" + Should contain ${output} testfile1 + Should not contain ${output} testfile2 + Should not contain ${output} testfile3 + # test filter option with regex matching string + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="keyName:regex:^test.*[0-1]$" + Should contain ${output} testfile1 + Should not contain ${output} testfile2 + Should not contain ${output} testfile3 + +Test ozone debug ldb scan with filter option failure + # test filter option with invalid operator + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="dataSize:lesserthan:1200" + Should contain ${output} Error: Invalid operator + # test filter option with invalid format + ${output} = Execute And Ignore Error ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="dataSize:1200" + Should contain ${output} Error: Invalid format + # test filter option with invalid field + ${output} = Execute And Ignore Error ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="size:equals:1200" + Should contain ${output} Error: Invalid field + # test filter option for lesser/greater operator on non-numeric field + ${output} = Execute And Ignore Error ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="keyName:lesser:k1" + Should contain ${output} only on numeric values diff --git a/hadoop-ozone/dist/src/main/smoketest/omha/om-roles.robot b/hadoop-ozone/dist/src/main/smoketest/omha/om-roles.robot index 54e44bce36b2..3513ec12de16 100644 --- a/hadoop-ozone/dist/src/main/smoketest/omha/om-roles.robot +++ b/hadoop-ozone/dist/src/main/smoketest/omha/om-roles.robot @@ -28,6 +28,9 @@ Assert Leader Present in JSON [Arguments] ${output} ${leader} = Execute echo '${output}' | jq '.[] | select(.[] | .serverRole == "LEADER")' Should Not Be Equal ${leader} ${EMPTY} +Assert Leader Present in TABLE + [Arguments] ${output} + Should Match Regexp ${output} \\|.*LEADER.* *** Test Cases *** List om roles with OM service ID passed @@ -53,3 +56,15 @@ List om roles as JSON without OM service ID passed Assert Leader Present in JSON ${output_without_id_passed} ${output_without_id_passed} = Execute And Ignore Error ozone admin --set=ozone.om.service.ids=omservice,omservice2 om roles --json Should Contain ${output_without_id_passed} no Ozone Manager service ID specified + +List om roles as TABLE with OM service ID passed + ${output_with_id_passed} = Execute ozone admin om roles --service-id=omservice --table + Assert Leader Present in TABLE ${output_with_id_passed} + ${output_with_id_passed} = Execute ozone admin --set=ozone.om.service.ids=omservice,omservice2 om roles --service-id=omservice --table + Assert Leader Present in TABLE ${output_with_id_passed} + +List om roles as TABLE without OM service ID passed + ${output_without_id_passed} = Execute ozone admin om roles --table + Assert Leader Present in TABLE ${output_without_id_passed} + ${output_without_id_passed} = Execute And Ignore Error ozone admin --set=ozone.om.service.ids=omservice,omservice2 om roles --table + Should Contain ${output_without_id_passed} no Ozone Manager service ID specified \ No newline at end of file diff 
--git a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot index d62a217e606a..e630fe6cdaec 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot @@ -20,6 +20,7 @@ Library String Library DateTime Resource ../commonlib.robot Resource commonawslib.robot +Resource mpu_lib.robot Test Timeout 5 minutes Suite Setup Setup Multipart Tests Suite Teardown Teardown Multipart Tests @@ -61,17 +62,8 @@ Test Multipart Upload With Adjusted Length Verify Multipart Upload ${BUCKET} multipart/adjusted_length_${PREFIX} /tmp/part1 /tmp/part2 Test Multipart Upload - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey - Should contain ${result} UploadId -# initiate again - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey - ${nextUploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey - Should contain ${result} UploadId + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/multipartKey + ${nextUploadID} = Initiate MPU ${BUCKET} ${PREFIX}/multipartKey Should Not Be Equal ${uploadID} ${nextUploadID} # upload part @@ -79,33 +71,15 @@ Test Multipart Upload # upload we get error entity too small. So, considering further complete # multipart upload, uploading each part as 5MB file, exception is for last part - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey --part-number 1 --body /tmp/part1 --upload-id ${nextUploadID} - Should contain ${result} ETag -# override part - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey --part-number 1 --body /tmp/part1 --upload-id ${nextUploadID} - Should contain ${result} ETag + Upload MPU part ${BUCKET} ${PREFIX}/multipartKey ${nextUploadID} 1 /tmp/part1 + Upload MPU part ${BUCKET} ${PREFIX}/multipartKey ${nextUploadID} 1 /tmp/part1 Test Multipart Upload Complete - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --metadata="custom-key1=custom-value1,custom-key2=custom-value2,gdprEnabled=true" --tagging="tag-key1=tag-value1&tag-key2=tag-value2" - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey - Should contain ${result} UploadId + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/multipartKey1 0 --metadata="custom-key1=custom-value1,custom-key2=custom-value2,gdprEnabled=true" --tagging="tag-key1=tag-value1&tag-key2=tag-value2" -#upload parts - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --part-number 1 --body /tmp/part1 --upload-id ${uploadID} - ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - ${part1Md5Sum} = Execute md5sum /tmp/part1 | awk '{print $1}' - Should Be Equal As Strings ${eTag1} ${part1Md5Sum} - - Execute echo "Part2" > /tmp/part2 - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --part-number 2 --body /tmp/part2 --upload-id ${uploadID} - ${eTag2} = Execute and 
checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - ${part2Md5Sum} = Execute md5sum /tmp/part2 | awk '{print $1}' - Should Be Equal As Strings ${eTag2} ${part2Md5Sum} + ${eTag1} = Upload MPU part ${BUCKET} ${PREFIX}/multipartKey1 ${uploadID} 1 /tmp/part1 + ${eTag2} = Upload MPU part ${BUCKET} ${PREFIX}/multipartKey1 ${uploadID} 2 /tmp/part2 #complete multipart upload without any parts ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 255 @@ -113,12 +87,8 @@ Test Multipart Upload Complete Should contain ${result} must specify at least one part #complete multipart upload - ${result} = Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]' - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey1 - ${resultETag} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 + ${resultETag} = Complete MPU ${BUCKET} ${PREFIX}/multipartKey1 ${uploadID} {ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2} ${expectedResultETag} = Execute echo -n ${eTag1}${eTag2} | md5sum | awk '{print $1}' - Should contain ${result} ETag Should Be Equal As Strings ${resultETag} "${expectedResultETag}-2" #check whether the user defined metadata and parts count can be retrieved @@ -163,116 +133,69 @@ Test Multipart Upload Complete Test Multipart Upload with user defined metadata size larger than 2 KB ${custom_metadata_value} = Generate Random String 3000 - ${result} = Execute AWSS3APICli and checkrc create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/mpuWithLargeMetadata --metadata="custom-key1=${custom_metadata_value}" 255 + ${result} = Initiate MPU ${BUCKET} ${PREFIX}/mpuWithLargeMetadata 255 --metadata="custom-key1=${custom_metadata_value}" Should contain ${result} MetadataTooLarge Should not contain ${result} custom-key1: ${custom_metadata_value} Test Multipart Upload Complete Entity too small - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey2 - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey - Should contain ${result} UploadId - -#upload parts - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey2 --part-number 1 --body /tmp/10kb --upload-id ${uploadID} - ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey2 --part-number 2 --body /tmp/10kb --upload-id ${uploadID} - ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - -#complete multipart upload - ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey2 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]' 255 + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/multipartKey2 + ${parts} = Upload MPU parts ${BUCKET} ${PREFIX}/multipartKey2 ${uploadID} /tmp/10kb /tmp/10kb + ${result} = Complete MPU ${BUCKET} ${PREFIX}/multipartKey2 ${uploadID} ${parts} 255 Should contain ${result} EntityTooSmall Test Multipart Upload Complete Invalid part errors and complete mpu 
with few parts - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey - Should contain ${result} UploadId + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/multipartKey3 #complete multipart upload when no parts uploaded - ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --multipart-upload 'Parts=[{ETag=etag1,PartNumber=1},{ETag=etag2,PartNumber=2}]' 255 + ${result} = Complete MPU ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} {ETag=etag1,PartNumber=1},{ETag=etag2,PartNumber=2} 255 Should contain ${result} InvalidPart - ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --multipart-upload 'Parts=[{ETag=etag1,PartNumber=2},{ETag=etag2,PartNumber=1}]' 255 + ${result} = Complete MPU ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} {ETag=etag1,PartNumber=2},{ETag=etag2,PartNumber=1} 255 Should contain ${result} InvalidPart #upload parts - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --part-number 1 --body /tmp/part1 --upload-id ${uploadID} - ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - - - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --part-number 2 --body /tmp/part1 --upload-id ${uploadID} - ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - - Execute echo "Part3" > /tmp/part3 - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --part-number 3 --body /tmp/part3 --upload-id ${uploadID} - ${eTag3} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag + ${eTag1} = Upload MPU part ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} 1 /tmp/part1 + ${eTag2} = Upload MPU part ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} 2 /tmp/part1 + ${eTag3} = Upload MPU part ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} 3 /tmp/part2 #complete multipart upload - ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --multipart-upload 'Parts=[{ETag=etag1,PartNumber=1},{ETag=etag2,PartNumber=2}]' 255 + ${result} = Complete MPU ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} {ETag=etag1,PartNumber=1},{ETag=etag2,PartNumber=2} 255 Should contain ${result} InvalidPart - ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=etag2,PartNumber=2}]' 255 + ${result} = Complete MPU ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} {ETag=${eTag1},PartNumber=1},{ETag=etag2,PartNumber=2} 255 Should contain ${result} InvalidPart - ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=4},{ETag=etag2,PartNumber=2}]' 255 + ${result} = Complete MPU ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} {ETag=${eTag1},PartNumber=4},{ETag=etag2,PartNumber=2} 255 Should contain ${result} InvalidPartOrder #complete 
multipart upload(merge with few parts) - ${result} = Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag3},PartNumber=3}]' - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey3 - Should contain ${result} ETag + ${result} = Complete MPU ${BUCKET} ${PREFIX}/multipartKey3 ${uploadID} {ETag=${eTag1},PartNumber=1},{ETag=${eTag3},PartNumber=3} ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 /tmp/${PREFIX}-multipartKey3.result - Execute cat /tmp/part1 /tmp/part3 > /tmp/${PREFIX}-multipartKey3 + Execute cat /tmp/part1 /tmp/part2 > /tmp/${PREFIX}-multipartKey3 Compare files /tmp/${PREFIX}-multipartKey3 /tmp/${PREFIX}-multipartKey3.result ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --part-number 1 /tmp/${PREFIX}-multipartKey3-part1.result Compare files /tmp/part1 /tmp/${PREFIX}-multipartKey3-part1.result - ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --part-number 3 /tmp/${PREFIX}-multipartKey3-part3.result - Compare files /tmp/part3 /tmp/${PREFIX}-multipartKey3-part3.result + ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/multipartKey3 --part-number 3 /tmp/${PREFIX}-multipartKey3-part2.result + Compare files /tmp/part2 /tmp/${PREFIX}-multipartKey3-part2.result Test abort Multipart upload - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey4 --storage-class REDUCED_REDUNDANCY - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey - Should contain ${result} UploadId - - ${result} = Execute AWSS3APICli and checkrc abort-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey4 --upload-id ${uploadID} 0 + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/multipartKey4 0 --storage-class REDUCED_REDUNDANCY + ${result} = Abort MPU ${BUCKET} ${PREFIX}/multipartKey4 ${uploadID} 0 Test abort Multipart upload with invalid uploadId - ${result} = Execute AWSS3APICli and checkrc abort-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey5 --upload-id "random" 255 + ${result} = Abort MPU ${BUCKET} ${PREFIX}/multipartKey5 "random" 255 Upload part with Incorrect uploadID - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey - Execute echo "Multipart upload" > /tmp/testfile - ${result} = Execute AWSS3APICli and checkrc upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey --part-number 1 --body /tmp/testfile --upload-id "random" 255 + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/multipartKey + ${result} = Upload MPU part ${BUCKET} ${PREFIX}/multipartKey "no-such-upload-id" 1 /tmp/10kb 255 Should contain ${result} NoSuchUpload Test list parts #initiate multipart upload - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey5 - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/multipartKey - Should contain ${result} UploadId + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/multipartKey5 #upload parts - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey5 --part-number 1 
--body /tmp/part1 --upload-id ${uploadID} - ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - - Execute echo "Part2" > /tmp/part2 - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey5 --part-number 2 --body /tmp/part2 --upload-id ${uploadID} - ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag + ${eTag1} = Upload MPU part ${BUCKET} ${PREFIX}/multipartKey5 ${uploadID} 1 /tmp/part1 + ${eTag2} = Upload MPU part ${BUCKET} ${PREFIX}/multipartKey5 ${uploadID} 2 /tmp/part2 #list parts ${result} = Execute AWSS3APICli list-parts --bucket ${BUCKET} --key ${PREFIX}/multipartKey5 --upload-id ${uploadID} @@ -295,7 +218,7 @@ Test list parts Should contain ${result} STANDARD #finally abort it - ${result} = Execute AWSS3APICli and checkrc abort-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey5 --upload-id ${uploadID} 0 + ${result} = Abort MPU ${BUCKET} ${PREFIX}/multipartKey5 ${uploadID} 0 Test Multipart Upload with the simplified aws s3 cp API Execute AWSS3Cli cp /tmp/22mb s3://${BUCKET}/mpyawscli @@ -307,19 +230,14 @@ Test Multipart Upload Put With Copy ${result} = Execute AWSS3APICli put-object --bucket ${BUCKET} --key ${PREFIX}/copytest/source --body /tmp/part1 - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/copytest/destination - - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} UploadId - + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/copytest/destination ${result} = Execute AWSS3APICli upload-part-copy --bucket ${BUCKET} --key ${PREFIX}/copytest/destination --upload-id ${uploadID} --part-number 1 --copy-source ${BUCKET}/${PREFIX}/copytest/source Should contain ${result} ETag Should contain ${result} LastModified ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.CopyPartResult.ETag' 0 - Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/copytest/destination --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1}]' + Complete MPU ${BUCKET} ${PREFIX}/copytest/destination ${uploadID} {ETag=${eTag1},PartNumber=1} Execute AWSS3APICli get-object --bucket ${BUCKET} --key ${PREFIX}/copytest/destination /tmp/part-result Compare files /tmp/part1 /tmp/part-result @@ -328,11 +246,7 @@ Test Multipart Upload Put With Copy and range ${result} = Execute AWSS3APICli put-object --bucket ${BUCKET} --key ${PREFIX}/copyrange/source --body /tmp/10mb - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/copyrange/destination - - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} UploadId + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/copyrange/destination ${result} = Execute AWSS3APICli upload-part-copy --bucket ${BUCKET} --key ${PREFIX}/copyrange/destination --upload-id ${uploadID} --part-number 1 --copy-source ${BUCKET}/${PREFIX}/copyrange/source --copy-source-range bytes=0-10485757 Should contain ${result} ETag @@ -345,7 +259,7 @@ Test Multipart Upload Put With Copy and range ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.CopyPartResult.ETag' 0 - Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/copyrange/destination --multipart-upload 
'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]' + Complete MPU ${BUCKET} ${PREFIX}/copyrange/destination ${uploadID} {ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2} Execute AWSS3APICli get-object --bucket ${BUCKET} --key ${PREFIX}/copyrange/destination /tmp/part-result Compare files /tmp/10mb /tmp/part-result @@ -357,11 +271,7 @@ Test Multipart Upload Put With Copy and range with IfModifiedSince ${result} = Execute AWSS3APICli put-object --bucket ${BUCKET} --key ${PREFIX}/copyrange/source --body /tmp/10mb - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/copyrange/destination - - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} UploadId + ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/copyrange/destination #calc time-to-sleep from time-last-modified plus a few seconds ${result} = Execute AWSS3APICli head-object --bucket ${BUCKET} --key ${PREFIX}/copyrange/source @@ -396,24 +306,14 @@ Test Multipart Upload Put With Copy and range with IfModifiedSince ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.CopyPartResult.ETag' 0 - - Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/copyrange/destination --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]' + Complete MPU ${BUCKET} ${PREFIX}/copyrange/destination ${uploadID} {ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2} Execute AWSS3APICli get-object --bucket ${BUCKET} --key ${PREFIX}/copyrange/destination /tmp/part-result Compare files /tmp/10mb /tmp/part-result Test Multipart Upload list - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/listtest/key1 - ${uploadID1} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/listtest/key1 - Should contain ${result} UploadId - - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/listtest/key2 - ${uploadID2} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} ${PREFIX}/listtest/key2 - Should contain ${result} UploadId + ${uploadID1} = Initiate MPU ${BUCKET} ${PREFIX}/listtest/key1 + ${uploadID2} = Initiate MPU ${BUCKET} ${PREFIX}/listtest/key2 ${result} = Execute AWSS3APICli list-multipart-uploads --bucket ${BUCKET} --prefix ${PREFIX}/listtest Should contain ${result} ${uploadID1} diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/bucketdelete.robot b/hadoop-ozone/dist/src/main/smoketest/s3/bucketdelete.robot index 607a7dee9600..a382970a6dec 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/bucketdelete.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/bucketdelete.robot @@ -19,6 +19,7 @@ Library OperatingSystem Library String Resource ../commonlib.robot Resource commonawslib.robot +Resource mpu_lib.robot Test Timeout 5 minutes Suite Setup Setup s3 tests @@ -48,17 +49,13 @@ Delete bucket with incomplete multipart uploads [tags] no-bucket-type ${bucket} = Create bucket - # initiate incomplete multipart uploads (multipart upload is initiated but not completed/aborted) - ${initiate_result} = Execute AWSS3APICli create-multipart-upload --bucket ${bucket} --key incomplete-multipartkey - ${uploadID} = Execute echo '${initiate_result}' | jq -r '.UploadId' - Should contain ${initiate_result} ${bucket} - Should 
contain ${initiate_result} incomplete-multipartkey - Should contain ${initiate_result} UploadId + # initiate incomplete multipart upload (multipart upload is initiated but not completed/aborted) + ${uploadID} = Initiate MPU ${bucket} incomplete-multipartkey # bucket deletion should fail since there is still incomplete multipart upload ${delete_fail_result} = Execute AWSS3APICli and checkrc delete-bucket --bucket ${bucket} 255 Should contain ${delete_fail_result} BucketNotEmpty # after aborting the multipart upload, the bucket deletion should succeed - ${abort_result} = Execute AWSS3APICli and checkrc abort-multipart-upload --bucket ${bucket} --key incomplete-multipartkey --upload-id ${uploadID} 0 - ${delete_result} = Execute AWSS3APICli and checkrc delete-bucket --bucket ${bucket} 0 \ No newline at end of file + ${abort_result} = Abort MPU ${bucket} incomplete-multipartkey ${uploadID} + ${delete_result} = Execute AWSS3APICli and checkrc delete-bucket --bucket ${bucket} 0 diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot index 45dee9270bd8..44ad919555de 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot @@ -172,33 +172,6 @@ Generate random prefix ${random} = Generate Ozone String Set Global Variable ${PREFIX} ${random} -Perform Multipart Upload - [arguments] ${bucket} ${key} @{files} - - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${bucket} --key ${key} - ${upload_id} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - - @{etags} = Create List - FOR ${i} ${file} IN ENUMERATE @{files} - ${part} = Evaluate ${i} + 1 - ${result} = Execute AWSS3APICli upload-part --bucket ${bucket} --key ${key} --part-number ${part} --body ${file} --upload-id ${upload_id} - ${etag} = Execute echo '${result}' | jq -r '.ETag' - Append To List ${etags} {ETag=${etag},PartNumber=${part}} - END - - ${parts} = Catenate SEPARATOR=, @{etags} - Execute AWSS3APICli complete-multipart-upload --bucket ${bucket} --key ${key} --upload-id ${upload_id} --multipart-upload 'Parts=[${parts}]' - -Verify Multipart Upload - [arguments] ${bucket} ${key} @{files} - - ${random} = Generate Ozone String - - Execute AWSS3APICli get-object --bucket ${bucket} --key ${key} /tmp/verify${random} - ${tmp} = Catenate @{files} - Execute cat ${tmp} > /tmp/original${random} - Compare files /tmp/original${random} /tmp/verify${random} - Revoke S3 secrets Execute and Ignore Error ozone s3 revokesecret -y Execute and Ignore Error ozone s3 revokesecret -y -u testuser diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/mpu_lib.robot b/hadoop-ozone/dist/src/main/smoketest/s3/mpu_lib.robot new file mode 100644 index 000000000000..0aaa0affec1d --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/s3/mpu_lib.robot @@ -0,0 +1,105 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation Keywords for Multipart Upload +Library OperatingSystem +Library String +Resource commonawslib.robot + +*** Keywords *** + +Initiate MPU + [arguments] ${bucket} ${key} ${expected_rc}=0 ${opts}=${EMPTY} + + ${result} = Execute AWSS3APICli and checkrc create-multipart-upload --bucket ${bucket} --key ${key} ${opts} ${expected_rc} + IF '${expected_rc}' == '0' + Should contain ${result} ${bucket} + Should contain ${result} ${key} + ${upload_id} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 + RETURN ${upload_id} + ELSE + RETURN ${result} + END + + +Upload MPU part + [arguments] ${bucket} ${key} ${upload_id} ${part} ${file} ${expected_rc}=0 + + ${result} = Execute AWSS3APICli and checkrc upload-part --bucket ${bucket} --key ${key} --part-number ${part} --body ${file} --upload-id ${upload_id} ${expected_rc} + IF '${expected_rc}' == '0' + Should contain ${result} ETag + ${etag} = Execute echo '${result}' | jq -r '.ETag' + ${md5sum} = Execute md5sum ${file} | awk '{print $1}' + Should Be Equal As Strings ${etag} ${md5sum} + RETURN ${etag} + ELSE + RETURN ${result} + END + + +Complete MPU + [arguments] ${bucket} ${key} ${upload_id} ${parts} ${expected_rc}=0 + + ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --bucket ${bucket} --key ${key} --upload-id ${upload_id} --multipart-upload 'Parts=[${parts}]' ${expected_rc} + IF '${expected_rc}' == '0' + Should contain ${result} ${bucket} + Should contain ${result} ${key} + Should contain ${result} ETag + ${etag} = Execute echo '${result}' | jq -r '.ETag' + RETURN ${etag} + ELSE + RETURN ${result} + END + + +Abort MPU + [arguments] ${bucket} ${key} ${upload_id} ${expected_rc}=0 + + ${result} = Execute AWSS3APICli and checkrc abort-multipart-upload --bucket ${bucket} --key ${key} --upload-id ${upload_id} ${expected_rc} + + +Upload MPU parts + [arguments] ${bucket} ${key} ${upload_id} @{files} + + @{etags} = Create List + FOR ${i} ${file} IN ENUMERATE @{files} + ${part} = Evaluate ${i} + 1 + ${etag} = Upload MPU part ${bucket} ${key} ${upload_id} ${part} ${file} + Append To List ${etags} {ETag=${etag},PartNumber=${part}} + END + ${parts} = Catenate SEPARATOR=, @{etags} + + RETURN ${parts} + + +Perform Multipart Upload + [arguments] ${bucket} ${key} @{files} + + ${upload_id} = Initiate MPU ${bucket} ${key} + ${parts} = Upload MPU parts ${bucket} ${key} ${upload_id} @{files} + Complete MPU ${bucket} ${key} ${upload_id} ${parts} + + +Verify Multipart Upload + [arguments] ${bucket} ${key} @{files} + + ${random} = Generate Ozone String + + Execute AWSS3APICli get-object --bucket ${bucket} --key ${key} /tmp/verify${random} + ${tmp} = Catenate @{files} + Execute cat ${tmp} > /tmp/original${random} + Compare files /tmp/original${random} /tmp/verify${random} + diff --git a/hadoop-ozone/dist/src/shell/ozone/ozone b/hadoop-ozone/dist/src/shell/ozone/ozone index 22ceed9ed3c6..0d005b3bd785 100755 --- a/hadoop-ozone/dist/src/shell/ozone/ozone +++ b/hadoop-ozone/dist/src/shell/ozone/ozone @@ -61,6 +61,7 @@ function ozone_usage ozone_add_subcommand "debug" client "Ozone debug tool" 
ozone_add_subcommand "repair" client "Ozone repair tool" ozone_add_subcommand "checknative" client "checks if native libraries are loaded" + ozone_add_subcommand "ratis" client "Ozone ratis tool" ozone_generate_usage "${OZONE_SHELL_EXECNAME}" false } @@ -231,6 +232,10 @@ function ozonecmd_case OZONE_CLASSNAME=org.apache.hadoop.ozone.shell.checknative.CheckNative OZONE_RUN_ARTIFACT_NAME="ozone-tools" ;; + ratis) + OZONE_CLASSNAME=org.apache.hadoop.ozone.shell.OzoneRatis + OZONE_RUN_ARTIFACT_NAME="ozone-tools" + ;; *) OZONE_CLASSNAME="${subcmd}" if ! ozone_validate_classname "${OZONE_CLASSNAME}"; then diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml index 4548459105fa..90961941a463 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml @@ -20,9 +20,9 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> ozone-fault-injection-test org.apache.ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Mini Ozone Chaos Tests Apache Ozone Mini Ozone Chaos Tests diff --git a/hadoop-ozone/fault-injection-test/network-tests/pom.xml b/hadoop-ozone/fault-injection-test/network-tests/pom.xml index 97d10cbf7615..358749117306 100644 --- a/hadoop-ozone/fault-injection-test/network-tests/pom.xml +++ b/hadoop-ozone/fault-injection-test/network-tests/pom.xml @@ -20,7 +20,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone-fault-injection-test - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-network-tests Apache Ozone Network Tests diff --git a/hadoop-ozone/fault-injection-test/pom.xml b/hadoop-ozone/fault-injection-test/pom.xml index 432faab48777..e62f7e47dc04 100644 --- a/hadoop-ozone/fault-injection-test/pom.xml +++ b/hadoop-ozone/fault-injection-test/pom.xml @@ -20,10 +20,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-fault-injection-test - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Fault Injection Tests Apache Ozone Fault Injection Tests pom diff --git a/hadoop-ozone/httpfsgateway/pom.xml b/hadoop-ozone/httpfsgateway/pom.xml index 7664643b153e..64eba036a5fa 100644 --- a/hadoop-ozone/httpfsgateway/pom.xml +++ b/hadoop-ozone/httpfsgateway/pom.xml @@ -22,10 +22,10 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-httpfsgateway - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT jar Apache Ozone HttpFS diff --git a/hadoop-ozone/httpfsgateway/src/main/resources/httpfs.properties b/hadoop-ozone/httpfsgateway/src/main/resources/httpfs.properties index 164896e1f054..16d13de384a3 100644 --- a/hadoop-ozone/httpfsgateway/src/main/resources/httpfs.properties +++ b/hadoop-ozone/httpfsgateway/src/main/resources/httpfs.properties @@ -16,6 +16,3 @@ httpfs.version=${project.version} httpfs.source.repository=${httpfs.source.repository} httpfs.source.revision=${httpfs.source.revision} - -httpfs.build.username=${user.name} -httpfs.build.timestamp=${httpfs.build.timestamp} diff --git a/hadoop-ozone/insight/pom.xml b/hadoop-ozone/insight/pom.xml index bcfb1660244d..c448411c5f6b 100644 --- a/hadoop-ozone/insight/pom.xml +++ b/hadoop-ozone/insight/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-insight - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Insight Tool Apache Ozone Insight Tool jar diff --git a/hadoop-ozone/integration-test/pom.xml 
b/hadoop-ozone/integration-test/pom.xml index f66f64d2874f..45a3827a06b9 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-integration-test - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Integration Tests Apache Ozone Integration Tests jar @@ -141,6 +141,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.slf4j * + + com.sun.jersey + jersey-servlet + @@ -161,6 +165,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.slf4j * + + com.sun.jersey + jersey-servlet + @@ -216,6 +224,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.slf4j * + + com.sun.jersey + jersey-servlet + diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java index 69242d2b1f0e..e7c4cbee1d56 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java @@ -409,7 +409,7 @@ public void testCreateWithInvalidPaths() throws Exception { } private void checkInvalidPath(Path path) { - InvalidPathException pathException = assertThrows( + InvalidPathException pathException = GenericTestUtils.assertThrows( InvalidPathException.class, () -> fs.create(path, false) ); assertThat(pathException.getMessage()).contains("Invalid path Name"); @@ -1831,12 +1831,14 @@ public void testLoopInLinkBuckets() throws Exception { String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, linkBucket1Name, linksVolume); - try { - FileSystem.get(URI.create(rootPath), cluster.getConf()); - fail("Should throw Exception due to loop in Link Buckets"); + try (FileSystem fileSystem = FileSystem.get(URI.create(rootPath), + cluster.getConf())) { + fail("Should throw Exception due to loop in Link Buckets" + + " while initialising fs with URI " + fileSystem.getUri()); } catch (OMException oe) { // Expected exception - assertEquals(OMException.ResultCodes.DETECTED_LOOP_IN_BUCKET_LINKS, oe.getResult()); + assertEquals(OMException.ResultCodes.DETECTED_LOOP_IN_BUCKET_LINKS, + oe.getResult()); } finally { volume.deleteBucket(linkBucket1Name); volume.deleteBucket(linkBucket2Name); @@ -1854,13 +1856,17 @@ public void testLoopInLinkBuckets() throws Exception { String rootPath2 = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, danglingLinkBucketName, linksVolume); + FileSystem fileSystem = null; try { - FileSystem.get(URI.create(rootPath2), cluster.getConf()); + fileSystem = FileSystem.get(URI.create(rootPath2), cluster.getConf()); } catch (OMException oe) { // Expected exception fail("Should not throw Exception and show orphan buckets"); } finally { volume.deleteBucket(danglingLinkBucketName); + if (fileSystem != null) { + fileSystem.close(); + } } } @@ -2230,7 +2236,8 @@ void testFileSystemWithObjectStoreLayout() throws IOException { OzoneConfiguration config = new OzoneConfiguration(fs.getConf()); config.set(FS_DEFAULT_NAME_KEY, obsRootPath); - IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> FileSystem.get(config)); + IllegalArgumentException e = GenericTestUtils.assertThrows(IllegalArgumentException.class, + () -> FileSystem.get(config)); assertThat(e.getMessage()).contains("OBJECT_STORE, which 
does not support file system semantics"); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java index 47c584e048a6..67baea883574 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java @@ -65,15 +65,17 @@ public static void listStatusIteratorOnPageSize(OzoneConfiguration conf, URI uri = FileSystem.getDefaultUri(config); config.setBoolean( String.format("fs.%s.impl.disable.cache", uri.getScheme()), true); - FileSystem subject = FileSystem.get(uri, config); - Path dir = new Path(Objects.requireNonNull(rootPath), "listStatusIterator"); - try { - Set paths = new TreeSet<>(); - for (int dirCount : dirCounts) { - listStatusIterator(subject, dir, paths, dirCount); + try (FileSystem subject = FileSystem.get(uri, config)) { + Path dir = new Path(Objects.requireNonNull(rootPath), + "listStatusIterator"); + try { + Set paths = new TreeSet<>(); + for (int dirCount : dirCounts) { + listStatusIterator(subject, dir, paths, dirCount); + } + } finally { + subject.delete(dir, true); } - } finally { - subject.delete(dir, true); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java index 0abfb1336544..8d161dedeb33 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java @@ -18,7 +18,11 @@ package org.apache.hadoop.fs.ozone; +import java.util.List; +import java.util.Random; import java.util.concurrent.CompletableFuture; + +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; @@ -32,10 +36,16 @@ import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.TestDataUtil; +import org.apache.hadoop.ozone.client.BucketArgs; +import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer; @@ -48,12 +58,16 @@ import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; 
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.mockito.ArgumentMatchers; +import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -64,6 +78,8 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.function.LongSupplier; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -72,6 +88,12 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.when; /** * Directory deletion service test cases. @@ -97,6 +119,7 @@ public static void init() throws Exception { conf.setInt(OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK, 5); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); + conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 1000, TimeUnit.MILLISECONDS); conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled); conf.setBoolean(OZONE_ACL_ENABLED, true); cluster = MiniOzoneCluster.newBuilder(conf) @@ -460,6 +483,123 @@ public void testDeleteFilesAndSubFiles() throws Exception { assertEquals(prevDeletedKeyCount + 5, currentDeletedKeyCount); } + private void createFileKey(OzoneBucket bucket, String key) + throws Exception { + byte[] value = RandomStringUtils.randomAscii(10240).getBytes(UTF_8); + OzoneOutputStream fileKey = bucket.createKey(key, value.length); + fileKey.write(value); + fileKey.close(); + } + + /* + * Create key d1/k1 + * Create snap1 + * Rename dir1 to dir2 + * Delete dir2 + * Wait for KeyDeletingService to start processing deleted key k2 + * Create snap2 by making the KeyDeletingService thread wait till snap2 is flushed + * Resume KeyDeletingService thread. + * Read d1 from snap1. 
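+ * (Note: in the test code below, the created key is dir1/key1, renaming dir1 produces
+ *  dir2, and the deleted-directory path key asserted against the purge requests is
+ *  computed for dir2 before it is purged.)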
+ */ + @Test + public void testAOSKeyDeletingWithSnapshotCreateParallelExecution() + throws Exception { + OMMetadataManager omMetadataManager = cluster.getOzoneManager().getMetadataManager(); + Table snapshotInfoTable = omMetadataManager.getSnapshotInfoTable(); + Table deletedDirTable = omMetadataManager.getDeletedDirTable(); + Table renameTable = omMetadataManager.getSnapshotRenamedTable(); + cluster.getOzoneManager().getKeyManager().getSnapshotDeletingService().shutdown(); + DirectoryDeletingService dirDeletingService = cluster.getOzoneManager().getKeyManager().getDirDeletingService(); + // Suspend KeyDeletingService + dirDeletingService.suspend(); + GenericTestUtils.waitFor(() -> !dirDeletingService.isRunningOnAOS(), 1000, 10000); + Random random = new Random(); + final String testVolumeName = "volume" + random.nextInt(); + final String testBucketName = "bucket" + random.nextInt(); + // Create Volume and Buckets + ObjectStore store = client.getObjectStore(); + store.createVolume(testVolumeName); + OzoneVolume volume = store.getVolume(testVolumeName); + volume.createBucket(testBucketName, + BucketArgs.newBuilder().setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED).build()); + OzoneBucket bucket = volume.getBucket(testBucketName); + + OzoneManager ozoneManager = Mockito.spy(cluster.getOzoneManager()); + OmSnapshotManager omSnapshotManager = Mockito.spy(ozoneManager.getOmSnapshotManager()); + when(ozoneManager.getOmSnapshotManager()).thenAnswer(i -> omSnapshotManager); + DirectoryDeletingService service = Mockito.spy(new DirectoryDeletingService(1000, TimeUnit.MILLISECONDS, 1000, + ozoneManager, + cluster.getConf())); + service.shutdown(); + final int initialSnapshotCount = + (int) cluster.getOzoneManager().getMetadataManager().countRowsInTable(snapshotInfoTable); + final int initialDeletedCount = (int) omMetadataManager.countRowsInTable(deletedDirTable); + final int initialRenameCount = (int) omMetadataManager.countRowsInTable(renameTable); + String snap1 = "snap1"; + String snap2 = "snap2"; + createFileKey(bucket, "dir1/key1"); + store.createSnapshot(testVolumeName, testBucketName, "snap1"); + bucket.renameKey("dir1", "dir2"); + OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() + .setVolumeName(testVolumeName) + .setBucketName(testBucketName) + .setKeyName("dir2").build(); + long objectId = store.getClientProxy().getOzoneManagerClient().getKeyInfo(omKeyArgs, false) + .getKeyInfo().getObjectID(); + long volumeId = omMetadataManager.getVolumeId(testVolumeName); + long bucketId = omMetadataManager.getBucketId(testVolumeName, testBucketName); + String deletePathKey = omMetadataManager.getOzoneDeletePathKey(objectId, + omMetadataManager.getOzonePathKey(volumeId, + bucketId, bucketId, "dir2")); + bucket.deleteDirectory("dir2", true); + + + assertTableRowCount(deletedDirTable, initialDeletedCount + 1); + assertTableRowCount(renameTable, initialRenameCount + 1); + Mockito.doAnswer(i -> { + List purgePathRequestList = i.getArgument(5); + for (OzoneManagerProtocolProtos.PurgePathRequest purgeRequest : purgePathRequestList) { + Assertions.assertNotEquals(deletePathKey, purgeRequest.getDeletedDir()); + } + return i.callRealMethod(); + }).when(service).optimizeDirDeletesAndSubmitRequest(anyLong(), anyLong(), anyLong(), + anyLong(), anyList(), anyList(), eq(null), anyLong(), anyInt(), Mockito.any(), any()); + + Mockito.doAnswer(i -> { + store.createSnapshot(testVolumeName, testBucketName, snap2); + GenericTestUtils.waitFor(() -> { + try { + SnapshotInfo snapshotInfo = 
store.getClientProxy().getOzoneManagerClient() + .getSnapshotInfo(testVolumeName, testBucketName, snap2); + + return OmSnapshotManager.areSnapshotChangesFlushedToDB(cluster.getOzoneManager().getMetadataManager(), + snapshotInfo); + } catch (IOException e) { + throw new RuntimeException(e); + } + }, 1000, 100000); + GenericTestUtils.waitFor(() -> { + try { + return renameTable.get(omMetadataManager.getRenameKey(testVolumeName, testBucketName, objectId)) == null; + } catch (IOException e) { + throw new RuntimeException(e); + } + }, 1000, 10000); + return i.callRealMethod(); + }).when(omSnapshotManager).getSnapshot(ArgumentMatchers.eq(testVolumeName), ArgumentMatchers.eq(testBucketName), + ArgumentMatchers.eq(snap1)); + assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 1); + service.runPeriodicalTaskNow(); + service.runPeriodicalTaskNow(); + assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 2); + store.deleteSnapshot(testVolumeName, testBucketName, snap2); + service.runPeriodicalTaskNow(); + store.deleteSnapshot(testVolumeName, testBucketName, snap1); + cluster.restartOzoneManager(); + assertTableRowCount(cluster.getOzoneManager().getMetadataManager().getSnapshotInfoTable(), initialSnapshotCount); + dirDeletingService.resume(); + } + @Test public void testDirDeletedTableCleanUpForSnapshot() throws Exception { Table deletedDirTable = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSBucketLayout.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSBucketLayout.java index f1cedf59c3a8..8e8cc63a7d91 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSBucketLayout.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSBucketLayout.java @@ -122,8 +122,16 @@ void teardown() throws IOException { void fileSystemWithUnsupportedDefaultBucketLayout(String layout) { OzoneConfiguration conf = configWithDefaultBucketLayout(layout); - OMException e = assertThrows(OMException.class, - () -> FileSystem.newInstance(conf)); + OMException e = assertThrows(OMException.class, () -> { + FileSystem fileSystem = null; + try { + fileSystem = FileSystem.newInstance(conf); + } finally { + if (fileSystem != null) { + fileSystem.close(); + } + } + }); assertThat(e.getMessage()) .contains(ERROR_MAP.get(layout)); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileChecksum.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileChecksum.java index 649ed50a1020..7b5a95808050 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileChecksum.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileChecksum.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.ozone; import com.google.common.collect.ImmutableList; +import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileSystem; @@ -39,6 +40,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; import java.io.IOException; @@ -53,10 +55,13 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static 
org.apache.commons.lang3.RandomStringUtils.randomAlphabetic; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE; import static org.apache.hadoop.ozone.TestDataUtil.createBucket; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.params.provider.Arguments.arguments; /** * Test FileChecksum API. @@ -68,10 +73,16 @@ public class TestOzoneFileChecksum { true, false }; - private static final int[] DATA_SIZES = DoubleStream.of(0.5, 1, 1.5, 2, 7, 8) - .mapToInt(mb -> (int) (1024 * 1024 * mb)) + private static final int[] DATA_SIZES_1 = DoubleStream.of(0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 7, 8, 9, 10) + .mapToInt(mb -> (int) (1024 * 1024 * mb) + 510000) .toArray(); + private static final int[] DATA_SIZES_2 = DoubleStream.of(0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 7, 8, 9, 10) + .mapToInt(mb -> (int) (1024 * 1024 * mb) + 820000) + .toArray(); + + private int[] dataSizes = new int[DATA_SIZES_1.length + DATA_SIZES_2.length]; + private OzoneConfiguration conf; private MiniOzoneCluster cluster = null; private FileSystem fs; @@ -84,6 +95,8 @@ public class TestOzoneFileChecksum { void setup() throws IOException, InterruptedException, TimeoutException { conf = new OzoneConfiguration(); + conf.setStorageSize(OZONE_SCM_CHUNK_SIZE_KEY, 1024 * 1024, StorageUnit.BYTES); + conf.setStorageSize(OZONE_SCM_BLOCK_SIZE, 2 * 1024 * 1024, StorageUnit.BYTES); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) .build(); @@ -95,9 +108,8 @@ void setup() throws IOException, OzoneConsts.OZONE_OFS_URI_SCHEME); conf.setBoolean(disableCache, true); conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); - fs = FileSystem.get(conf); - ofs = (RootedOzoneFileSystem) fs; - adapter = (BasicRootedOzoneClientAdapterImpl) ofs.getAdapter(); + System.arraycopy(DATA_SIZES_1, 0, dataSizes, 0, DATA_SIZES_1.length); + System.arraycopy(DATA_SIZES_2, 0, dataSizes, DATA_SIZES_1.length, DATA_SIZES_2.length); } @AfterEach @@ -112,9 +124,13 @@ void teardown() { * Test EC checksum with Replicated checksum. 
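 * (In this revision the cluster fixes the chunk size at 1 MB and the block size at 2 MB,
 * and the data sizes add 510000 / 820000 byte offsets so file lengths do not line up with
 * chunk or block boundaries; each parameter set also varies ozone.client.bytes.per.checksum.)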
*/ @ParameterizedTest - @MethodSource("missingIndexes") - void testEcFileChecksum(List missingIndexes) throws IOException { + @MethodSource("missingIndexesAndChecksumSize") + void testEcFileChecksum(List missingIndexes, double checksumSizeInMB) throws IOException { + conf.setInt("ozone.client.bytes.per.checksum", (int) (checksumSizeInMB * 1024 * 1024)); + fs = FileSystem.get(conf); + ofs = (RootedOzoneFileSystem) fs; + adapter = (BasicRootedOzoneClientAdapterImpl) ofs.getAdapter(); String volumeName = UUID.randomUUID().toString(); String legacyBucket = UUID.randomUUID().toString(); String ecBucketName = UUID.randomUUID().toString(); @@ -139,7 +155,7 @@ void testEcFileChecksum(List missingIndexes) throws IOException { Map replicatedChecksums = new HashMap<>(); - for (int dataLen : DATA_SIZES) { + for (int dataLen : dataSizes) { byte[] data = randomAlphabetic(dataLen).getBytes(UTF_8); try (OutputStream file = adapter.createFile(volumeName + "/" @@ -170,7 +186,7 @@ void testEcFileChecksum(List missingIndexes) throws IOException { clientConf.setBoolean(OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, topologyAware); try (FileSystem fsForRead = FileSystem.get(clientConf)) { - for (int dataLen : DATA_SIZES) { + for (int dataLen : dataSizes) { // Compute checksum after failed DNs Path parent = new Path("/" + volumeName + "/" + ecBucketName + "/"); Path ecKey = new Path(parent, "test" + dataLen); @@ -187,14 +203,13 @@ void testEcFileChecksum(List missingIndexes) throws IOException { } } - static Stream> missingIndexes() { + static Stream missingIndexesAndChecksumSize() { return Stream.of( - ImmutableList.of(0, 1), - ImmutableList.of(1, 2), - ImmutableList.of(2, 3), - ImmutableList.of(3, 4), - ImmutableList.of(0, 3), - ImmutableList.of(0, 4) - ); + arguments(ImmutableList.of(0, 1), 0.001), + arguments(ImmutableList.of(1, 2), 0.01), + arguments(ImmutableList.of(2, 3), 0.1), + arguments(ImmutableList.of(3, 4), 0.5), + arguments(ImmutableList.of(0, 3), 1), + arguments(ImmutableList.of(0, 4), 2)); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitInRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestCommitInRatis.java similarity index 96% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitInRatis.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestCommitInRatis.java index 4ff671df6163..7a1366ad682b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitInRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestCommitInRatis.java @@ -15,17 +15,13 @@ * the License. */ -package org.apache.hadoop.ozone.client.rpc; +package org.apache.hadoop.hdds.scm; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientRatis; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.XceiverClientReply; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.protocolPB. 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java index 400c4868a99e..e90c576e8dd4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java @@ -128,15 +128,14 @@ private DBCheckpoint downloadSnapshot() throws Exception { public void testInstallCheckPoint() throws Exception { DBCheckpoint checkpoint = downloadSnapshot(); StorageContainerManager scm = cluster.getStorageContainerManager(); - DBStore db = HAUtils - .loadDB(conf, checkpoint.getCheckpointLocation().getParent().toFile(), - checkpoint.getCheckpointLocation().getFileName().toString(), - new SCMDBDefinition()); + final Path location = checkpoint.getCheckpointLocation(); + final DBStore db = HAUtils.loadDB(conf, location.getParent().toFile(), + location.getFileName().toString(), SCMDBDefinition.get()); // Hack the transaction index in the checkpoint so as to ensure the // checkpointed transaction index is higher than when it was downloaded // from. assertNotNull(db); - HAUtils.getTransactionInfoTable(db, new SCMDBDefinition()) + HAUtils.getTransactionInfoTable(db, SCMDBDefinition.get()) .put(OzoneConsts.TRANSACTION_INFO_KEY, TransactionInfo.valueOf(10, 100)); db.close(); ContainerID cid = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java index 10492736144b..e55355525a6d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java @@ -224,9 +224,8 @@ public void testInstallCorruptedCheckpointFailure() throws Exception { DBCheckpoint leaderDbCheckpoint = leaderSCM.getScmMetadataStore().getStore() .getCheckpoint(false); Path leaderCheckpointLocation = leaderDbCheckpoint.getCheckpointLocation(); - TransactionInfo leaderCheckpointTrxnInfo = HAUtils - .getTrxnInfoFromCheckpoint(conf, leaderCheckpointLocation, - new SCMDBDefinition()); + final TransactionInfo leaderCheckpointTrxnInfo = HAUtils.getTrxnInfoFromCheckpoint( + conf, leaderCheckpointLocation, SCMDBDefinition.get()); assertNotNull(leaderCheckpointLocation); // Take a backup of the current DB diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestWatchForCommit.java similarity index 98% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestWatchForCommit.java index bec14b23b0f0..4c950e7d725b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestWatchForCommit.java @@ -15,7 +15,7 @@ * the License. 
*/ -package org.apache.hadoop.ozone.client.rpc; +package org.apache.hadoop.hdds.scm; import java.io.IOException; import java.io.OutputStream; @@ -35,12 +35,6 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; -import org.apache.hadoop.hdds.scm.OzoneClientConfig; -import org.apache.hadoop.hdds.scm.XceiverClientRatis; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientReply; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java index 9c76c0ec0c79..ff55ee83c176 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java @@ -35,11 +35,14 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.recon.ReconServer; +import org.apache.hadoop.ozone.s3.Gateway; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.util.ExitUtils; import org.apache.ratis.util.function.CheckedFunction; +import com.amazonaws.services.s3.AmazonS3; + /** * Interface used for MiniOzoneClusters. */ @@ -142,10 +145,17 @@ void waitForPipelineTobeReady(HddsProtos.ReplicationFactor factor, /** * Returns a {@link ReconServer} instance. * - * @return List of {@link ReconServer} + * @return {@link ReconServer} instance if it is initialized, otherwise null. */ ReconServer getReconServer(); + /** + * Returns a {@link Gateway} instance. + * + * @return {@link Gateway} instance if it is initialized, otherwise null. + */ + Gateway getS3G(); + /** * Returns an {@link OzoneClient} to access the {@link MiniOzoneCluster}. * The caller is responsible for closing the client after use. @@ -154,6 +164,11 @@ void waitForPipelineTobeReady(HddsProtos.ReplicationFactor factor, */ OzoneClient newClient() throws IOException; + /** + * Returns an {@link AmazonS3} to access the {@link MiniOzoneCluster}. + */ + AmazonS3 newS3Client(); + /** * Returns StorageContainerLocationClient to communicate with * {@link StorageContainerManager} associated with the MiniOzoneCluster. @@ -219,6 +234,21 @@ void restartHddsDatanode(DatanodeDetails dn, boolean waitForDatanode) */ void stopRecon(); + /** + * Start S3G. + */ + void startS3G(); + + /** + * Restart S3G. + */ + void restartS3G(); + + /** + * Stop S3G. + */ + void stopS3G(); + /** * Shutdown the MiniOzoneCluster and delete the storage dirs. 
*/ @@ -273,6 +303,7 @@ abstract class Builder { protected String omId = UUID.randomUUID().toString(); protected boolean includeRecon = false; + protected boolean includeS3G = false; protected int dnInitialVersion = DatanodeVersion.FUTURE_VERSION.toProtoValue(); protected int dnCurrentVersion = DatanodeVersion.COMBINED_PUTBLOCK_WRITECHUNK_RPC.toProtoValue(); @@ -382,6 +413,11 @@ public Builder includeRecon(boolean include) { return this; } + public Builder includeS3G(boolean include) { + this.includeS3G = include; + return this; + } + /** * Constructs and returns MiniOzoneCluster. * diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index 50013b57f4c3..3594996856af 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -32,6 +32,14 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import com.amazonaws.ClientConfiguration; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.AWSStaticCredentialsProvider; +import com.amazonaws.auth.BasicAWSCredentials; +import com.amazonaws.client.builder.AwsClientBuilder; +import com.amazonaws.regions.Regions; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.AmazonS3ClientBuilder; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.DatanodeVersion; import org.apache.hadoop.hdds.HddsConfigKeys; @@ -58,6 +66,7 @@ import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.security.symmetric.SecretKeyClient; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; +import org.apache.hadoop.hdds.server.http.HttpConfig; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.CodecBuffer; import org.apache.hadoop.hdds.utils.db.CodecTestUtil; @@ -73,6 +82,10 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.recon.ConfigurationProvider; import org.apache.hadoop.ozone.recon.ReconServer; +import org.apache.hadoop.ozone.s3.Gateway; +import org.apache.hadoop.ozone.s3.OzoneClientCache; +import org.apache.hadoop.ozone.s3.OzoneConfigurationHolder; +import org.apache.hadoop.ozone.s3.S3GatewayConfigKeys; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.ozone.test.GenericTestUtils; @@ -84,9 +97,14 @@ import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_DATANODE_ADDRESS_KEY; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_HTTP_ADDRESS_KEY; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_TASK_SAFEMODE_WAIT_THRESHOLD; +import static org.apache.hadoop.hdds.server.http.HttpConfig.getHttpPolicy; +import static org.apache.hadoop.hdds.server.http.HttpServer2.HTTPS_SCHEME; +import static org.apache.hadoop.hdds.server.http.HttpServer2.HTTP_SCHEME; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB_DIR; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_DB_DIR; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_HTTPS_ADDRESS_KEY; +import static 
org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_HTTP_ADDRESS_KEY; import static org.apache.ozone.test.GenericTestUtils.PortAllocator.anyHostWithFreePort; import static org.apache.ozone.test.GenericTestUtils.PortAllocator.getFreePort; import static org.apache.ozone.test.GenericTestUtils.PortAllocator.localhostWithFreePort; @@ -120,6 +138,7 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster { private OzoneManager ozoneManager; private final List hddsDatanodes; private ReconServer reconServer; + private Gateway s3g; // Timeout for the cluster to be ready private int waitForClusterToBeReadyTimeout = 120000; // 2 min @@ -136,13 +155,15 @@ private MiniOzoneClusterImpl(OzoneConfiguration conf, OzoneManager ozoneManager, StorageContainerManager scm, List hddsDatanodes, - ReconServer reconServer) { + ReconServer reconServer, + Gateway s3g) { this.conf = conf; this.ozoneManager = ozoneManager; this.scm = scm; this.hddsDatanodes = hddsDatanodes; this.reconServer = reconServer; this.scmConfigurator = scmConfigurator; + this.s3g = s3g; } /** @@ -268,6 +289,11 @@ public ReconServer getReconServer() { return this.reconServer; } + @Override + public Gateway getS3G() { + return this.s3g; + } + @Override public int getHddsDatanodeIndex(DatanodeDetails dn) throws IOException { for (HddsDatanodeService service : hddsDatanodes) { @@ -286,6 +312,54 @@ public OzoneClient newClient() throws IOException { return client; } + @Override + public AmazonS3 newS3Client() { + // TODO: Parameterize tests between Virtual host style and Path style + return createS3Client(true); + } + + public AmazonS3 createS3Client(boolean enablePathStyle) { + final String accessKey = "user"; + final String secretKey = "password"; + final Regions region = Regions.DEFAULT_REGION; + + final String protocol; + final HttpConfig.Policy webPolicy = getHttpPolicy(conf); + String host; + + if (webPolicy.isHttpsEnabled()) { + protocol = HTTPS_SCHEME; + host = conf.get(OZONE_S3G_HTTPS_ADDRESS_KEY); + } else { + protocol = HTTP_SCHEME; + host = conf.get(OZONE_S3G_HTTP_ADDRESS_KEY); + } + + String endpoint = protocol + "://" + host; + + AWSCredentialsProvider credentials = new AWSStaticCredentialsProvider( + new BasicAWSCredentials(accessKey, secretKey) + ); + + + ClientConfiguration clientConfiguration = new ClientConfiguration(); + LOG.info("S3 Endpoint is {}", endpoint); + + AmazonS3 s3Client = + AmazonS3ClientBuilder.standard() + .withPathStyleAccessEnabled(enablePathStyle) + .withEndpointConfiguration( + new AwsClientBuilder.EndpointConfiguration( + endpoint, region.getName() + ) + ) + .withClientConfiguration(clientConfiguration) + .withCredentials(credentials) + .build(); + + return s3Client; + } + protected OzoneClient createClient() throws IOException { return OzoneClientFactory.getRpcClient(conf); } @@ -428,6 +502,7 @@ public void stop() { stopDatanodes(hddsDatanodes); stopSCM(scm); stopRecon(reconServer); + stopS3G(s3g); } private void startHddsDatanode(HddsDatanodeService datanode) { @@ -467,6 +542,23 @@ public void stopRecon() { stopRecon(reconServer); } + @Override + public void startS3G() { + s3g = new Gateway(); + s3g.execute(NO_ARGS); + } + + @Override + public void restartS3G() { + stopS3G(s3g); + startS3G(); + } + + @Override + public void stopS3G() { + stopS3G(s3g); + } + private CertificateClient getCAClient() { return this.caClient; } @@ -521,6 +613,19 @@ private static void stopRecon(ReconServer reconServer) { } } + private static void stopS3G(Gateway s3g) { + try { + if (s3g != null) { + 
LOG.info("Stopping S3G"); + // TODO (HDDS-11539): Remove this workaround once the @PreDestroy issue is fixed + OzoneClientCache.closeClient(); + s3g.stop(); + } + } catch (Exception e) { + LOG.error("Exception while shutting down S3 Gateway.", e); + } + } + /** * Builder for configuring the MiniOzoneCluster to run. */ @@ -544,15 +649,17 @@ public MiniOzoneCluster build() throws IOException { OzoneManager om = null; ReconServer reconServer = null; List hddsDatanodes = Collections.emptyList(); + Gateway s3g = null; try { scm = createAndStartSingleSCM(); om = createAndStartSingleOM(); + s3g = createS3G(); reconServer = createRecon(); hddsDatanodes = createHddsDatanodes(); MiniOzoneClusterImpl cluster = new MiniOzoneClusterImpl(conf, scmConfigurator, om, scm, - hddsDatanodes, reconServer); + hddsDatanodes, reconServer, s3g); cluster.setCAClient(certClient); cluster.setSecretKeyClient(secretKeyClient); @@ -567,6 +674,9 @@ public MiniOzoneCluster build() throws IOException { if (includeRecon) { stopRecon(reconServer); } + if (includeS3G) { + stopS3G(s3g); + } if (startDataNodes) { stopDatanodes(hddsDatanodes); } @@ -740,6 +850,16 @@ protected ReconServer createRecon() { return reconServer; } + protected Gateway createS3G() { + Gateway s3g = null; + if (includeS3G) { + configureS3G(); + s3g = new Gateway(); + s3g.execute(NO_ARGS); + } + return s3g; + } + /** * Creates HddsDatanodeService(s) instance. * @@ -806,5 +926,14 @@ protected void configureRecon() { ConfigurationProvider.setConfiguration(conf); } + private void configureS3G() { + OzoneConfigurationHolder.resetConfiguration(); + + conf.set(S3GatewayConfigKeys.OZONE_S3G_HTTP_ADDRESS_KEY, localhostWithFreePort()); + conf.set(S3GatewayConfigKeys.OZONE_S3G_HTTPS_ADDRESS_KEY, localhostWithFreePort()); + + OzoneConfigurationHolder.setConfiguration(conf); + } + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java index cbd1829ef0cb..798e8a159919 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java @@ -77,6 +77,7 @@ public static void setup() throws Exception { ozoneConf = new OzoneConfiguration(); ozoneConf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, SCMContainerPlacementCapacity.class, PlacementPolicy.class); + ozoneConf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT, "1"); cluster = MiniOzoneCluster.newBuilder(ozoneConf).setNumDatanodes(3).build(); storageClient = new ContainerOperationClient(ozoneConf); cluster.waitForClusterToBeReady(); @@ -144,6 +145,24 @@ public void testCreate() throws Exception { .getContainerID()); } + /** + * Tests listing more containers than the configured maximum count Ozone allows per call. + * @throws Exception + */ + @Test + public void testListContainerExceedMaxAllowedCountOperations() throws Exception { + // Create two containers in a cluster where the maximum container list count is set to 1. + for (int i = 0; i < 2; i++) { + storageClient.createContainer(HddsProtos + .ReplicationType.STAND_ALONE, HddsProtos.ReplicationFactor + .ONE, OzoneConsts.OZONE); + } + + assertEquals(1, storageClient.listContainer(0, 2) + .getContainerInfoList().size()); + } + /** * A simple test to get Pipeline with {@link ContainerOperationClient}. 
* @throws Exception diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java index cef872597e43..2964912c40c4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.net.StaticMapping; +import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.om.KeyManagerImpl; import org.apache.hadoop.ozone.om.OmTestManagers; import org.apache.hadoop.ozone.om.OzoneManager; @@ -74,6 +75,8 @@ public class TestOMSortDatanodes { "edge1", "/rack1" ); + private static OzoneClient ozoneClient; + @BeforeAll public static void setup() throws Exception { config = new OzoneConfiguration(); @@ -109,11 +112,15 @@ public static void setup() throws Exception { = new OmTestManagers(config, scm.getBlockProtocolServer(), mockScmContainerClient); om = omTestManagers.getOzoneManager(); + ozoneClient = omTestManagers.getRpcClient(); keyManager = (KeyManagerImpl)omTestManagers.getKeyManager(); } @AfterAll public static void cleanup() throws Exception { + if (ozoneClient != null) { + ozoneClient.close(); + } if (scm != null) { scm.stop(); scm.join(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java index eb9f35f518c7..6edef789b172 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java @@ -32,12 +32,14 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Optional; +import java.util.Set; import java.util.TreeMap; import java.util.UUID; import java.util.concurrent.CountDownLatch; @@ -188,6 +190,7 @@ import static org.junit.jupiter.api.Assertions.fail; import static org.slf4j.event.Level.DEBUG; +import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.MethodOrderer; import org.junit.jupiter.api.Test; @@ -221,6 +224,7 @@ abstract class OzoneRpcClientTests extends OzoneTestBase { private static OzoneAcl inheritedGroupAcl = new OzoneAcl(GROUP, remoteGroupName, ACCESS, READ); private static MessageDigest eTagProvider; + private static Set ozoneClients = new HashSet<>(); @BeforeAll public static void initialize() throws NoSuchAlgorithmException { @@ -250,6 +254,7 @@ static void startCluster(OzoneConfiguration conf, MiniOzoneCluster.Builder build .build(); cluster.waitForClusterToBeReady(); ozClient = OzoneClientFactory.getRpcClient(conf); + ozoneClients.add(ozClient); store = ozClient.getObjectStore(); storageContainerLocationClient = cluster.getStorageContainerLocationClient(); @@ -259,10 +264,9 @@ static void startCluster(OzoneConfiguration conf, MiniOzoneCluster.Builder build /** * Close OzoneClient and shutdown MiniOzoneCluster. 
*/ - static void shutdownCluster() throws IOException { - if (ozClient != null) { - ozClient.close(); - } + static void shutdownCluster() { + org.apache.hadoop.hdds.utils.IOUtils.closeQuietly(ozoneClients); + ozoneClients.clear(); if (storageContainerLocationClient != null) { storageContainerLocationClient.close(); @@ -274,6 +278,7 @@ static void shutdownCluster() throws IOException { } private static void setOzClient(OzoneClient ozClient) { + ozoneClients.add(ozClient); OzoneRpcClientTests.ozClient = ozClient; } @@ -3140,6 +3145,37 @@ void testMultipartUploadOverride(ReplicationConfig replication) doMultipartUpload(bucket, keyName, (byte)97, replication); } + + /** + * This test intentionally leaves an OzoneClient unclosed, which prints a memory-leak + * warning to the test logs; CI post-processing flags that warning and fails the run, + * so the test is disabled for CI. + */ + @Unhealthy + public void testClientLeakDetector() throws Exception { + OzoneClient client = OzoneClientFactory.getRpcClient(cluster.getConf()); + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName = UUID.randomUUID().toString(); + GenericTestUtils.LogCapturer ozoneClientFactoryLogCapturer = + GenericTestUtils.LogCapturer.captureLogs( + OzoneClientFactory.getLogger()); + + client.getObjectStore().createVolume(volumeName); + OzoneVolume volume = client.getObjectStore().getVolume(volumeName); + volume.createBucket(bucketName); + OzoneBucket bucket = volume.getBucket(bucketName); + byte[] data = new byte[10]; + Arrays.fill(data, (byte) 1); + try (OzoneOutputStream out = bucket.createKey(keyName, 10, + ReplicationConfig.fromTypeAndFactor(RATIS, ONE), new HashMap<>())) { + out.write(data); + } + client = null; + System.gc(); + GenericTestUtils.waitFor(() -> ozoneClientFactoryLogCapturer.getOutput() + .contains("is not closed properly"), 100, 2000); + } @Test public void testMultipartUploadOwner() throws Exception { // Save the old user, and switch to the old user after test diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java index eb3709c9a85f..63692c0dfc72 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java @@ -49,6 +49,7 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.container.TestHelper; +import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.TestInstance; @@ -274,6 +275,7 @@ void testWriteLessThanChunkSize(boolean flushDelay, boolean enablePiggybacking) @ParameterizedTest @MethodSource("clientParameters") + @Flaky("HDDS-11564") void testWriteExactlyFlushSize(boolean flushDelay, boolean enablePiggybacking) throws Exception { OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { @@ -481,6 +483,7 @@ void testWriteMoreThanChunkSize(boolean flushDelay, boolean enablePiggybacking) @ParameterizedTest @MethodSource("clientParameters") + @Flaky("HDDS-11564") void testWriteMoreThanFlushSize(boolean flushDelay, boolean enablePiggybacking) throws Exception { OzoneClientConfig config = 
newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { @@ -571,6 +574,7 @@ void testWriteMoreThanFlushSize(boolean flushDelay, boolean enablePiggybacking) @ParameterizedTest @MethodSource("clientParameters") + @Flaky("HDDS-11564") void testWriteExactlyMaxFlushSize(boolean flushDelay, boolean enablePiggybacking) throws Exception { OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { @@ -665,6 +669,7 @@ void testWriteExactlyMaxFlushSize(boolean flushDelay, boolean enablePiggybacking @ParameterizedTest @MethodSource("clientParameters") + @Flaky("HDDS-11564") void testWriteMoreThanMaxFlushSize(boolean flushDelay, boolean enablePiggybacking) throws Exception { OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index 5ff8d713649e..719715ac8b3d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -17,11 +17,18 @@ */ package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; +import java.io.IOException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.TimeUnit; import java.util.stream.Stream; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; -import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -31,20 +38,23 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.block.DeletedBlockLogImpl; import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService; import org.apache.hadoop.hdds.scm.block.ScmBlockDeletingServiceMetrics; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; import org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.scm.container.ContainerStateManager; import org.apache.hadoop.hdds.scm.container.replication.LegacyReplicationManager; +import 
org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.server.events.EventQueue; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -80,17 +90,6 @@ import org.slf4j.LoggerFactory; import org.slf4j.event.Level; -import java.io.IOException; -import java.time.Duration; -import java.util.HashMap; -import java.util.Map; -import java.util.Set; -import java.util.List; -import java.util.HashSet; -import java.util.ArrayList; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - import static java.lang.Math.max; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; @@ -610,7 +609,7 @@ public void testContainerDeleteWithInvalidKeyCount() final int valueSize = value.getBytes(UTF_8).length; final int keyCount = 1; List containerIdList = new ArrayList<>(); - containerInfos.stream().forEach(container -> { + containerInfos.forEach(container -> { assertEquals(valueSize, container.getUsedBytes()); assertEquals(keyCount, container.getNumberOfKeys()); containerIdList.add(container.getContainerID()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java index a4327a49bfab..7e16c0a29e35 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java @@ -120,6 +120,7 @@ public void shutdown() throws IOException { /** * Defines ldb tool test cases. 
*/ + @SuppressWarnings({"methodlength"}) private static Stream scanTestCases() { return Stream.of( Arguments.of( @@ -182,6 +183,43 @@ private static Stream scanTestCases() { Named.of("Filter invalid key", Arrays.asList("--filter", "keyName:equals:key9")), Named.of("Expect key1-key3", null) ), + Arguments.of( + Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)), + Named.of("Default", Pair.of(0, "")), + Named.of("Filter dataSize<2000", Arrays.asList("--filter", "dataSize:lesser:2000")), + Named.of("Expect key1-key5", Pair.of("key1", "key6")) + ), + Arguments.of( + Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)), + Named.of("Default", Pair.of(0, "")), + Named.of("Filter dataSize<500", Arrays.asList("--filter", "dataSize:lesser:500")), + Named.of("Expect empty result", null) + ), + Arguments.of( + Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)), + Named.of("Default", Pair.of(0, "")), + Named.of("Filter dataSize>500", Arrays.asList("--filter", "dataSize:greater:500")), + Named.of("Expect key1-key5", Pair.of("key1", "key6")) + ), + Arguments.of( + Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)), + Named.of("Default", Pair.of(0, "")), + Named.of("Filter dataSize>2000", Arrays.asList("--filter", "dataSize:greater:2000")), + Named.of("Expect empty result", null) + ), + Arguments.of( + Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)), + Named.of("Default", Pair.of(0, "")), + Named.of("Filter key3 regex", Arrays.asList("--filter", "keyName:regex:^.*3$")), + Named.of("Expect key3", Pair.of("key3", "key4")) + ), + Arguments.of( + Named.of(KEY_TABLE, Pair.of(KEY_TABLE, false)), + Named.of("Default", Pair.of(0, "")), + Named.of("Filter keys whose dataSize digits start with 5 using regex", + Arrays.asList("--filter", "dataSize:regex:^5.*$")), + Named.of("Expect empty result", null) + ), Arguments.of( Named.of(BLOCK_DATA + " V3", Pair.of(BLOCK_DATA, true)), Named.of("Default", Pair.of(0, "")), diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index efa2963842d4..e7df69a01dd7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -71,6 +71,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; @@ -171,6 +172,7 @@ public class TestKeyManagerImpl { private static final String VERSIONED_BUCKET_NAME = "versionedbucket1"; private static final String VOLUME_NAME = "vol1"; private static OzoneManagerProtocol writeClient; + private static OzoneClient rpcClient; private static OzoneManager om; @BeforeAll @@ -219,6 +221,7 @@ public static void setUp() throws Exception { keyManager = (KeyManagerImpl)omTestManagers.getKeyManager(); prefixManager = omTestManagers.getPrefixManager(); writeClient = omTestManagers.getWriteClient(); + rpcClient = omTestManagers.getRpcClient(); mockContainerClient(); @@ -235,6 +238,8 @@ public static void setUp() throws Exception { @AfterAll public static void cleanup() throws Exception { + writeClient.close(); + rpcClient.close(); scm.stop(); scm.join(); om.stop(); @@ -252,10 +257,11 
@@ public void init() throws Exception { public void cleanupTest() throws IOException { mockContainerClient(); org.apache.hadoop.fs.Path volumePath = new org.apache.hadoop.fs.Path(OZONE_URI_DELIMITER, VOLUME_NAME); - FileSystem fs = FileSystem.get(conf); - fs.delete(new org.apache.hadoop.fs.Path(volumePath, BUCKET_NAME), true); - fs.delete(new org.apache.hadoop.fs.Path(volumePath, BUCKET2_NAME), true); - fs.delete(new org.apache.hadoop.fs.Path(volumePath, VERSIONED_BUCKET_NAME), true); + try (FileSystem fs = FileSystem.get(conf)) { + fs.delete(new org.apache.hadoop.fs.Path(volumePath, BUCKET_NAME), true); + fs.delete(new org.apache.hadoop.fs.Path(volumePath, BUCKET2_NAME), true); + fs.delete(new org.apache.hadoop.fs.Path(volumePath, VERSIONED_BUCKET_NAME), true); + } } private static void mockContainerClient() { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java index f25bb47f0db2..5c7a0c31286b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java @@ -63,6 +63,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.client.rpc.RpcClient; @@ -162,7 +163,7 @@ public class TestOmContainerLocationCache { private static final DatanodeDetails DN5 = MockDatanodeDetails.createDatanodeDetails(UUID.randomUUID()); private static final AtomicLong CONTAINER_ID = new AtomicLong(1); - + private static OzoneClient ozoneClient; @BeforeAll public static void setUp() throws Exception { @@ -184,6 +185,7 @@ public static void setUp() throws Exception { OmTestManagers omTestManagers = new OmTestManagers(conf, mockScmBlockLocationProtocol, mockScmContainerClient); om = omTestManagers.getOzoneManager(); + ozoneClient = omTestManagers.getRpcClient(); metadataManager = omTestManagers.getMetadataManager(); rpcClient = new RpcClient(conf, null) { @@ -204,6 +206,7 @@ protected XceiverClientFactory createXceiverClientFactory( @AfterAll public static void cleanup() throws Exception { + ozoneClient.close(); om.stop(); FileUtils.deleteDirectory(dir); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java similarity index 52% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingService.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java index be4ea69095be..254de072e05b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.snapshot; +import org.apache.commons.compress.utils.Lists; import 
org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -32,20 +33,26 @@ import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.service.DirectoryDeletingService; +import org.apache.hadoop.ozone.om.service.KeyDeletingService; import org.apache.hadoop.ozone.om.service.SnapshotDeletingService; import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.MethodOrderer.OrderAnnotation; import org.junit.jupiter.api.Order; @@ -53,25 +60,41 @@ import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.Objects; +import java.util.Random; import java.util.UUID; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_TIMEOUT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.when; /** * Test Snapshot Deleting Service. 
@@ -80,10 +103,10 @@ @Timeout(300) @TestInstance(TestInstance.Lifecycle.PER_CLASS) @TestMethodOrder(OrderAnnotation.class) -public class TestSnapshotDeletingService { +public class TestSnapshotDeletingServiceIntegrationTest { private static final Logger LOG = - LoggerFactory.getLogger(TestSnapshotDeletingService.class); + LoggerFactory.getLogger(TestSnapshotDeletingServiceIntegrationTest.class); private static boolean omRatisEnabled = true; private static final ByteBuffer CONTENT = ByteBuffer.allocate(1024 * 1024 * 16); @@ -108,6 +131,7 @@ public void setup() throws Exception { 1, StorageUnit.MB); conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 500, TimeUnit.MILLISECONDS); + conf.setBoolean(OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED, true); conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_TIMEOUT, 10000, TimeUnit.MILLISECONDS); conf.setInt(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, 500); @@ -147,7 +171,7 @@ public void testSnapshotSplitAndMove() throws Exception { Table snapshotInfoTable = om.getMetadataManager().getSnapshotInfoTable(); - createSnapshotDataForBucket1(); + createSnapshotDataForBucket(bucket1); assertTableRowCount(snapshotInfoTable, 2); GenericTestUtils.waitFor(() -> snapshotDeletingService @@ -174,7 +198,7 @@ public void testMultipleSnapshotKeyReclaim() throws Exception { om.getMetadataManager().getSnapshotInfoTable(); runIndividualTest = false; - createSnapshotDataForBucket1(); + createSnapshotDataForBucket(bucket1); BucketArgs bucketArgs = new BucketArgs.Builder() .setBucketLayout(BucketLayout.LEGACY) @@ -425,7 +449,7 @@ public void testSnapshotWithFSO() throws Exception { while (iterator.hasNext()) { Table.KeyValue next = iterator.next(); String activeDBDeletedKey = next.getKey(); - if (activeDBDeletedKey.matches(".*/key1.*")) { + if (activeDBDeletedKey.matches(".*/key1/.*")) { RepeatedOmKeyInfo activeDBDeleted = next.getValue(); OMMetadataManager metadataManager = cluster.getOzoneManager().getMetadataManager(); @@ -454,6 +478,228 @@ public void testSnapshotWithFSO() throws Exception { rcSnap1.close(); } + private DirectoryDeletingService getMockedDirectoryDeletingService(AtomicBoolean dirDeletionWaitStarted, + AtomicBoolean dirDeletionStarted) + throws InterruptedException, TimeoutException { + OzoneManager ozoneManager = Mockito.spy(om); + om.getKeyManager().getDirDeletingService().shutdown(); + GenericTestUtils.waitFor(() -> om.getKeyManager().getDirDeletingService().getThreadCount() == 0, 1000, + 100000); + DirectoryDeletingService directoryDeletingService = Mockito.spy(new DirectoryDeletingService(10000, + TimeUnit.MILLISECONDS, 100000, ozoneManager, cluster.getConf())); + directoryDeletingService.shutdown(); + GenericTestUtils.waitFor(() -> directoryDeletingService.getThreadCount() == 0, 1000, + 100000); + when(ozoneManager.getMetadataManager()).thenAnswer(i -> { + // Wait for SDS to reach DDS wait block before processing any deleted directories. 
+ GenericTestUtils.waitFor(dirDeletionWaitStarted::get, 1000, 100000); + dirDeletionStarted.set(true); + return i.callRealMethod(); + }); + return directoryDeletingService; + } + + private KeyDeletingService getMockedKeyDeletingService(AtomicBoolean keyDeletionWaitStarted, + AtomicBoolean keyDeletionStarted) + throws InterruptedException, TimeoutException, IOException { + OzoneManager ozoneManager = Mockito.spy(om); + om.getKeyManager().getDeletingService().shutdown(); + GenericTestUtils.waitFor(() -> om.getKeyManager().getDeletingService().getThreadCount() == 0, 1000, + 100000); + KeyManager keyManager = Mockito.spy(om.getKeyManager()); + when(ozoneManager.getKeyManager()).thenReturn(keyManager); + KeyDeletingService keyDeletingService = Mockito.spy(new KeyDeletingService(ozoneManager, + ozoneManager.getScmClient().getBlockClient(), keyManager, 10000, + 100000, cluster.getConf(), false)); + keyDeletingService.shutdown(); + GenericTestUtils.waitFor(() -> keyDeletingService.getThreadCount() == 0, 1000, + 100000); + when(keyManager.getPendingDeletionKeys(anyInt())).thenAnswer(i -> { + // wait for SDS to reach the KDS wait block before processing any key. + GenericTestUtils.waitFor(keyDeletionWaitStarted::get, 1000, 100000); + keyDeletionStarted.set(true); + return i.callRealMethod(); + }); + return keyDeletingService; + } + + @SuppressWarnings("checkstyle:parameternumber") + private SnapshotDeletingService getMockedSnapshotDeletingService(KeyDeletingService keyDeletingService, + DirectoryDeletingService directoryDeletingService, + AtomicBoolean snapshotDeletionStarted, + AtomicBoolean keyDeletionWaitStarted, + AtomicBoolean dirDeletionWaitStarted, + AtomicBoolean keyDeletionStarted, + AtomicBoolean dirDeletionStarted, + OzoneBucket testBucket) + throws InterruptedException, TimeoutException, IOException { + OzoneManager ozoneManager = Mockito.spy(om); + om.getKeyManager().getSnapshotDeletingService().shutdown(); + GenericTestUtils.waitFor(() -> om.getKeyManager().getSnapshotDeletingService().getThreadCount() == 0, 1000, + 100000); + KeyManager keyManager = Mockito.spy(om.getKeyManager()); + OmMetadataManagerImpl omMetadataManager = Mockito.spy((OmMetadataManagerImpl)om.getMetadataManager()); + SnapshotChainManager unMockedSnapshotChainManager = + ((OmMetadataManagerImpl)om.getMetadataManager()).getSnapshotChainManager(); + SnapshotChainManager snapshotChainManager = Mockito.spy(unMockedSnapshotChainManager); + OmSnapshotManager omSnapshotManager = Mockito.spy(om.getOmSnapshotManager()); + when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); + when(ozoneManager.getKeyManager()).thenReturn(keyManager); + when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + when(omMetadataManager.getSnapshotChainManager()).thenReturn(snapshotChainManager); + when(keyManager.getDeletingService()).thenReturn(keyDeletingService); + when(keyManager.getDirDeletingService()).thenReturn(directoryDeletingService); + SnapshotDeletingService snapshotDeletingService = Mockito.spy(new SnapshotDeletingService(10000, + 100000, ozoneManager)); + snapshotDeletingService.shutdown(); + GenericTestUtils.waitFor(() -> snapshotDeletingService.getThreadCount() == 0, 1000, + 100000); + when(snapshotChainManager.iterator(anyBoolean())).thenAnswer(i -> { + Iterator itr = (Iterator) i.callRealMethod(); + return Lists.newArrayList(itr).stream().filter(uuid -> { + try { + SnapshotInfo snapshotInfo = SnapshotUtils.getSnapshotInfo(om, snapshotChainManager, uuid); + return 
snapshotInfo.getBucketName().equals(testBucket.getName()) && + snapshotInfo.getVolumeName().equals(testBucket.getVolumeName()); + } catch (IOException e) { + throw new RuntimeException(e); + } + }).iterator(); + }); + when(snapshotChainManager.getLatestGlobalSnapshotId()) + .thenAnswer(i -> unMockedSnapshotChainManager.getLatestGlobalSnapshotId()); + when(snapshotChainManager.getOldestGlobalSnapshotId()) + .thenAnswer(i -> unMockedSnapshotChainManager.getOldestGlobalSnapshotId()); + doAnswer(i -> { + // KDS wait block reached in SDS. + GenericTestUtils.waitFor(() -> { + return keyDeletingService.isRunningOnAOS(); + }, 1000, 100000); + keyDeletionWaitStarted.set(true); + return i.callRealMethod(); + }).when(snapshotDeletingService).waitForKeyDeletingService(); + doAnswer(i -> { + // DDS wait block reached in SDS. + GenericTestUtils.waitFor(directoryDeletingService::isRunningOnAOS, 1000, 100000); + dirDeletionWaitStarted.set(true); + return i.callRealMethod(); + }).when(snapshotDeletingService).waitForDirDeletingService(); + doAnswer(i -> { + // Assert KDS & DDS is not running when SDS starts moving entries & assert all wait block, KDS processing + // AOS block & DDS AOS block have been executed. + Assertions.assertTrue(keyDeletionWaitStarted.get()); + Assertions.assertTrue(dirDeletionWaitStarted.get()); + Assertions.assertTrue(keyDeletionStarted.get()); + Assertions.assertTrue(dirDeletionStarted.get()); + Assertions.assertFalse(keyDeletingService.isRunningOnAOS()); + Assertions.assertFalse(directoryDeletingService.isRunningOnAOS()); + snapshotDeletionStarted.set(true); + return i.callRealMethod(); + }).when(omSnapshotManager).getSnapshot(anyString(), anyString(), anyString()); + return snapshotDeletingService; + } + + @Test + @Order(4) + public void testParallelExcecutionOfKeyDeletionAndSnapshotDeletion() throws Exception { + AtomicBoolean keyDeletionWaitStarted = new AtomicBoolean(false); + AtomicBoolean dirDeletionWaitStarted = new AtomicBoolean(false); + AtomicBoolean keyDeletionStarted = new AtomicBoolean(false); + AtomicBoolean dirDeletionStarted = new AtomicBoolean(false); + AtomicBoolean snapshotDeletionStarted = new AtomicBoolean(false); + Random random = new Random(); + String bucketName = "bucket" + random.nextInt(); + BucketArgs bucketArgs = new BucketArgs.Builder() + .setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED) + .build(); + OzoneBucket testBucket = TestDataUtil.createBucket( + client, VOLUME_NAME, bucketArgs, bucketName); + // mock keyDeletingService + KeyDeletingService keyDeletingService = getMockedKeyDeletingService(keyDeletionWaitStarted, keyDeletionStarted); + + // mock dirDeletingService + DirectoryDeletingService directoryDeletingService = getMockedDirectoryDeletingService(dirDeletionWaitStarted, + dirDeletionStarted); + + // mock snapshotDeletingService. 
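+ // getMockedSnapshotDeletingService wires SDS to block until KDS and DDS have started their runs on the active object store, and to begin moving snapshot entries only after both have finished, so the three services are exercised in parallel.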
+ SnapshotDeletingService snapshotDeletingService = getMockedSnapshotDeletingService(keyDeletingService, + directoryDeletingService, snapshotDeletionStarted, keyDeletionWaitStarted, dirDeletionWaitStarted, + keyDeletionStarted, dirDeletionStarted, testBucket); + createSnapshotFSODataForBucket(testBucket); + List> renamesKeyEntries; + List>> deletedKeyEntries; + List> deletedDirEntries; + try (ReferenceCounted snapshot = om.getOmSnapshotManager().getSnapshot(testBucket.getVolumeName(), + testBucket.getName(), testBucket.getName() + "snap2")) { + renamesKeyEntries = snapshot.get().getKeyManager().getRenamesKeyEntries(testBucket.getVolumeName(), + testBucket.getName(), "", 1000); + deletedKeyEntries = snapshot.get().getKeyManager().getDeletedKeyEntries(testBucket.getVolumeName(), + testBucket.getName(), "", 1000); + deletedDirEntries = snapshot.get().getKeyManager().getDeletedDirEntries(testBucket.getVolumeName(), + testBucket.getName(), 1000); + } + Thread keyDeletingThread = new Thread(() -> { + try { + keyDeletingService.runPeriodicalTaskNow(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + Thread directoryDeletingThread = new Thread(() -> { + try { + directoryDeletingService.runPeriodicalTaskNow(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + ExecutorService snapshotDeletingThread = Executors.newFixedThreadPool(1); + Runnable snapshotDeletionRunnable = () -> { + try { + snapshotDeletingService.runPeriodicalTaskNow(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }; + keyDeletingThread.start(); + directoryDeletingThread.start(); + Future future = snapshotDeletingThread.submit(snapshotDeletionRunnable); + GenericTestUtils.waitFor(snapshotDeletionStarted::get, 1000, 30000); + future.get(); + try (ReferenceCounted snapshot = om.getOmSnapshotManager().getSnapshot(testBucket.getVolumeName(), + testBucket.getName(), testBucket.getName() + "snap2")) { + Assertions.assertEquals(Collections.emptyList(), + snapshot.get().getKeyManager().getRenamesKeyEntries(testBucket.getVolumeName(), + testBucket.getName(), "", 1000)); + Assertions.assertEquals(Collections.emptyList(), + snapshot.get().getKeyManager().getDeletedKeyEntries(testBucket.getVolumeName(), + testBucket.getName(), "", 1000)); + Assertions.assertEquals(Collections.emptyList(), + snapshot.get().getKeyManager().getDeletedDirEntries(testBucket.getVolumeName(), + testBucket.getName(), 1000)); + } + List> aosRenamesKeyEntries = + om.getKeyManager().getRenamesKeyEntries(testBucket.getVolumeName(), + testBucket.getName(), "", 1000); + List>> aosDeletedKeyEntries = + om.getKeyManager().getDeletedKeyEntries(testBucket.getVolumeName(), + testBucket.getName(), "", 1000); + List> aosDeletedDirEntries = + om.getKeyManager().getDeletedDirEntries(testBucket.getVolumeName(), + testBucket.getName(), 1000); + renamesKeyEntries.forEach(entry -> Assertions.assertTrue(aosRenamesKeyEntries.contains(entry))); + deletedKeyEntries.forEach(entry -> Assertions.assertTrue(aosDeletedKeyEntries.contains(entry))); + deletedDirEntries.forEach(entry -> Assertions.assertTrue(aosDeletedDirEntries.contains(entry))); + Mockito.reset(snapshotDeletingService); + SnapshotInfo snap2 = SnapshotUtils.getSnapshotInfo(om, testBucket.getVolumeName(), + testBucket.getName(), testBucket.getName() + "snap2"); + Assertions.assertEquals(snap2.getSnapshotStatus(), SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED); + future = snapshotDeletingThread.submit(snapshotDeletionRunnable); + future.get(); + 
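// The second SDS pass above should purge the already-reclaimed snapshot's metadata, so the lookup below is expected to fail. +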
Assertions.assertThrows(IOException.class, () -> SnapshotUtils.getSnapshotInfo(om, testBucket.getVolumeName(), + testBucket.getName(), testBucket.getName() + "snap2")); + cluster.restartOzoneManager(); + } + /* Flow ---- @@ -472,7 +718,7 @@ public void testSnapshotWithFSO() throws Exception { create snapshot3 delete snapshot2 */ - private void createSnapshotDataForBucket1() throws Exception { + private synchronized void createSnapshotDataForBucket(OzoneBucket bucket) throws Exception { Table snapshotInfoTable = om.getMetadataManager().getSnapshotInfoTable(); Table deletedTable = @@ -482,70 +728,147 @@ private void createSnapshotDataForBucket1() throws Exception { OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl) om.getMetadataManager(); - TestDataUtil.createKey(bucket1, "bucket1key0", ReplicationFactor.THREE, + TestDataUtil.createKey(bucket, bucket.getName() + "key0", ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT); - TestDataUtil.createKey(bucket1, "bucket1key1", ReplicationFactor.THREE, + TestDataUtil.createKey(bucket, bucket.getName() + "key1", ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT); assertTableRowCount(keyTable, 2); // Create Snapshot 1. - client.getProxy().createSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, - "bucket1snap1"); + client.getProxy().createSnapshot(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "snap1"); assertTableRowCount(snapshotInfoTable, 1); // Overwrite bucket1key0, This is a newer version of the key which should // reclaimed as this is a different version of the key. - TestDataUtil.createKey(bucket1, "bucket1key0", ReplicationFactor.THREE, + TestDataUtil.createKey(bucket, bucket.getName() + "key0", ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT); - TestDataUtil.createKey(bucket1, "bucket1key2", ReplicationFactor.THREE, + TestDataUtil.createKey(bucket, bucket.getName() + "key2", ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT); // Key 1 cannot be reclaimed as it is still referenced by Snapshot 1. - client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_ONE, - "bucket1key1", false); + client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "key1", false); // Key 2 is deleted here, which will be reclaimed here as // it is not being referenced by previous snapshot. - client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_ONE, - "bucket1key2", false); - client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_ONE, - "bucket1key0", false); + client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "key2", false); + client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "key0", false); assertTableRowCount(keyTable, 0); // one copy of bucket1key0 should also be reclaimed as it not same // but original deleted key created during overwrite should not be deleted assertTableRowCount(deletedTable, 2); // Create Snapshot 2. - client.getProxy().createSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, - "bucket1snap2"); + client.getProxy().createSnapshot(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "snap2"); assertTableRowCount(snapshotInfoTable, 2); // Key 2 is removed from the active Db's // deletedTable when Snapshot 2 is taken. 
assertTableRowCount(deletedTable, 0); - TestDataUtil.createKey(bucket1, "bucket1key3", ReplicationFactor.THREE, + TestDataUtil.createKey(bucket, bucket.getName() + "key3", ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT); - TestDataUtil.createKey(bucket1, "bucket1key4", ReplicationFactor.THREE, + TestDataUtil.createKey(bucket, bucket.getName() + "key4", ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT); - client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_ONE, - "bucket1key4", false); + client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "key4", false); assertTableRowCount(keyTable, 1); assertTableRowCount(deletedTable, 0); // Create Snapshot 3. - client.getProxy().createSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, - "bucket1snap3"); + client.getProxy().createSnapshot(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "snap3"); assertTableRowCount(snapshotInfoTable, 3); SnapshotInfo snapshotInfo = metadataManager.getSnapshotInfoTable() - .get("/vol1/bucket1/bucket1snap2"); + .get(String.format("/%s/%s/%ssnap2", bucket.getVolumeName(), bucket.getName(), bucket.getName())); // Delete Snapshot 2. - client.getProxy().deleteSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, - "bucket1snap2"); + client.getProxy().deleteSnapshot(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "snap2"); assertTableRowCount(snapshotInfoTable, 2); - verifySnapshotChain(snapshotInfo, "/vol1/bucket1/bucket1snap3"); + verifySnapshotChain(snapshotInfo, String.format("/%s/%s/%ssnap3", bucket.getVolumeName(), bucket.getName(), + bucket.getName())); + } + + + /* + Flow + ---- + create dir0/key0 + create dir1/key1 + overwrite dir0/key0 + create dir2/key2 + create snap1 + rename dir1/key1 -> dir1/key10 + delete dir1/key10 + delete dir2 + create snap2 + delete snap2 + */ + private synchronized void createSnapshotFSODataForBucket(OzoneBucket bucket) throws Exception { + Table snapshotInfoTable = + om.getMetadataManager().getSnapshotInfoTable(); + Table deletedTable = + om.getMetadataManager().getDeletedTable(); + Table deletedDirTable = + om.getMetadataManager().getDeletedDirTable(); + Table keyTable = + om.getMetadataManager().getKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED); + Table dirTable = + om.getMetadataManager().getDirectoryTable(); + Table renameTable = om.getMetadataManager().getSnapshotRenamedTable(); + OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl) + om.getMetadataManager(); + Map countMap = + metadataManager.listTables().entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, e -> { + try { + return (int)metadataManager.countRowsInTable(e.getValue()); + } catch (IOException ex) { + throw new RuntimeException(ex); + } + })); + TestDataUtil.createKey(bucket, "dir0/" + bucket.getName() + "key0", ReplicationFactor.THREE, + ReplicationType.RATIS, CONTENT); + TestDataUtil.createKey(bucket, "dir1/" + bucket.getName() + "key1", ReplicationFactor.THREE, + ReplicationType.RATIS, CONTENT); + assertTableRowCount(keyTable, countMap.get(keyTable.getName()) + 2); + assertTableRowCount(dirTable, countMap.get(dirTable.getName()) + 2); + + // Overwrite bucket1key0, This is a newer version of the key which should + // reclaimed as this is a different version of the key. 
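+ // (In this FSO flow the overwritten key is dir0/<bucket>key0; its replaced version accounts for the +1 deleted-table count asserted below.)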
+ TestDataUtil.createKey(bucket, "dir0/" + bucket.getName() + "key0", ReplicationFactor.THREE, + ReplicationType.RATIS, CONTENT); + TestDataUtil.createKey(bucket, "dir2/" + bucket.getName() + "key2", ReplicationFactor.THREE, + ReplicationType.RATIS, CONTENT); + assertTableRowCount(keyTable, countMap.get(keyTable.getName()) + 3); + assertTableRowCount(dirTable, countMap.get(dirTable.getName()) + 3); + assertTableRowCount(deletedTable, countMap.get(deletedTable.getName()) + 1); + // create snap1 + client.getProxy().createSnapshot(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "snap1"); + bucket.renameKey("dir1/" + bucket.getName() + "key1", "dir1/" + bucket.getName() + "key10"); + bucket.renameKey("dir1/", "dir10/"); + assertTableRowCount(renameTable, countMap.get(renameTable.getName()) + 2); + client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(), + "dir10/" + bucket.getName() + "key10", false); + assertTableRowCount(deletedTable, countMap.get(deletedTable.getName()) + 1); + // Key 2 is deleted here, which will be reclaimed here as + // it is not being referenced by previous snapshot. + client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(), "dir2", true); + assertTableRowCount(deletedDirTable, countMap.get(deletedDirTable.getName()) + 1); + client.getProxy().createSnapshot(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "snap2"); + // Delete Snapshot 2. + client.getProxy().deleteSnapshot(bucket.getVolumeName(), bucket.getName(), + bucket.getName() + "snap2"); + assertTableRowCount(snapshotInfoTable, countMap.get(snapshotInfoTable.getName()) + 2); } + private void verifySnapshotChain(SnapshotInfo deletedSnapshot, String nextSnapshot) throws Exception { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java index 03df331087b8..3be0725a0093 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java @@ -57,6 +57,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -79,6 +80,7 @@ public class TestSnapshotDirectoryCleaningService { public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.setInt(OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL, 2500); + conf.setBoolean(OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED, true); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 2500, TimeUnit.MILLISECONDS); conf.setBoolean(OZONE_ACL_ENABLED, true); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java new file mode 100644 index 000000000000..15f1e10a6305 --- /dev/null +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java @@ -0,0 +1,860 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.s3.awssdk.v1; + +import com.amazonaws.AmazonServiceException; +import com.amazonaws.AmazonServiceException.ErrorType; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.model.AbortMultipartUploadRequest; +import com.amazonaws.services.s3.model.AccessControlList; +import com.amazonaws.services.s3.model.Bucket; +import com.amazonaws.services.s3.model.CanonicalGrantee; +import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; +import com.amazonaws.services.s3.model.CompleteMultipartUploadResult; +import com.amazonaws.services.s3.model.CreateBucketRequest; +import com.amazonaws.services.s3.model.Grantee; +import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; +import com.amazonaws.services.s3.model.InitiateMultipartUploadResult; +import com.amazonaws.services.s3.model.ListMultipartUploadsRequest; +import com.amazonaws.services.s3.model.ListObjectsRequest; +import com.amazonaws.services.s3.model.ListObjectsV2Request; +import com.amazonaws.services.s3.model.ListObjectsV2Result; +import com.amazonaws.services.s3.model.ListPartsRequest; +import com.amazonaws.services.s3.model.MultipartUpload; +import com.amazonaws.services.s3.model.MultipartUploadListing; +import com.amazonaws.services.s3.model.ObjectListing; +import com.amazonaws.services.s3.model.ObjectMetadata; +import com.amazonaws.services.s3.model.ObjectTagging; +import com.amazonaws.services.s3.model.Owner; +import com.amazonaws.services.s3.model.PartETag; +import com.amazonaws.services.s3.model.PartListing; +import com.amazonaws.services.s3.model.PartSummary; +import com.amazonaws.services.s3.model.Permission; +import com.amazonaws.services.s3.model.PutObjectRequest; +import com.amazonaws.services.s3.model.PutObjectResult; +import com.amazonaws.services.s3.model.S3Object; +import com.amazonaws.services.s3.model.S3ObjectInputStream; +import com.amazonaws.services.s3.model.S3ObjectSummary; +import com.amazonaws.services.s3.model.Tag; +import com.amazonaws.services.s3.model.UploadPartRequest; +import com.amazonaws.services.s3.model.UploadPartResult; +import com.amazonaws.services.s3.transfer.TransferManager; +import com.amazonaws.services.s3.transfer.TransferManagerBuilder; +import com.amazonaws.services.s3.transfer.Upload; +import com.amazonaws.services.s3.transfer.model.UploadResult; +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationFactor; +import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientFactory; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.utils.InputSubstream; +import org.apache.ozone.test.OzoneTestBase; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Test; +import 
org.junit.jupiter.api.TestMethodOrder; +import org.junit.jupiter.api.io.TempDir; + +import javax.xml.bind.DatatypeConverter; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.RandomAccessFile; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.MessageDigest; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Random; +import java.util.stream.Collectors; + +import static org.apache.hadoop.ozone.OzoneConsts.MB; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * This is an abstract class to test the AWS Java S3 SDK operations. + * This class should be extended for OM standalone and OM HA (Ratis) cluster setup. + * + * The test scenarios are adapted from + * - https://github.com/awsdocs/aws-doc-sdk-examples/tree/main/java/example_code/s3/ + * - https://github.com/ceph/s3-tests + * + * TODO: Currently we are using AWS SDK V1; tests for AWS SDK V2 still need to be added. + */ +@TestMethodOrder(MethodOrderer.MethodName.class) +public abstract class AbstractS3SDKV1Tests extends OzoneTestBase { + + /** + * There are still some unsupported S3 operations. + * Current unsupported S3 operations (non-exhaustive): + * - Cross Region Replication (CrossRegionReplication.java) + * - Versioning-enabled buckets + * - DeleteObjectVersionEnabledBucket.java + * - DeleteMultipleObjectsVersionEnabledBucket.java + * - ListKeysVersioningEnabledBucket.java + * - Website configurations + * - WebsiteConfiguration.java + * - SetWebsiteConfiguration.java + * - GetWebsiteConfiguration.java + * - DeleteWebsiteConfiguration.java + * - S3 Event Notifications + * - EnableNotificationOnABucket.java + * - Object tags + * - GetObjectTags.java + * - GetObjectTags2.java + * - Bucket policy + * - SetBucketPolicy.java + * - GetBucketPolicy.java + * - DeleteBucketPolicy.java + * - Bucket lifecycle configuration + * - LifecycleConfiguration.java + * - Canned Bucket ACL + * - CreateBucketWithACL.java + * - Object ACL + * - SetAcl.java + * - ModifyACLExistingObject.java + * - GetAcl.java + * - S3 Encryption + * - S3Encrypt.java + * - S3EncryptV2.java + * - Client-side encryption + * - S3ClientSideEncryptionAsymmetricMasterKey.java + * - S3ClientSideEncryptionSymMasterKey.java + * - Server-side encryption + * - SpecifyServerSideEncryption.java + * - ServerSideEncryptionCopyObjectUsingHLWithSSEC.java + * - ServerSideEncryptionUsingClientSideEncryptionKey.java + * - Dual stack endpoints + * - DualStackEndpoints.java + * - Transfer acceleration + * - TransferAcceleration.java + * - Temp credentials + * - MakingRequestsWithFederatedTempCredentials.java + * - MakingRequestsWithIAMTempCredentials.java + * - Object archival + * - RestoreArchivedObject + * - KMS key + * - UploadObjectKMSKey.java + */ + + private static MiniOzoneCluster cluster = null; + private static AmazonS3 s3Client = null; + + /** + * Create a MiniOzoneCluster with S3G enabled for testing. 
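+ * Subclasses are expected to call this from their own test setup and pair it with {@link #shutdownCluster()}. A minimal sketch (the lifecycle method names below are illustrative, not part of this class): + * <pre>{@code
+ *   static void init() throws Exception {          // e.g. called from a subclass @BeforeAll method
+ *     startCluster(new OzoneConfiguration());
+ *   }
+ *   static void cleanup() throws IOException {     // e.g. called from a subclass @AfterAll method
+ *     shutdownCluster();
+ *   }
+ * }</pre>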
+ * @param conf Configurations to start the cluster + * @throws Exception exception thrown when waiting for the cluster to be ready. + */ + static void startCluster(OzoneConfiguration conf) throws Exception { + cluster = MiniOzoneCluster.newBuilder(conf) + .includeS3G(true) + .setNumDatanodes(5) + .build(); + cluster.waitForClusterToBeReady(); + s3Client = cluster.newS3Client(); + } + + /** + * Shutdown the MiniOzoneCluster. + */ + static void shutdownCluster() throws IOException { + if (cluster != null) { + cluster.shutdown(); + } + } + + public static void setCluster(MiniOzoneCluster cluster) { + AbstractS3SDKV1Tests.cluster = cluster; + } + + public static MiniOzoneCluster getCluster() { + return AbstractS3SDKV1Tests.cluster; + } + + @Test + public void testCreateBucket() { + final String bucketName = getBucketName(); + + Bucket b = s3Client.createBucket(bucketName); + + assertEquals(bucketName, b.getName()); + assertTrue(s3Client.doesBucketExist(bucketName)); + assertTrue(s3Client.doesBucketExistV2(bucketName)); + assertTrue(isBucketEmpty(b)); + } + + @Test + public void testBucketACLOperations() { + // TODO: Uncomment assertions when bucket S3 ACL logic has been fixed + final String bucketName = getBucketName(); + + AccessControlList aclList = new AccessControlList(); + Owner owner = new Owner("owner", "owner"); + aclList.withOwner(owner); + Grantee grantee = new CanonicalGrantee("testGrantee"); + aclList.grantPermission(grantee, Permission.Read); + + + CreateBucketRequest createBucketRequest = new CreateBucketRequest(bucketName) + .withAccessControlList(aclList); + + s3Client.createBucket(createBucketRequest); + +// AccessControlList retrievedAclList = s3.getBucketAcl(bucketName); +// assertEquals(aclList, retrievedAclList); + +// aclList.grantPermission(grantee, Permission.Write); +// s3.setBucketAcl(bucketName, aclList); + +// retrievedAclList = s3.getBucketAcl(bucketName); +// assertEquals(aclList, retrievedAclList); + + } + + @Test + public void testListBuckets() { + List bucketNames = new ArrayList<>(); + for (int i = 0; i <= 5; i++) { + String bucketName = getBucketName(String.valueOf(i)); + s3Client.createBucket(bucketName); + bucketNames.add(bucketName); + } + + List bucketList = s3Client.listBuckets(); + List listBucketNames = bucketList.stream() + .map(Bucket::getName).collect(Collectors.toList()); + + assertThat(listBucketNames).containsAll(bucketNames); + } + + @Test + public void testDeleteBucket() { + final String bucketName = getBucketName(); + + s3Client.createBucket(bucketName); + + s3Client.deleteBucket(bucketName); + + assertFalse(s3Client.doesBucketExist(bucketName)); + assertFalse(s3Client.doesBucketExistV2(bucketName)); + } + + @Test + public void testDeleteBucketNotExist() { + final String bucketName = getBucketName(); + + AmazonServiceException ase = assertThrows(AmazonServiceException.class, + () -> s3Client.deleteBucket(bucketName)); + + assertEquals(ErrorType.Client, ase.getErrorType()); + assertEquals(404, ase.getStatusCode()); + assertEquals("NoSuchBucket", ase.getErrorCode()); + } + + @Test + public void testDeleteBucketNonEmptyWithKeys() { + final String bucketName = getBucketName(); + s3Client.createBucket(bucketName); + + // Upload some objects to the bucket + for (int i = 1; i <= 10; i++) { + s3Client.putObject(bucketName, "key-" + i, RandomStringUtils.randomAlphanumeric(1024)); + } + + // Bucket deletion should fail if there are still keys in the bucket + AmazonServiceException ase = assertThrows(AmazonServiceException.class, + () -> 
s3Client.deleteBucket(bucketName) + ); + assertEquals(ErrorType.Client, ase.getErrorType()); + assertEquals(409, ase.getStatusCode()); + assertEquals("BucketNotEmpty", ase.getErrorCode()); + + // Delete all the keys + ObjectListing objectListing = s3Client.listObjects(bucketName); + while (true) { + for (S3ObjectSummary summary : objectListing.getObjectSummaries()) { + s3Client.deleteObject(bucketName, summary.getKey()); + } + + // more object_listing to retrieve? + if (objectListing.isTruncated()) { + objectListing = s3Client.listNextBatchOfObjects(objectListing); + } else { + break; + } + } + } + + @Test + public void testDeleteBucketNonEmptyWithIncompleteMultipartUpload(@TempDir Path tempDir) throws Exception { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + s3Client.createBucket(bucketName); + + File multipartUploadFile = Files.createFile(tempDir.resolve("multipartupload.txt")).toFile(); + + createFile(multipartUploadFile, (int) (5 * MB)); + + // Create an incomplete multipart upload by initiating multipart upload, + // uploading some parts, but not actually completing it. + String uploadId = initiateMultipartUpload(bucketName, keyName, null, null, null); + + uploadParts(bucketName, keyName, uploadId, multipartUploadFile, 1 * MB); + + // Bucket deletion should fail if there are still keys in the bucket + AmazonServiceException ase = assertThrows(AmazonServiceException.class, + () -> s3Client.deleteBucket(bucketName) + ); + assertEquals(ErrorType.Client, ase.getErrorType()); + assertEquals(409, ase.getStatusCode()); + assertEquals("BucketNotEmpty", ase.getErrorCode()); + + // After the multipart upload is aborted, the bucket deletion should succeed + abortMultipartUpload(bucketName, keyName, uploadId); + + s3Client.deleteBucket(bucketName); + + assertFalse(s3Client.doesBucketExistV2(bucketName)); + } + + @Test + public void testPutObject() { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + final String content = "bar"; + s3Client.createBucket(bucketName); + + InputStream is = new ByteArrayInputStream(content.getBytes(StandardCharsets.UTF_8)); + + PutObjectResult putObjectResult = s3Client.putObject(bucketName, keyName, is, new ObjectMetadata()); + assertEquals("37b51d194a7513e45b56f6524f2d51f2", putObjectResult.getETag()); + } + + @Test + public void testPutObjectEmpty() { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + final String content = ""; + s3Client.createBucket(bucketName); + + InputStream is = new ByteArrayInputStream(content.getBytes(StandardCharsets.UTF_8)); + + PutObjectResult putObjectResult = s3Client.putObject(bucketName, keyName, is, new ObjectMetadata()); + assertEquals("d41d8cd98f00b204e9800998ecf8427e", putObjectResult.getETag()); + } + + @Test + public void testGetObject() throws Exception { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + final String content = "bar"; + final byte[] contentBytes = content.getBytes(StandardCharsets.UTF_8); + s3Client.createBucket(bucketName); + + InputStream is = new ByteArrayInputStream(contentBytes); + ObjectMetadata objectMetadata = new ObjectMetadata(); + Map userMetadata = new HashMap<>(); + userMetadata.put("key1", "value1"); + userMetadata.put("key2", "value2"); + objectMetadata.setUserMetadata(userMetadata); + + List tags = Arrays.asList(new Tag("tag1", "value1"), new Tag("tag2", "value2")); + ObjectTagging objectTagging = new ObjectTagging(tags); + + + PutObjectRequest 
putObjectRequest = new PutObjectRequest(bucketName, keyName, is, objectMetadata) + .withTagging(objectTagging); + + s3Client.putObject(putObjectRequest); + + S3Object s3Object = s3Client.getObject(bucketName, keyName); + assertEquals(tags.size(), s3Object.getTaggingCount()); + + try (S3ObjectInputStream s3is = s3Object.getObjectContent(); + ByteArrayOutputStream bos = new ByteArrayOutputStream(contentBytes.length)) { + byte[] readBuf = new byte[1024]; + int readLen = 0; + while ((readLen = s3is.read(readBuf)) > 0) { + bos.write(readBuf, 0, readLen); + } + assertEquals(content, bos.toString("UTF-8")); + } + } + + @Test + public void testGetObjectWithoutETag() throws Exception { + // Object uploaded using other protocols (e.g. ofs / ozone cli) will not + // have ETag. Ensure that ETag will not do ETag validation on GetObject if there + // is no ETag present. + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + + s3Client.createBucket(bucketName); + + String value = "sample value"; + byte[] valueBytes = value.getBytes(StandardCharsets.UTF_8); + + OzoneConfiguration conf = cluster.getConf(); + try (OzoneClient ozoneClient = OzoneClientFactory.getRpcClient(conf)) { + ObjectStore store = ozoneClient.getObjectStore(); + + OzoneVolume volume = store.getS3Volume(); + OzoneBucket bucket = volume.getBucket(bucketName); + + try (OzoneOutputStream out = bucket.createKey(keyName, + valueBytes.length, + ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.ONE), + Collections.emptyMap())) { + out.write(valueBytes); + } + } + + S3Object s3Object = s3Client.getObject(bucketName, keyName); + assertNull(s3Object.getObjectMetadata().getETag()); + + try (S3ObjectInputStream s3is = s3Object.getObjectContent(); + ByteArrayOutputStream bos = new ByteArrayOutputStream(valueBytes.length)) { + byte[] readBuf = new byte[1024]; + int readLen = 0; + while ((readLen = s3is.read(readBuf)) > 0) { + bos.write(readBuf, 0, readLen); + } + assertEquals(value, bos.toString("UTF-8")); + } + } + + @Test + public void testListObjectsMany() { + final String bucketName = getBucketName(); + s3Client.createBucket(bucketName); + final List keyNames = Arrays.asList( + getKeyName("1"), + getKeyName("2"), + getKeyName("3") + ); + + for (String keyName: keyNames) { + s3Client.putObject(bucketName, keyName, RandomStringUtils.randomAlphanumeric(5)); + } + + ListObjectsRequest listObjectsRequest = new ListObjectsRequest() + .withBucketName(bucketName) + .withMaxKeys(2); + ObjectListing listObjectsResponse = s3Client.listObjects(listObjectsRequest); + assertThat(listObjectsResponse.getObjectSummaries()).hasSize(2); + assertEquals(bucketName, listObjectsResponse.getBucketName()); + assertEquals(listObjectsResponse.getObjectSummaries().stream() + .map(S3ObjectSummary::getKey).collect(Collectors.toList()), + keyNames.subList(0, 2)); + assertTrue(listObjectsResponse.isTruncated()); + + + listObjectsRequest = new ListObjectsRequest() + .withBucketName(bucketName) + .withMaxKeys(2) + .withMarker(listObjectsResponse.getNextMarker()); + listObjectsResponse = s3Client.listObjects(listObjectsRequest); + assertThat(listObjectsResponse.getObjectSummaries()).hasSize(1); + assertEquals(bucketName, listObjectsResponse.getBucketName()); + assertEquals(listObjectsResponse.getObjectSummaries().stream() + .map(S3ObjectSummary::getKey).collect(Collectors.toList()), + keyNames.subList(2, keyNames.size())); + assertFalse(listObjectsResponse.isTruncated()); + } + + @Test + public void testListObjectsManyV2() { 
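    // A minimal drain-all-pages sketch for the V2 listing API (comment-only illustration;
    // "bucket" and "allKeys" are placeholders for a bucket name and a List<String>):
    //   ListObjectsV2Request req = new ListObjectsV2Request().withBucketName(bucket).withMaxKeys(2);
    //   ListObjectsV2Result page;
    //   do {
    //     page = s3Client.listObjectsV2(req);
    //     page.getObjectSummaries().forEach(s -> allKeys.add(s.getKey()));
    //     req.setContinuationToken(page.getNextContinuationToken());
    //   } while (page.isTruncated());
    // The assertions below exercise exactly two pages of this loop explicitly.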
+ final String bucketName = getBucketName(); + s3Client.createBucket(bucketName); + final List keyNames = Arrays.asList( + getKeyName("1"), + getKeyName("2"), + getKeyName("3") + ); + + for (String keyName: keyNames) { + s3Client.putObject(bucketName, keyName, RandomStringUtils.randomAlphanumeric(5)); + } + + ListObjectsV2Request listObjectsRequest = new ListObjectsV2Request() + .withBucketName(bucketName) + .withMaxKeys(2); + ListObjectsV2Result listObjectsResponse = s3Client.listObjectsV2(listObjectsRequest); + assertThat(listObjectsResponse.getObjectSummaries()).hasSize(2); + assertEquals(bucketName, listObjectsResponse.getBucketName()); + assertEquals(listObjectsResponse.getObjectSummaries().stream() + .map(S3ObjectSummary::getKey).collect(Collectors.toList()), + keyNames.subList(0, 2)); + assertTrue(listObjectsResponse.isTruncated()); + + + listObjectsRequest = new ListObjectsV2Request() + .withBucketName(bucketName) + .withMaxKeys(2) + .withContinuationToken(listObjectsResponse.getNextContinuationToken()); + listObjectsResponse = s3Client.listObjectsV2(listObjectsRequest); + assertThat(listObjectsResponse.getObjectSummaries()).hasSize(1); + assertEquals(bucketName, listObjectsResponse.getBucketName()); + assertEquals(listObjectsResponse.getObjectSummaries().stream() + .map(S3ObjectSummary::getKey).collect(Collectors.toList()), + keyNames.subList(2, keyNames.size())); + assertFalse(listObjectsResponse.isTruncated()); + } + + @Test + public void testListObjectsBucketNotExist() { + final String bucketName = getBucketName(); + ListObjectsRequest listObjectsRequest = new ListObjectsRequest() + .withBucketName(bucketName); + AmazonServiceException ase = assertThrows(AmazonServiceException.class, + () -> s3Client.listObjects(listObjectsRequest)); + assertEquals(ErrorType.Client, ase.getErrorType()); + assertEquals(404, ase.getStatusCode()); + assertEquals("NoSuchBucket", ase.getErrorCode()); + } + + @Test + public void testListObjectsV2BucketNotExist() { + final String bucketName = getBucketName(); + ListObjectsV2Request listObjectsRequest = new ListObjectsV2Request() + .withBucketName(bucketName); + AmazonServiceException ase = assertThrows(AmazonServiceException.class, + () -> s3Client.listObjectsV2(listObjectsRequest)); + assertEquals(ErrorType.Client, ase.getErrorType()); + assertEquals(404, ase.getStatusCode()); + assertEquals("NoSuchBucket", ase.getErrorCode()); + } + + @Test + public void testHighLevelMultipartUpload(@TempDir Path tempDir) throws Exception { + TransferManager tm = TransferManagerBuilder.standard() + .withS3Client(s3Client) + .build(); + + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + + s3Client.createBucket(bucketName); + + // The minimum file size to for TransferManager to initiate multipart upload is 16MB, so create a file + // larger than the threshold. + // See TransferManagerConfiguration#getMultipartUploadThreshold + int fileSize = (int) (20 * MB); + File multipartUploadFile = Files.createFile(tempDir.resolve("multipartupload.txt")).toFile(); + + createFile(multipartUploadFile, fileSize); + + // TransferManager processes all transfers asynchronously, + // so this call returns immediately. 
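        // Sketch (not executed by this test): the multipart threshold can also be set
        // explicitly instead of relying on the 16 MB default, assuming the SDK's
        // TransferManagerBuilder#withMultipartUploadThreshold(Long) is available:
        //   TransferManager tmExplicit = TransferManagerBuilder.standard()
        //       .withS3Client(s3Client)
        //       .withMultipartUploadThreshold(16 * MB)
        //       .build();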
+ Upload upload = tm.upload(bucketName, keyName, multipartUploadFile); + + upload.waitForCompletion(); + UploadResult uploadResult = upload.waitForUploadResult(); + assertEquals(bucketName, uploadResult.getBucketName()); + assertEquals(keyName, uploadResult.getKey()); + } + + @Test + public void testLowLevelMultipartUpload(@TempDir Path tempDir) throws Exception { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + final Map userMetadata = new HashMap<>(); + userMetadata.put("key1", "value1"); + userMetadata.put("key2", "value2"); + + List tags = Arrays.asList(new Tag("tag1", "value1"), new Tag("tag2", "value2")); + + s3Client.createBucket(bucketName); + + File multipartUploadFile = Files.createFile(tempDir.resolve("multipartupload.txt")).toFile(); + + createFile(multipartUploadFile, (int) (25 * MB)); + + multipartUpload(bucketName, keyName, multipartUploadFile, 5 * MB, null, userMetadata, tags); + + S3Object s3Object = s3Client.getObject(bucketName, keyName); + assertEquals(keyName, s3Object.getKey()); + assertEquals(bucketName, s3Object.getBucketName()); + assertEquals(tags.size(), s3Object.getTaggingCount()); + + ObjectMetadata objectMetadata = s3Client.getObjectMetadata(bucketName, keyName); + assertEquals(userMetadata, objectMetadata.getUserMetadata()); + } + + @Test + public void testListMultipartUploads() { + final String bucketName = getBucketName(); + final String multipartKey1 = getKeyName("multipart1"); + final String multipartKey2 = getKeyName("multipart2"); + + s3Client.createBucket(bucketName); + + List uploadIds = new ArrayList<>(); + + String uploadId1 = initiateMultipartUpload(bucketName, multipartKey1, null, null, null); + uploadIds.add(uploadId1); + String uploadId2 = initiateMultipartUpload(bucketName, multipartKey1, null, null, null); + uploadIds.add(uploadId2); + // TODO: Currently, Ozone sorts based on uploadId instead of MPU init time within the same key. 
+ // Remove this sorting step once HDDS-11532 has been implemented + Collections.sort(uploadIds); + String uploadId3 = initiateMultipartUpload(bucketName, multipartKey2, null, null, null); + uploadIds.add(uploadId3); + + // TODO: Add test for max uploads threshold and marker once HDDS-11530 has been implemented + ListMultipartUploadsRequest listMultipartUploadsRequest = new ListMultipartUploadsRequest(bucketName); + + MultipartUploadListing result = s3Client.listMultipartUploads(listMultipartUploadsRequest); + + List listUploadIds = result.getMultipartUploads().stream() + .map(MultipartUpload::getUploadId) + .collect(Collectors.toList()); + + assertEquals(uploadIds, listUploadIds); + } + + @Test + public void testListParts(@TempDir Path tempDir) throws Exception { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + final long fileSize = 5 * MB; + final long partSize = 1 * MB; + final int maxParts = 2; + + s3Client.createBucket(bucketName); + + String uploadId = initiateMultipartUpload(bucketName, keyName, null, null, null); + + File multipartUploadFile = Files.createFile(tempDir.resolve("multipartupload.txt")).toFile(); + + createFile(multipartUploadFile, (int) fileSize); + + List partETags = uploadParts(bucketName, keyName, uploadId, multipartUploadFile, partSize); + + List listPartETags = new ArrayList<>(); + int partNumberMarker = 0; + int expectedNumOfParts = 5; + PartListing listPartsResult; + do { + ListPartsRequest listPartsRequest = new ListPartsRequest(bucketName, keyName, uploadId) + .withMaxParts(maxParts) + .withPartNumberMarker(partNumberMarker); + listPartsResult = s3Client.listParts(listPartsRequest); + if (expectedNumOfParts > maxParts) { + assertTrue(listPartsResult.isTruncated()); + partNumberMarker = listPartsResult.getNextPartNumberMarker(); + expectedNumOfParts -= maxParts; + } else { + assertFalse(listPartsResult.isTruncated()); + } + for (PartSummary partSummary : listPartsResult.getParts()) { + listPartETags.add(new PartETag(partSummary.getPartNumber(), partSummary.getETag())); + } + } while (listPartsResult.isTruncated()); + + assertEquals(partETags.size(), listPartETags.size()); + for (int i = 0; i < partETags.size(); i++) { + assertEquals(partETags.get(i).getPartNumber(), listPartETags.get(i).getPartNumber()); + assertEquals(partETags.get(i).getETag(), listPartETags.get(i).getETag()); + } + } + + @Test + public void testListPartsNotFound() { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + + s3Client.createBucket(bucketName); + + ListPartsRequest listPartsRequest = + new ListPartsRequest(bucketName, keyName, "nonexist"); + + AmazonServiceException ase = assertThrows(AmazonServiceException.class, + () -> s3Client.listParts(listPartsRequest)); + + assertEquals(ErrorType.Client, ase.getErrorType()); + assertEquals(404, ase.getStatusCode()); + assertEquals("NoSuchUpload", ase.getErrorCode()); + } + + private boolean isBucketEmpty(Bucket bucket) { + ObjectListing objectListing = s3Client.listObjects(bucket.getName()); + return objectListing.getObjectSummaries().isEmpty(); + } + + private String getBucketName() { + return getBucketName(null); + } + + private String getBucketName(String suffix) { + return (getTestName() + "bucket" + suffix).toLowerCase(Locale.ROOT); + } + + private String getKeyName() { + return getKeyName(null); + } + + private String getKeyName(String suffix) { + return (getTestName() + "key" + suffix).toLowerCase(Locale.ROOT); + } + + private String multipartUpload(String 
bucketName, String key, File file, long partSize, String contentType, + Map userMetadata, List tags) throws Exception { + String uploadId = initiateMultipartUpload(bucketName, key, contentType, userMetadata, tags); + + List partETags = uploadParts(bucketName, key, uploadId, file, partSize); + + completeMultipartUpload(bucketName, key, uploadId, partETags); + + return uploadId; + } + + private String initiateMultipartUpload(String bucketName, String key, String contentType, + Map metadata, List tags) { + InitiateMultipartUploadRequest initRequest; + if (metadata == null || metadata.isEmpty()) { + initRequest = new InitiateMultipartUploadRequest(bucketName, key); + } else { + ObjectMetadata objectMetadata = new ObjectMetadata(); + objectMetadata.setUserMetadata(metadata); + if (contentType != null) { + objectMetadata.setContentType(contentType); + } + + initRequest = new InitiateMultipartUploadRequest(bucketName, key, objectMetadata) + .withTagging(new ObjectTagging(tags)); + } + + InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest); + assertEquals(bucketName, initResponse.getBucketName()); + assertEquals(key, initResponse.getKey()); + // TODO: Once bucket lifecycle configuration is supported, should check for "abortDate" and "abortRuleId" + + return initResponse.getUploadId(); + } + + // TODO: Also support async upload parts (similar to v2 asyncClient) + private List uploadParts(String bucketName, String key, String uploadId, File file, long partSize) + throws Exception { + // Create a list of ETag objects. You retrieve ETags for each object part + // uploaded, + // then, after each individual part has been uploaded, pass the list of ETags to + // the request to complete the upload. + List partETags = new ArrayList<>(); + + // Upload the file parts. + long filePosition = 0; + long fileLength = file.length(); + try (FileInputStream fileInputStream = new FileInputStream(file)) { + for (int i = 1; filePosition < fileLength; i++) { + // Because the last part could be less than 5 MB, adjust the part size as + // needed. + partSize = Math.min(partSize, (fileLength - filePosition)); + + // Create the request to upload a part. + UploadPartRequest uploadRequest = new UploadPartRequest() + .withBucketName(bucketName) + .withKey(key) + .withUploadId(uploadId) + .withPartNumber(i) + .withFileOffset(filePosition) + .withFile(file) + .withPartSize(partSize); + + // Upload the part and add the response's ETag to our list. 
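        // Each part's ETag is expected to be the hex MD5 of that part's bytes:
        // calculateDigest reads the next partSize bytes of the shared FileInputStream
        // through an InputSubstream, so the stream position advances in lockstep with
        // filePosition.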
+ UploadPartResult uploadResult = s3Client.uploadPart(uploadRequest); + PartETag partETag = uploadResult.getPartETag(); + assertEquals(i, partETag.getPartNumber()); + assertEquals(DatatypeConverter.printHexBinary( + calculateDigest(fileInputStream, 0, (int) partSize)).toLowerCase(), partETag.getETag()); + partETags.add(partETag); + + filePosition += partSize; + } + } + + return partETags; + } + + private void completeMultipartUpload(String bucketName, String key, String uploadId, List partETags) { + CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucketName, key, + uploadId, partETags); + CompleteMultipartUploadResult compResponse = s3Client.completeMultipartUpload(compRequest); + assertEquals(bucketName, compResponse.getBucketName()); + assertEquals(key, compResponse.getKey()); + } + + private void abortMultipartUpload(String bucketName, String key, String uploadId) { + AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucketName, key, uploadId); + s3Client.abortMultipartUpload(abortRequest); + } + + private static byte[] calculateDigest(InputStream inputStream, int skip, int length) throws Exception { + int numRead; + byte[] buffer = new byte[1024]; + + MessageDigest complete = MessageDigest.getInstance("MD5"); + if (skip > -1 && length > -1) { + inputStream = new InputSubstream(inputStream, skip, length); + } + + do { + numRead = inputStream.read(buffer); + if (numRead > 0) { + complete.update(buffer, 0, numRead); + } + } while (numRead != -1); + + return complete.digest(); + } + + private static void createFile(File newFile, int size) throws IOException { + // write random data so that filesystems with compression enabled (e.g. ZFS) + // can't compress the file + Random random = new Random(); + byte[] data = new byte[size]; + random.nextBytes(data); + + RandomAccessFile file = new RandomAccessFile(newFile, "rws"); + + file.write(data); + + file.getFD().sync(); + file.close(); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1.java new file mode 100644 index 000000000000..5e9b3633be06 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.s3.awssdk.v1; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Timeout; + +import java.io.IOException; + +/** + * Tests the AWS S3 SDK basic operations with OM Ratis disabled. + */ +@Timeout(300) +public class TestS3SDKV1 extends AbstractS3SDKV1Tests { + + @BeforeAll + public static void init() throws Exception { + OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, false); + conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1); + startCluster(conf); + } + + @AfterAll + public static void shutdown() throws IOException { + shutdownCluster(); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1WithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1WithRatis.java new file mode 100644 index 000000000000..cb614453f69f --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1WithRatis.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.s3.awssdk.v1; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; + +import java.io.IOException; + +/** + * Tests the AWS S3 SDK basic operations with OM Ratis enabled. + */ +public class TestS3SDKV1WithRatis extends AbstractS3SDKV1Tests { + + @BeforeAll + public static void init() throws Exception { + OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, + false); + conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); + conf.setBoolean(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, + true); + startCluster(conf); + } + + @AfterAll + public static void shutdown() throws IOException { + shutdownCluster(); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index 4c5325edab10..1a661ecdd74d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -23,7 +23,6 @@ import java.io.IOException; import java.io.PrintStream; import java.io.UnsupportedEncodingException; -import java.net.URI; import java.util.Map; import java.util.Arrays; import java.util.HashSet; @@ -35,6 +34,7 @@ import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.kms.KMSClientProvider; import org.apache.hadoop.crypto.key.kms.server.MiniKMS; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileChecksum; @@ -155,6 +155,8 @@ public class TestOzoneShellHA { private static String omServiceId; private static int numOfOMs; + private static OzoneConfiguration ozoneConfiguration; + /** * Create a MiniOzoneCluster for testing with using distributed Ozone * handler type. 
@@ -199,6 +201,8 @@ protected static void startCluster(OzoneConfiguration conf) throws Exception { getKeyProviderURI(miniKMS)); conf.setInt(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, 10); conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); + conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT, 1); + ozoneConfiguration = conf; MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); builder.setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) @@ -234,7 +238,7 @@ public static void shutdown() { @BeforeEach public void setup() throws UnsupportedEncodingException { ozoneShell = new OzoneShell(); - ozoneAdminShell = new OzoneAdmin(); + ozoneAdminShell = new OzoneAdmin(ozoneConfiguration); System.setOut(new PrintStream(out, false, DEFAULT_ENCODING)); System.setErr(new PrintStream(err, false, DEFAULT_ENCODING)); } @@ -958,6 +962,33 @@ private String getStdOut() throws UnsupportedEncodingException { return res; } + @Test + public void testOzoneAdminCmdListAllContainer() + throws UnsupportedEncodingException { + String[] args = new String[] {"container", "create", "--scm", + "localhost:" + cluster.getStorageContainerManager().getClientRpcPort()}; + for (int i = 0; i < 2; i++) { + execute(ozoneAdminShell, args); + } + + String[] args1 = new String[] {"container", "list", "-c", "10", "--scm", + "localhost:" + cluster.getStorageContainerManager().getClientRpcPort()}; + execute(ozoneAdminShell, args1); + //results will be capped at the maximum allowed count + assertEquals(1, getNumOfContainers()); + + String[] args2 = new String[] {"container", "list", "-a", "--scm", + "localhost:" + cluster.getStorageContainerManager().getClientRpcPort()}; + execute(ozoneAdminShell, args2); + //Lists all containers + assertNotEquals(1, getNumOfContainers()); + } + + private int getNumOfContainers() + throws UnsupportedEncodingException { + return out.toString(DEFAULT_ENCODING).split("\"containerID\" :").length - 1; + } + /** * Helper function to retrieve Ozone client configuration for trash testing. * @param hostPrefix Scheme + Authority. e.g. ofs://om-service-test1 @@ -1149,8 +1180,6 @@ public void testListBucket() throws Exception { getClientConfForOFS(hostPrefix, cluster.getConf()); int pageSize = 20; clientConf.setInt(OZONE_FS_LISTING_PAGE_SIZE, pageSize); - URI uri = FileSystem.getDefaultUri(clientConf); - clientConf.setBoolean(String.format("fs.%s.impl.disable.cache", uri.getScheme()), true); OzoneFsShell shell = new OzoneFsShell(clientConf); String volName = "testlistbucket"; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/utils/InputSubstream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/utils/InputSubstream.java new file mode 100644 index 000000000000..4908ecabf2ed --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/utils/InputSubstream.java @@ -0,0 +1,115 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.utils; + +import com.google.common.base.Preconditions; + +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; + +/** + * A filter input stream implementation that exposes a range of the underlying input stream. + */ +public class InputSubstream extends FilterInputStream { + private static final int MAX_SKIPS = 100; + private long currentPosition; + private final long requestedSkipOffset; + private final long requestedLength; + private long markedPosition = 0; + + public InputSubstream(InputStream in, long skip, long length) { + super(in); + Preconditions.checkNotNull(in); + this.currentPosition = 0; + this.requestedSkipOffset = skip; + this.requestedLength = length; + } + + @Override + public int read() throws IOException { + byte[] b = new byte[1]; + int bytesRead = read(b, 0, 1); + + if (bytesRead == -1) { + return bytesRead; + } + return b[0]; + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + int count = 0; + while (currentPosition < requestedSkipOffset) { + long skippedBytes = super.skip(requestedSkipOffset - currentPosition); + if (skippedBytes == 0) { + count++; + if (count > MAX_SKIPS) { + throw new IOException( + "Unable to position the currentPosition from " + + currentPosition + " to " + + requestedSkipOffset); + } + } + currentPosition += skippedBytes; + } + + long bytesRemaining = + (requestedLength + requestedSkipOffset) - currentPosition; + if (bytesRemaining <= 0) { + return -1; + } + + len = (int) Math.min(len, bytesRemaining); + int bytesRead = super.read(b, off, len); + currentPosition += bytesRead; + + return bytesRead; + } + + @Override + public synchronized void mark(int readlimit) { + markedPosition = currentPosition; + super.mark(readlimit); + } + + @Override + public synchronized void reset() throws IOException { + currentPosition = markedPosition; + super.reset(); + } + + @Override + public void close() throws IOException { + // No-op operation since we don't want to close the underlying stream + // when the susbtream has been read + } + + @Override + public int available() throws IOException { + long bytesRemaining; + if (currentPosition < requestedSkipOffset) { + bytesRemaining = requestedLength; + } else { + bytesRemaining = + (requestedLength + requestedSkipOffset) - currentPosition; + } + + return (int) Math.min(bytesRemaining, super.available()); + } +} diff --git a/hadoop-ozone/integration-test/src/test/resources/log4j.properties b/hadoop-ozone/integration-test/src/test/resources/log4j.properties index 564b729d5fc1..c732a15c48a1 100644 --- a/hadoop-ozone/integration-test/src/test/resources/log4j.properties +++ b/hadoop-ozone/integration-test/src/test/resources/log4j.properties @@ -21,3 +21,4 @@ log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR log4j.logger.org.apache.hadoop.hdds.utils.db.managed=TRACE log4j.logger.org.apache.hadoop.hdds.utils.db.CodecBuffer=DEBUG +log4j.logger.org.apache.hadoop.ozone.client.OzoneClientFactory=DEBUG diff --git a/hadoop-ozone/interface-client/pom.xml b/hadoop-ozone/interface-client/pom.xml index 
18d9584fbc85..4db4dbd5ad44 100644 --- a/hadoop-ozone/interface-client/pom.xml +++ b/hadoop-ozone/interface-client/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-interface-client - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Client interface Apache Ozone Client Interface jar diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index eefcfa7552ca..f71dc44fec51 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -150,9 +150,9 @@ enum Type { RenameSnapshot = 131; ListOpenFiles = 132; QuotaRepair = 133; - GetServerDefaults = 134; GetQuotaRepairStatus = 135; StartQuotaRepair = 136; + SnapshotMoveTableKeys = 137; } enum SafeMode { @@ -292,9 +292,9 @@ message OMRequest { optional RenameSnapshotRequest RenameSnapshotRequest = 129; optional ListOpenFilesRequest ListOpenFilesRequest = 130; optional QuotaRepairRequest QuotaRepairRequest = 131; - optional ServerDefaultsRequest ServerDefaultsRequest = 132; optional GetQuotaRepairStatusRequest GetQuotaRepairStatusRequest = 133; optional StartQuotaRepairRequest StartQuotaRepairRequest = 134; + optional SnapshotMoveTableKeysRequest SnapshotMoveTableKeysRequest = 135; } message OMResponse { @@ -422,7 +422,6 @@ message OMResponse { optional RenameSnapshotResponse RenameSnapshotResponse = 132; optional ListOpenFilesResponse ListOpenFilesResponse = 133; optional QuotaRepairResponse QuotaRepairResponse = 134; - optional ServerDefaultsResponse ServerDefaultsResponse = 135; optional GetQuotaRepairStatusResponse GetQuotaRepairStatusResponse = 136; optional StartQuotaRepairResponse StartQuotaRepairResponse = 137; } @@ -1379,6 +1378,8 @@ message PurgeKeysRequest { // if set, will purge keys in a snapshot DB instead of active DB optional string snapshotTableKey = 2; repeated SnapshotMoveKeyInfos keysToUpdate = 3; + // previous snapshotID can also be null & this field would be absent in older requests. + optional NullableUUID expectedPreviousSnapshotID = 4; } message PurgeKeysResponse { @@ -1401,6 +1402,12 @@ message PurgePathsResponse { message PurgeDirectoriesRequest { repeated PurgePathRequest deletedPath = 1; optional string snapshotTableKey = 2; + // previous snapshotID can also be null & this field would be absent in older requests. 
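  // NullableUUID (defined below) wraps an optional uuid, presumably so an explicitly
  // null previous snapshot can be told apart from a request that omits the field.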
+ optional NullableUUID expectedPreviousSnapshotID = 3; +} + +message NullableUUID { + optional hadoop.hdds.UUID uuid = 1; } message PurgeDirectoriesResponse { @@ -1630,6 +1637,7 @@ message ServiceInfo { repeated ServicePort servicePorts = 3; optional OMRoleInfo omRole = 4; optional int32 OMVersion = 5 [default = 0]; + optional FsServerDefaultsProto serverDefaults = 6; } message MultipartInfoInitiateRequest { @@ -1981,6 +1989,13 @@ message SnapshotMoveDeletedKeysRequest { repeated string deletedDirsToMove = 5; } +message SnapshotMoveTableKeysRequest { + optional hadoop.hdds.UUID fromSnapshotID = 1; + repeated SnapshotMoveKeyInfos deletedKeys = 2; + repeated SnapshotMoveKeyInfos deletedDirs = 3; + repeated hadoop.hdds.KeyValue renamedKeys = 4; +} + message SnapshotMoveKeyInfos { optional string key = 1; repeated KeyInfo keyInfos = 2; @@ -2222,17 +2237,10 @@ message BucketQuotaCount { message QuotaRepairResponse { } -message ServerDefaultsRequest { -} - message FsServerDefaultsProto { optional string keyProviderUri = 1; } -message ServerDefaultsResponse { - required FsServerDefaultsProto serverDefaults = 1; -} - message GetQuotaRepairStatusRequest { } message GetQuotaRepairStatusResponse { diff --git a/hadoop-ozone/interface-storage/pom.xml b/hadoop-ozone/interface-storage/pom.xml index ab1cc275ac1d..cd2e1e347831 100644 --- a/hadoop-ozone/interface-storage/pom.xml +++ b/hadoop-ozone/interface-storage/pom.xml @@ -20,10 +20,10 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-interface-storage - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Storage Interface Apache Ozone Storage Interface jar diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java index cf0819ca527c..ae57c18354d2 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java @@ -116,6 +116,22 @@ public interface OMMetadataManager extends DBStoreHAManager { */ String getBucketKey(String volume, String bucket); + /** + * Given a volume and bucket, return the corresponding DB key prefix. + * + * @param volume - Volume name + * @param bucket - Bucket name + */ + String getBucketKeyPrefix(String volume, String bucket); + + /** + * Given a volume and bucket, return the corresponding DB key prefix for FSO buckets. + * + * @param volume - Volume name + * @param bucket - Bucket name + */ + String getBucketKeyPrefixFSO(String volume, String bucket) throws IOException; + /** * Given a volume, bucket and a key, return the corresponding DB key. * @@ -631,7 +647,7 @@ String getMultipartKey(long volumeId, long bucketId, long getBucketId(String volume, String bucket) throws IOException; /** - * Returns List<{@link BlockGroup}> for a key in the deletedTable. + * Returns {@code List} for a key in the deletedTable. 
* @param deletedKey - key to be purged from the deletedTable * @return {@link BlockGroup} */ diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java index ba54a44ac79d..edf65ae22477 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java @@ -42,6 +42,11 @@ private TokenIdentifierCodec() { // singleton } + @Override + public Class getTypeClass() { + return OzoneTokenIdentifier.class; + } + @Override public byte[] toPersistedFormat(OzoneTokenIdentifier object) { Preconditions diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java index 30fe6d69b765..a2fdfb99c54a 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java @@ -42,7 +42,8 @@ public final class OmPrefixInfo extends WithObjectID implements CopyObject CODEC = new DelegatedCodec<>( Proto2Codec.get(PersistedPrefixInfo.getDefaultInstance()), OmPrefixInfo::getFromProtobuf, - OmPrefixInfo::getProtobuf); + OmPrefixInfo::getProtobuf, + OmPrefixInfo.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml index ae427727def5..7ccca37c8047 100644 --- a/hadoop-ozone/ozone-manager/pom.xml +++ b/hadoop-ozone/ozone-manager/pom.xml @@ -20,10 +20,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-manager - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Manager Server Apache Ozone Manager Server jar diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java index 68429c36d084..e4174efcfccf 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java @@ -43,7 +43,7 @@ * BucketManager uses MetadataDB to store bucket level information. * Keys used in BucketManager for storing data into MetadataDB * for BucketInfo: - * {volume/bucket} -> bucketInfo + * {volume/bucket} -> bucketInfo */ public class BucketManagerImpl implements BucketManager { private static final Logger LOG = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java index a83304ade459..bb6825085247 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java @@ -63,7 +63,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_GRPC_WORKERGROUP_SIZE_KEY; /** - * Separated network server for gRPC transport OzoneManagerService s3g->OM. + * Separated network server for gRPC transport OzoneManagerService s3g->OM. 
*/ public class GrpcOzoneManagerServer { private static final Logger LOG = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java index b7fa5d746fb0..a0f3053d731c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java @@ -18,6 +18,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -28,6 +29,7 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; import org.apache.hadoop.ozone.om.fs.OzoneManagerFS; import org.apache.hadoop.hdds.utils.BackgroundService; +import org.apache.hadoop.ozone.om.service.DirectoryDeletingService; import org.apache.hadoop.ozone.om.service.KeyDeletingService; import org.apache.hadoop.ozone.om.service.SnapshotDeletingService; import org.apache.hadoop.ozone.om.service.SnapshotDirectoryCleaningService; @@ -35,6 +37,7 @@ import java.io.IOException; import java.time.Duration; +import java.util.ArrayList; import java.util.List; /** @@ -46,7 +49,6 @@ public interface KeyManager extends OzoneManagerFS, IOzoneAcl { * Start key manager. * * @param configuration - * @throws IOException */ void start(OzoneConfiguration configuration); @@ -119,6 +121,29 @@ ListKeysResult listKeys(String volumeName, String bucketName, String startKey, */ PendingKeysDeletion getPendingDeletionKeys(int count) throws IOException; + /** + * Returns a list rename entries from the snapshotRenamedTable. + * + * @param size max number of keys to return. + * @return a Pair of list of {@link org.apache.hadoop.hdds.utils.db.Table.KeyValue} representing the keys in the + * underlying metadataManager. + * @throws IOException + */ + List> getRenamesKeyEntries( + String volume, String bucket, String startKey, int size) throws IOException; + + + /** + * Returns a list deleted entries from the deletedTable. + * + * @param size max number of keys to return. + * @return a Pair of list of {@link org.apache.hadoop.hdds.utils.db.Table.KeyValue} representing the keys in the + * underlying metadataManager. + * @throws IOException + */ + List>> getDeletedKeyEntries( + String volume, String bucket, String startKey, int size) throws IOException; + /** * Returns the names of up to {@code count} open keys whose age is * greater than or equal to {@code expireThreshold}. @@ -216,6 +241,26 @@ OmMultipartUploadListParts listParts(String volumeName, String bucketName, */ Table.KeyValue getPendingDeletionDir() throws IOException; + /** + * Returns an iterator for pending deleted directories. 
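 * When both volume and bucket are supplied, the iterator is restricted to that
 * bucket's FSO key prefix; if both are empty it spans the entire deleted directory table.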
+ * @throws IOException + */ + TableIterator> getDeletedDirEntries( + String volume, String bucket) throws IOException; + + default List> getDeletedDirEntries(String volume, String bucket, int size) + throws IOException { + List> deletedDirEntries = new ArrayList<>(size); + try (TableIterator> iterator = + getDeletedDirEntries(volume, bucket)) { + while (deletedDirEntries.size() < size && iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + deletedDirEntries.add(Table.newKeyValue(kv.getKey(), kv.getValue())); + } + return deletedDirEntries; + } + } + /** * Returns all sub directories under the given parent directory. * @@ -243,7 +288,7 @@ List getPendingDeletionSubFiles(long volumeId, * Returns the instance of Directory Deleting Service. * @return Background service. */ - BackgroundService getDirDeletingService(); + DirectoryDeletingService getDirDeletingService(); /** * Returns the instance of Open Key Cleanup Service. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 6d276d95284e..e99bdea85ea4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -37,9 +37,15 @@ import java.util.Stack; import java.util.TreeMap; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; +import jakarta.annotation.Nonnull; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; @@ -56,6 +62,7 @@ import org.apache.hadoop.hdds.scm.net.NodeImpl; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; +import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; import org.apache.hadoop.hdds.utils.BackgroundService; import org.apache.hadoop.hdds.utils.db.StringCodec; import org.apache.hadoop.hdds.utils.db.Table; @@ -70,6 +77,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.ListKeysResult; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; @@ -85,7 +93,7 @@ import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.util.OMMultipartUploadUtils; @@ -97,18 +105,14 @@ import org.apache.hadoop.ozone.om.service.SnapshotDirectoryCleaningService; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.RequestContext; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Time; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; -import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static java.lang.String.format; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH; @@ -141,6 +145,8 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_OPEN_KEY_CLEANUP_SERVICE_TIMEOUT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_OPEN_KEY_CLEANUP_SERVICE_TIMEOUT_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_TIMEOUT; @@ -155,15 +161,11 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.SCM_GET_PIPELINE_EXCEPTION; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; -import static org.apache.hadoop.ozone.util.MetricUtil.captureLatencyNs; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY; +import static org.apache.hadoop.ozone.util.MetricUtil.captureLatencyNs; import static org.apache.hadoop.util.Time.monotonicNow; -import jakarta.annotation.Nonnull; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * Implementation of keyManager. 
*/ @@ -189,7 +191,7 @@ public class KeyManagerImpl implements KeyManager { private final KeyProviderCryptoExtension kmsProvider; private final boolean enableFileSystemPaths; - private BackgroundService dirDeletingService; + private DirectoryDeletingService dirDeletingService; private final OMPerformanceMetrics metrics; private BackgroundService openKeyCleanupService; @@ -228,6 +230,8 @@ public KeyManagerImpl(OzoneManager om, ScmClient scmClient, @Override public void start(OzoneConfiguration configuration) { + boolean isSnapshotDeepCleaningEnabled = configuration.getBoolean(OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED, + OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED_DEFAULT); if (keyDeletingService == null) { long blockDeleteInterval = configuration.getTimeDuration( OZONE_BLOCK_DELETING_SERVICE_INTERVAL, @@ -239,7 +243,7 @@ public void start(OzoneConfiguration configuration) { TimeUnit.MILLISECONDS); keyDeletingService = new KeyDeletingService(ozoneManager, scmClient.getBlockClient(), this, blockDeleteInterval, - serviceTimeout, configuration); + serviceTimeout, configuration, isSnapshotDeepCleaningEnabled); keyDeletingService.start(); } @@ -305,14 +309,14 @@ public void start(OzoneConfiguration configuration) { try { snapshotDeletingService = new SnapshotDeletingService( snapshotServiceInterval, snapshotServiceTimeout, - ozoneManager, scmClient.getBlockClient()); + ozoneManager); snapshotDeletingService.start(); } catch (IOException e) { LOG.error("Error starting Snapshot Deleting Service", e); } } - if (snapshotDirectoryCleaningService == null && + if (isSnapshotDeepCleaningEnabled && snapshotDirectoryCleaningService == null && ozoneManager.isFilesystemSnapshotEnabled()) { long dirDeleteInterval = configuration.getTimeDuration( OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL, @@ -662,6 +666,60 @@ public PendingKeysDeletion getPendingDeletionKeys(final int count) .getPendingDeletionKeys(count, ozoneManager.getOmSnapshotManager()); } + private List> getTableEntries(String startKey, + TableIterator> tableIterator, + Function valueFunction, int size) throws IOException { + List> entries = new ArrayList<>(); + /* Seek to the start key if it not null. The next key in queue is ensured to start with the bucket + prefix, {@link org.apache.hadoop.hdds.utils.db.Table#iterator(bucketPrefix)} would ensure this. + */ + if (startKey != null) { + tableIterator.seek(startKey); + tableIterator.seekToFirst(); + } + int currentCount = 0; + while (tableIterator.hasNext() && currentCount < size) { + Table.KeyValue kv = tableIterator.next(); + if (kv != null) { + entries.add(Table.newKeyValue(kv.getKey(), valueFunction.apply(kv.getValue()))); + currentCount++; + } + } + return entries; + } + + private Optional getBucketPrefix(String volumeName, String bucketName, boolean isFSO) throws IOException { + // Bucket prefix would be empty if both volume & bucket is empty i.e. either null or "". + if (StringUtils.isEmpty(volumeName) && StringUtils.isEmpty(bucketName)) { + return Optional.empty(); + } else if (StringUtils.isEmpty(bucketName) || StringUtils.isEmpty(volumeName)) { + throw new IOException("One of volume : " + volumeName + ", bucket: " + bucketName + " is empty." + + " Either both should be empty or none of the arguments should be empty"); + } + return isFSO ? 
Optional.of(metadataManager.getBucketKeyPrefixFSO(volumeName, bucketName)) : + Optional.of(metadataManager.getBucketKeyPrefix(volumeName, bucketName)); + } + + @Override + public List> getRenamesKeyEntries( + String volume, String bucket, String startKey, int size) throws IOException { + Optional bucketPrefix = getBucketPrefix(volume, bucket, false); + try (TableIterator> + renamedKeyIter = metadataManager.getSnapshotRenamedTable().iterator(bucketPrefix.orElse(""))) { + return getTableEntries(startKey, renamedKeyIter, Function.identity(), size); + } + } + + @Override + public List>> getDeletedKeyEntries( + String volume, String bucket, String startKey, int size) throws IOException { + Optional bucketPrefix = getBucketPrefix(volume, bucket, false); + try (TableIterator> + delKeyIter = metadataManager.getDeletedTable().iterator(bucketPrefix.orElse(""))) { + return getTableEntries(startKey, delKeyIter, RepeatedOmKeyInfo::cloneOmKeyInfoList, size); + } + } + @Override public ExpiredOpenKeys getExpiredOpenKeys(Duration expireThreshold, int count, BucketLayout bucketLayout, Duration leaseThreshold) throws IOException { @@ -688,7 +746,7 @@ public KeyDeletingService getDeletingService() { } @Override - public BackgroundService getDirDeletingService() { + public DirectoryDeletingService getDirDeletingService() { return dirDeletingService; } @@ -723,8 +781,7 @@ public boolean isSstFilteringSvcEnabled() { TimeUnit.MILLISECONDS); return serviceInterval != DISABLE_VALUE; } - - + @Override public OmMultipartUploadList listMultipartUploads(String volumeName, String bucketName, String prefix) throws OMException { @@ -1325,7 +1382,6 @@ private OmKeyInfo createFakeDirIfShould(String volume, String bucket, return null; } - private OzoneFileStatus getOzoneFileStatusFSO(OmKeyArgs args, String clientAddress, boolean skipFileNotFoundError) throws IOException { final String volumeName = args.getVolumeName(); @@ -1663,7 +1719,7 @@ public List listStatus(OmKeyArgs args, boolean recursive, cacheKeyMap.clear(); List keyInfoList = new ArrayList<>(fileStatusList.size()); - fileStatusList.stream().map(s -> s.getKeyInfo()).forEach(keyInfoList::add); + fileStatusList.stream().map(OzoneFileStatus::getKeyInfo).forEach(keyInfoList::add); if (args.getLatestVersionLocation()) { slimLocationVersion(keyInfoList.toArray(new OmKeyInfo[0])); } @@ -1784,17 +1840,13 @@ private List buildFinalStatusList( } fileStatusFinalList.add(fileStatus); } - return sortPipelineInfo(fileStatusFinalList, keyInfoList, omKeyArgs, clientAddress); } - private List sortPipelineInfo( List fileStatusFinalList, List keyInfoList, OmKeyArgs omKeyArgs, String clientAddress) throws IOException { - - if (omKeyArgs.getLatestVersionLocation()) { slimLocationVersion(keyInfoList.toArray(new OmKeyInfo[0])); } @@ -1976,6 +2028,13 @@ public Table.KeyValue getPendingDeletionDir() return null; } + @Override + public TableIterator> getDeletedDirEntries( + String volume, String bucket) throws IOException { + Optional bucketPrefix = getBucketPrefix(volume, bucket, true); + return metadataManager.getDeletedDirTable().iterator(bucketPrefix.orElse("")); + } + @Override public List getPendingDeletionSubDirs(long volumeId, long bucketId, OmKeyInfo parentInfo, long numEntries) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java index 86d8352697ae..36edda8941db 100644 --- 
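The getTableEntries/getBucketPrefix helpers added above follow a familiar paginated-scan shape: scope the iterator to a bucket prefix, seek to the caller's start key (or the beginning of the range), and stop after `size` entries. A minimal sketch of that shape, using plain JDK maps instead of Ozone's Table/TableIterator API — every name below is illustrative, not Ozone code:

import java.util.AbstractMap.SimpleEntry;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;
import java.util.function.Function;

final class PrefixScanSketch {
  /**
   * Return up to 'size' entries whose keys start with 'prefix', beginning at
   * 'startKey' (or at the first prefixed key when null), mapping each raw
   * value through 'valueFunction'.
   */
  static <V, R> List<Map.Entry<String, R>> scan(NavigableMap<String, V> table,
      String prefix, String startKey, int size, Function<V, R> valueFunction) {
    List<Map.Entry<String, R>> entries = new ArrayList<>();
    // Seek: start either at the requested key or at the beginning of the prefix range.
    String from = startKey != null ? startKey : prefix;
    Iterator<Map.Entry<String, V>> it = table.tailMap(from, true).entrySet().iterator();
    while (it.hasNext() && entries.size() < size) {
      Map.Entry<String, V> kv = it.next();
      if (!kv.getKey().startsWith(prefix)) {
        break;  // left the bucket's key range
      }
      entries.add(new SimpleEntry<>(kv.getKey(), valueFunction.apply(kv.getValue())));
    }
    return entries;
  }

  public static void main(String[] args) {
    NavigableMap<String, Integer> table = new TreeMap<>();
    table.put("/vol1/bucket1/a", 1);
    table.put("/vol1/bucket1/b", 2);
    table.put("/vol1/bucket2/c", 3);
    // First page of two entries from bucket1, values rendered as strings.
    System.out.println(scan(table, "/vol1/bucket1/", null, 2, String::valueOf));
  }
}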
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java @@ -43,7 +43,7 @@ /** * Common class to do listing of resources after merging - * rocksDB table cache & actual table. + * rocksDB table cache and actual table. */ public class ListIterator { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManager.java index f68789b5394b..2c66dd5035ed 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManager.java @@ -129,7 +129,7 @@ boolean isTenantAdmin(UserGroupInformation callerUgi, String tenantId, boolean delegated); /** - * List all the user & accessIDs of all users that belong to this Tenant. + * List all the user and accessIDs of all users that belong to this Tenant. * Note this read is unprotected. See OzoneManager#listUserInTenant * @param tenantID * @return List of users diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManagerImpl.java index 1d25a49fc562..a5954485bbd4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMultiTenantManagerImpl.java @@ -246,7 +246,6 @@ private void checkAcquiredAuthorizerWriteLock() throws OMException { * @param tenantId tenant name * @param userRoleName user role name * @param adminRoleName admin role name - * @return Tenant * @throws IOException */ @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index ee92dbc2fde9..4873a7db4916 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -34,7 +34,6 @@ import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; -import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -140,7 +139,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager, *

        * OM DB Schema:
        *
    -   *
    +   * <pre>

    +   * {@code
        * Common Tables:
        * |----------------------------------------------------------------------|
        * |  Column Family     |        VALUE                                    |
    @@ -161,7 +161,10 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
        * |----------------------------------------------------------------------|
        * | transactionInfoTable| #TRANSACTIONINFO -> OMTransactionInfo          |
        * |----------------------------------------------------------------------|
    -   *
    +   * }
    +   * </pre>
    +   * <pre>
    +   * {@code
        * Multi-Tenant Tables:
        * |----------------------------------------------------------------------|
        * | tenantStateTable          | tenantId -> OmDBTenantState              |
    @@ -170,8 +173,10 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
        * |----------------------------------------------------------------------|
        * | principalToAccessIdsTable | userPrincipal -> OmDBUserPrincipalInfo   |
        * |----------------------------------------------------------------------|
    -   *
    -   *
    +   * }
    +   * </pre>
    +   * <pre>
    +   * {@code
        * Simple Tables:
        * |----------------------------------------------------------------------|
        * |  Column Family     |        VALUE                                    |
    @@ -182,7 +187,10 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
        * |----------------------------------------------------------------------|
        * | openKey            | /volumeName/bucketName/keyName/id->KeyInfo      |
        * |----------------------------------------------------------------------|
    -   *
    +   * }
    +   * </pre>
    +   * <pre>
    +   * {@code
        * Prefix Tables:
        * |----------------------------------------------------------------------|
        * |  Column Family   |        VALUE                                      |
    @@ -196,7 +204,10 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
        * |  deletedDirTable | /volumeId/bucketId/parentId/dirName/objectId ->   |
        * |                  |                                      KeyInfo      |
        * |----------------------------------------------------------------------|
    -   *
    +   * }
    +   * </pre>
    +   * <pre>
    +   * {@code
        * Snapshot Tables:
        * |-------------------------------------------------------------------------|
        * |  Column Family        |        VALUE                                    |
    @@ -210,6 +221,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
        * |-------------------------------------------------------------------------|
        * | compactionLogTable    | dbTrxId-compactionTime -> compactionLogEntry    |
        * |-------------------------------------------------------------------------|
    +   * }
    +   * </pre>
    */ public static final String USER_TABLE = "userTable"; @@ -824,7 +837,7 @@ public String getUserKey(String user) { /** * Given a volume and bucket, return the corresponding DB key. * - * @param volume - User name + * @param volume - Volume name * @param bucket - Bucket name */ @Override @@ -838,6 +851,22 @@ public String getBucketKey(String volume, String bucket) { return builder.toString(); } + /** + * {@inheritDoc} + */ + @Override + public String getBucketKeyPrefix(String volume, String bucket) { + return getOzoneKey(volume, bucket, OM_KEY_PREFIX); + } + + /** + * {@inheritDoc} + */ + @Override + public String getBucketKeyPrefixFSO(String volume, String bucket) throws IOException { + return getOzoneKeyFSO(volume, bucket, OM_KEY_PREFIX); + } + @Override public String getOzoneKey(String volume, String bucket, String key) { StringBuilder builder = new StringBuilder() @@ -1595,11 +1624,22 @@ public PendingKeysDeletion getPendingDeletionKeys(final int keyCount, String[] keySplit = kv.getKey().split(OM_KEY_PREFIX); String bucketKey = getBucketKey(keySplit[1], keySplit[2]); OmBucketInfo bucketInfo = getBucketTable().get(bucketKey); - + // If Bucket deleted bucketInfo would be null, thus making previous snapshot also null. + SnapshotInfo previousSnapshotInfo = bucketInfo == null ? null : + SnapshotUtils.getLatestSnapshotInfo(bucketInfo.getVolumeName(), + bucketInfo.getBucketName(), ozoneManager, snapshotChainManager); + // previous snapshot is not active or it has not been flushed to disk then don't process the key in this + // iteration. + if (previousSnapshotInfo != null && + (previousSnapshotInfo.getSnapshotStatus() != SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE || + !OmSnapshotManager.areSnapshotChangesFlushedToDB(ozoneManager.getMetadataManager(), + previousSnapshotInfo))) { + continue; + } // Get the latest snapshot in snapshot path. - try (ReferenceCounted - rcLatestSnapshot = getLatestActiveSnapshot( - keySplit[1], keySplit[2], omSnapshotManager)) { + try (ReferenceCounted rcLatestSnapshot = previousSnapshotInfo == null ? null : + omSnapshotManager.getSnapshot(previousSnapshotInfo.getVolumeName(), + previousSnapshotInfo.getBucketName(), previousSnapshotInfo.getName())) { // Multiple keys with the same path can be queued in one DB entry RepeatedOmKeyInfo infoList = kv.getValue(); @@ -1676,17 +1716,24 @@ public PendingKeysDeletion getPendingDeletionKeys(final int keyCount, List notReclaimableKeyInfoList = notReclaimableKeyInfo.getOmKeyInfoList(); + // If Bucket deleted bucketInfo would be null, thus making previous snapshot also null. + SnapshotInfo newPreviousSnapshotInfo = bucketInfo == null ? null : + SnapshotUtils.getLatestSnapshotInfo(bucketInfo.getVolumeName(), + bucketInfo.getBucketName(), ozoneManager, snapshotChainManager); + // Check if the previous snapshot in the chain hasn't changed. + if (Objects.equals(Optional.ofNullable(newPreviousSnapshotInfo).map(SnapshotInfo::getSnapshotId), + Optional.ofNullable(previousSnapshotInfo).map(SnapshotInfo::getSnapshotId))) { + // If all the versions are not reclaimable, then do nothing. + if (notReclaimableKeyInfoList.size() > 0 && + notReclaimableKeyInfoList.size() != + infoList.getOmKeyInfoList().size()) { + keysToModify.put(kv.getKey(), notReclaimableKeyInfo); + } - // If all the versions are not reclaimable, then do nothing. 
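The revised reclaim path above only commits its decision when the previous snapshot in the chain is still the one it started with, comparing the two possibly-null SnapshotInfo references by snapshot ID. A stripped-down sketch of that null-safe comparison, with a plain UUID holder standing in for SnapshotInfo (illustrative only, not Ozone API):

import java.util.Objects;
import java.util.Optional;
import java.util.UUID;

final class SnapshotChainCheckSketch {
  /** Minimal stand-in for SnapshotInfo: only the ID matters for the comparison. */
  static final class Snapshot {
    private final UUID id;
    Snapshot(UUID id) {
      this.id = id;
    }
    UUID getSnapshotId() {
      return id;
    }
  }

  /** True if the previous snapshot is unchanged (both null, or both carry the same ID). */
  static boolean previousSnapshotUnchanged(Snapshot before, Snapshot after) {
    return Objects.equals(
        Optional.ofNullable(before).map(Snapshot::getSnapshotId),
        Optional.ofNullable(after).map(Snapshot::getSnapshotId));
  }

  public static void main(String[] args) {
    UUID id = UUID.randomUUID();
    System.out.println(previousSnapshotUnchanged(new Snapshot(id), new Snapshot(id))); // true
    System.out.println(previousSnapshotUnchanged(null, new Snapshot(id)));             // false
    System.out.println(previousSnapshotUnchanged(null, null));                         // true
  }
}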
- if (notReclaimableKeyInfoList.size() > 0 && - notReclaimableKeyInfoList.size() != - infoList.getOmKeyInfoList().size()) { - keysToModify.put(kv.getKey(), notReclaimableKeyInfo); - } - - if (notReclaimableKeyInfoList.size() != - infoList.getOmKeyInfoList().size()) { - keyBlocksList.addAll(blockGroupList); + if (notReclaimableKeyInfoList.size() != + infoList.getOmKeyInfoList().size()) { + keyBlocksList.addAll(blockGroupList); + } } } } @@ -1703,55 +1750,6 @@ private boolean versionExistsInPreviousSnapshot(OmKeyInfo omKeyInfo, delOmKeyInfo != null; } - /** - * Get the latest OmSnapshot for a snapshot path. - */ - public ReferenceCounted getLatestActiveSnapshot( - String volumeName, String bucketName, - OmSnapshotManager snapshotManager) - throws IOException { - - String snapshotPath = volumeName + OM_KEY_PREFIX + bucketName; - Optional latestPathSnapshot = Optional.ofNullable( - snapshotChainManager.getLatestPathSnapshotId(snapshotPath)); - - Optional snapshotInfo = Optional.empty(); - - while (latestPathSnapshot.isPresent()) { - Optional snapTableKey = latestPathSnapshot - .map(uuid -> snapshotChainManager.getTableKey(uuid)); - - snapshotInfo = snapTableKey.isPresent() ? - Optional.ofNullable(getSnapshotInfoTable().get(snapTableKey.get())) : - Optional.empty(); - - if (snapshotInfo.isPresent() && snapshotInfo.get().getSnapshotStatus() == - SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE) { - break; - } - - // Update latestPathSnapshot if current snapshot is deleted. - if (snapshotChainManager.hasPreviousPathSnapshot(snapshotPath, - latestPathSnapshot.get())) { - latestPathSnapshot = Optional.ofNullable(snapshotChainManager - .previousPathSnapshot(snapshotPath, latestPathSnapshot.get())); - } else { - latestPathSnapshot = Optional.empty(); - } - } - - Optional> rcOmSnapshot = - snapshotInfo.isPresent() ? - Optional.ofNullable( - snapshotManager.getSnapshot(volumeName, - bucketName, - snapshotInfo.get().getName()) - ) : - Optional.empty(); - - return rcOmSnapshot.orElse(null); - } - /** * Decide whether the open key is a multipart upload related key. 
* @param openKeyInfo open key related to multipart upload diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 0038bca2e32a..8cb081028fd3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -274,7 +274,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_METRICS_TEMP_FILE; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_DIR; import static org.apache.hadoop.ozone.OzoneConsts.PREPARE_MARKER_KEY; -import static org.apache.hadoop.ozone.OzoneConsts.OM_RATIS_SNAPSHOT_DIR; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RATIS_SNAPSHOT_DIR; import static org.apache.hadoop.ozone.OzoneConsts.RPC_PORT; import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK; @@ -437,7 +437,6 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl private List ratisReporterList = null; private KeyProviderCryptoExtension kmsProvider; - private OzoneFsServerDefaults serverDefaults; private final OMLayoutVersionManager versionManager; private final ReplicationConfigValidator replicationConfigValidator; @@ -655,14 +654,6 @@ private OzoneManager(OzoneConfiguration conf, StartupOption startupOption) kmsProvider = null; LOG.error("Fail to create Key Provider"); } - Configuration hadoopConfig = - LegacyHadoopConfigurationSource.asHadoopConfiguration(configuration); - URI keyProviderUri = KMSUtil.getKeyProviderUri( - hadoopConfig, - CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH); - String keyProviderUriStr = - (keyProviderUri != null) ? keyProviderUri.toString() : null; - serverDefaults = new OzoneFsServerDefaults(keyProviderUriStr); if (secConfig.isSecurityEnabled()) { omComponent = OM_DAEMON + "-" + omId; HddsProtos.OzoneManagerDetailsProto omInfo = @@ -791,8 +782,9 @@ private void setInstanceVariablesFromConf() { * * @param conf OzoneConfiguration * @return OM instance - * @throws IOException, AuthenticationException in case OM instance - * creation fails. + * @throws IOException AuthenticationException in case OM instance + * creation fails, + * @throws AuthenticationException */ public static OzoneManager createOm(OzoneConfiguration conf) throws IOException, AuthenticationException { @@ -874,7 +866,13 @@ private void instantiateServices(boolean withNewSnapshot) throws IOException { prefixManager = new PrefixManagerImpl(this, metadataManager, isRatisEnabled); keyManager = new KeyManagerImpl(this, scmClient, configuration, perfMetrics); - accessAuthorizer = OzoneAuthorizerFactory.forOM(this); + // If authorizer is not initialized or the authorizer is Native + // re-initialize the authorizer, else for non-native authorizer + // like ranger we can reuse previous value if it is initialized + if (null == accessAuthorizer || accessAuthorizer.isNative()) { + accessAuthorizer = OzoneAuthorizerFactory.forOM(this); + } + omMetadataReader = new OmMetadataReader(keyManager, prefixManager, this, LOG, AUDIT, metrics, accessAuthorizer); // Active DB's OmMetadataReader instance does not need to be reference @@ -1496,7 +1494,7 @@ private void initializeRatisDirs(OzoneConfiguration conf) throws IOException { // snapshot directory in Ratis storage directory. if yes, move it to // new snapshot directory. 
- File snapshotDir = new File(omRatisDirectory, OM_RATIS_SNAPSHOT_DIR); + File snapshotDir = new File(omRatisDirectory, OZONE_RATIS_SNAPSHOT_DIR); if (snapshotDir.isDirectory()) { FileUtils.moveDirectory(snapshotDir.toPath(), @@ -3139,6 +3137,15 @@ public List getServiceList() throws IOException { .setType(ServicePort.Type.RPC) .setValue(omRpcAddress.getPort()) .build()); + Configuration hadoopConfig = + LegacyHadoopConfigurationSource.asHadoopConfiguration(configuration); + URI keyProviderUri = KMSUtil.getKeyProviderUri( + hadoopConfig, + CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH); + String keyProviderUriStr = + (keyProviderUri != null) ? keyProviderUri.toString() : null; + omServiceInfoBuilder.setServerDefaults( + new OzoneFsServerDefaults(keyProviderUriStr)); if (httpServer != null && httpServer.getHttpAddress() != null) { omServiceInfoBuilder.addServicePort(ServicePort.newBuilder() @@ -4748,11 +4755,6 @@ public boolean setSafeMode(SafeModeAction action, boolean isChecked) } } - @Override - public OzoneFsServerDefaults getServerDefaults() { - return serverDefaults; - } - @Override public String getQuotaRepairStatus() throws IOException { checkAdminUserPrivilege("quota repair status"); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java index 2301bbbdbf28..c693e529580c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java @@ -51,6 +51,8 @@ private OzoneManagerUtils() { * OzoneManagerStateMachine#runCommand function and ensures sequential * execution path. * Below is the call trace to perform OM client request operation: + *
    +   * {@code
        * OzoneManagerStateMachine#applyTransaction ->
        * OzoneManagerStateMachine#runCommand ->
        * OzoneManagerRequestHandler#handleWriteRequest ->
    @@ -60,6 +62,8 @@ private OzoneManagerUtils() {
        * OzoneManagerUtils#getBucketLayout ->
        * OzoneManagerUtils#getOmBucketInfo ->
        * omMetadataManager().getBucketTable().get(buckKey)
    +   * }
    +   * </pre>
    */ public static OmBucketInfo getBucketInfo(OMMetadataManager metaMgr, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java index b069a174cd0f..e4102665d623 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java @@ -24,8 +24,10 @@ import org.slf4j.LoggerFactory; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.Collections; import java.util.HashMap; +import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Map; import java.util.NoSuchElementException; @@ -56,6 +58,7 @@ public class SnapshotChainManager { private final ConcurrentMap snapshotIdToTableKey; private UUID latestGlobalSnapshotId; private final boolean snapshotChainCorrupted; + private UUID oldestGlobalSnapshotId; public SnapshotChainManager(OMMetadataManager metadataManager) { globalSnapshotChain = Collections.synchronizedMap(new LinkedHashMap<>()); @@ -104,6 +107,8 @@ private void addSnapshotGlobal(UUID snapshotID, UUID prevGlobalID) // On add snapshot, set previous snapshot entry nextSnapshotID = // snapshotID globalSnapshotChain.get(prevGlobalID).setNextSnapshotId(snapshotID); + } else { + oldestGlobalSnapshotId = snapshotID; } globalSnapshotChain.put(snapshotID, @@ -171,7 +176,6 @@ private boolean deleteSnapshotGlobal(UUID snapshotID) throws IOException { // for node removal UUID next = globalSnapshotChain.get(snapshotID).getNextSnapshotId(); UUID prev = globalSnapshotChain.get(snapshotID).getPreviousSnapshotId(); - if (prev != null && !globalSnapshotChain.containsKey(prev)) { throw new IOException(String.format( "Global snapshot chain corruption. " + @@ -197,6 +201,9 @@ private boolean deleteSnapshotGlobal(UUID snapshotID) throws IOException { if (latestGlobalSnapshotId.equals(snapshotID)) { latestGlobalSnapshotId = prev; } + if (snapshotID.equals(oldestGlobalSnapshotId)) { + oldestGlobalSnapshotId = next; + } return true; } else { // snapshotID not found in snapshot chain, log warning and return @@ -382,6 +389,42 @@ public UUID getLatestGlobalSnapshotId() throws IOException { return latestGlobalSnapshotId; } + /** + * Get oldest of global snapshot in snapshot chain. + */ + public UUID getOldestGlobalSnapshotId() throws IOException { + validateSnapshotChain(); + return oldestGlobalSnapshotId; + } + + public Iterator iterator(final boolean reverse) throws IOException { + validateSnapshotChain(); + return new Iterator() { + private UUID currentSnapshotId = reverse ? getLatestGlobalSnapshotId() : getOldestGlobalSnapshotId(); + @Override + public boolean hasNext() { + return currentSnapshotId != null; + } + + @Override + public UUID next() { + try { + UUID prevSnapshotId = currentSnapshotId; + if (reverse && hasPreviousGlobalSnapshot(currentSnapshotId) || + !reverse && hasNextGlobalSnapshot(currentSnapshotId)) { + currentSnapshotId = + reverse ? previousGlobalSnapshot(currentSnapshotId) : nextGlobalSnapshot(currentSnapshotId); + } else { + currentSnapshotId = null; + } + return prevSnapshotId; + } catch (IOException e) { + throw new UncheckedIOException("Error while getting next snapshot for " + currentSnapshotId, e); + } + } + }; + } + /** * Get latest path snapshot in snapshot chain. 
*/ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java index de567447ae36..be57a7b74517 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java @@ -50,145 +50,115 @@ /** * Class defines the structure and types of the om.db. */ -public class OMDBDefinition extends DBDefinition.WithMap { +public final class OMDBDefinition extends DBDefinition.WithMap { public static final DBColumnFamilyDefinition DELETED_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.DELETED_TABLE, - String.class, StringCodec.get(), - RepeatedOmKeyInfo.class, RepeatedOmKeyInfo.getCodec(true)); public static final DBColumnFamilyDefinition USER_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.USER_TABLE, - String.class, StringCodec.get(), - PersistedUserVolumeInfo.class, Proto2Codec.get(PersistedUserVolumeInfo.getDefaultInstance())); public static final DBColumnFamilyDefinition VOLUME_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.VOLUME_TABLE, - String.class, StringCodec.get(), - OmVolumeArgs.class, OmVolumeArgs.getCodec()); public static final DBColumnFamilyDefinition OPEN_KEY_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.OPEN_KEY_TABLE, - String.class, StringCodec.get(), - OmKeyInfo.class, OmKeyInfo.getCodec(true)); public static final DBColumnFamilyDefinition KEY_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.KEY_TABLE, - String.class, StringCodec.get(), - OmKeyInfo.class, OmKeyInfo.getCodec(true)); public static final DBColumnFamilyDefinition BUCKET_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.BUCKET_TABLE, - String.class, StringCodec.get(), - OmBucketInfo.class, OmBucketInfo.getCodec()); public static final DBColumnFamilyDefinition MULTIPART_INFO_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.MULTIPARTINFO_TABLE, - String.class, StringCodec.get(), - OmMultipartKeyInfo.class, OmMultipartKeyInfo.getCodec()); public static final DBColumnFamilyDefinition PREFIX_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.PREFIX_TABLE, - String.class, StringCodec.get(), - OmPrefixInfo.class, OmPrefixInfo.getCodec()); public static final DBColumnFamilyDefinition DTOKEN_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.DELEGATION_TOKEN_TABLE, - OzoneTokenIdentifier.class, TokenIdentifierCodec.get(), - Long.class, LongCodec.get()); public static final DBColumnFamilyDefinition S3_SECRET_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.S3_SECRET_TABLE, - String.class, StringCodec.get(), - S3SecretValue.class, S3SecretValue.getCodec()); public static final DBColumnFamilyDefinition TRANSACTION_INFO_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.TRANSACTION_INFO_TABLE, - String.class, StringCodec.get(), - TransactionInfo.class, TransactionInfo.getCodec()); public static final DBColumnFamilyDefinition DIRECTORY_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.DIRECTORY_TABLE, - String.class, StringCodec.get(), - OmDirectoryInfo.class, OmDirectoryInfo.getCodec()); public static final DBColumnFamilyDefinition FILE_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.FILE_TABLE, - String.class, StringCodec.get(), - OmKeyInfo.class, OmKeyInfo.getCodec(true)); public static 
final DBColumnFamilyDefinition OPEN_FILE_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.OPEN_FILE_TABLE, - String.class, StringCodec.get(), - OmKeyInfo.class, OmKeyInfo.getCodec(true)); public static final DBColumnFamilyDefinition DELETED_DIR_TABLE = new DBColumnFamilyDefinition<>(OmMetadataManagerImpl.DELETED_DIR_TABLE, - String.class, StringCodec.get(), OmKeyInfo.class, + StringCodec.get(), OmKeyInfo.getCodec(true)); public static final DBColumnFamilyDefinition META_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.META_TABLE, - String.class, StringCodec.get(), - String.class, StringCodec.get()); // Tables for multi-tenancy @@ -197,27 +167,26 @@ public class OMDBDefinition extends DBDefinition.WithMap { TENANT_ACCESS_ID_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.TENANT_ACCESS_ID_TABLE, - String.class, // accessId + // accessId StringCodec.get(), - OmDBAccessIdInfo.class, // tenantId, secret, principal + // tenantId, secret, principal OmDBAccessIdInfo.getCodec()); public static final DBColumnFamilyDefinition PRINCIPAL_TO_ACCESS_IDS_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.PRINCIPAL_TO_ACCESS_IDS_TABLE, - String.class, // User principal + // User principal StringCodec.get(), - OmDBUserPrincipalInfo.class, // List of accessIds + // List of accessIds OmDBUserPrincipalInfo.getCodec()); public static final DBColumnFamilyDefinition TENANT_STATE_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.TENANT_STATE_TABLE, - String.class, // tenantId (tenant name) + // tenantId (tenant name) StringCodec.get(), - OmDBTenantState.class, OmDBTenantState.getCodec()); // End tables for S3 multi-tenancy @@ -226,18 +195,15 @@ public class OMDBDefinition extends DBDefinition.WithMap { SNAPSHOT_INFO_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE, - String.class, // snapshot path + // snapshot path StringCodec.get(), - SnapshotInfo.class, SnapshotInfo.getCodec()); public static final DBColumnFamilyDefinition COMPACTION_LOG_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.COMPACTION_LOG_TABLE, - String.class, StringCodec.get(), - CompactionLogEntry.class, CompactionLogEntry.getCodec()); /** @@ -254,9 +220,9 @@ public class OMDBDefinition extends DBDefinition.WithMap { SNAPSHOT_RENAMED_TABLE = new DBColumnFamilyDefinition<>( OmMetadataManagerImpl.SNAPSHOT_RENAMED_TABLE, - String.class, // /volumeName/bucketName/objectID + // /volumeName/bucketName/objectID StringCodec.get(), - String.class, // path to key in prev snapshot's key(file)/dir Table. + // path to key in prev snapshot's key(file)/dir Table. 
StringCodec.get()); private static final Map> @@ -284,7 +250,13 @@ public class OMDBDefinition extends DBDefinition.WithMap { USER_TABLE, VOLUME_TABLE); - public OMDBDefinition() { + private static final OMDBDefinition INSTANCE = new OMDBDefinition(); + + public static OMDBDefinition get() { + return INSTANCE; + } + + private OMDBDefinition() { super(COLUMN_FAMILIES); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/AuthorizerLock.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/AuthorizerLock.java index 1dcb0f0cd618..2d59c6259ad7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/AuthorizerLock.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/AuthorizerLock.java @@ -43,7 +43,7 @@ public interface AuthorizerLock { /** * @return stamp that can be passed to - * {@link this#validateOptimisticRead(long)} to check if a write lock was + * {@link #validateOptimisticRead(long)} to check if a write lock was * acquired since the stamp was issued. * @throws IOException If an ongoing write prevents the lock from moving to * the read state for longer than the timeout. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java index 8e4cc9fbf4db..42ae90b91819 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java @@ -40,7 +40,6 @@ import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.S3SecretManager; import org.apache.hadoop.ozone.om.codec.OMDBDefinition; @@ -217,8 +216,8 @@ private OzoneManagerDoubleBuffer(Builder b) { } public OzoneManagerDoubleBuffer start() { - daemon.start(); isRunning.set(true); + daemon.start(); return this; } @@ -478,10 +477,7 @@ private void addCleanupEntry(Entry entry, Map> cleanupEpochs) if (cleanupTableInfo != null) { final List cleanupTables; if (cleanupTableInfo.cleanupAll()) { - cleanupTables = new OMDBDefinition().getColumnFamilies() - .stream() - .map(DBColumnFamilyDefinition::getName) - .collect(Collectors.toList()); + cleanupTables = OMDBDefinition.get().getColumnFamilyNames(); } else { cleanupTables = Arrays.asList(cleanupTableInfo.cleanupTables()); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java index 463afba94218..aa7211fa036a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java @@ -46,6 +46,7 @@ import org.apache.hadoop.ozone.protocolPB.OzoneManagerRequestHandler; import org.apache.hadoop.ozone.protocolPB.RequestHandler; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Time; import org.apache.hadoop.util.concurrent.HadoopExecutors; 
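OzoneManagerDoubleBuffer.start() now sets isRunning before starting the daemon thread, presumably so the flush loop, which polls the flag, cannot observe a stale false and exit immediately. A toy illustration of that ordering with a plain Thread and AtomicBoolean — the names below are illustrative, not the actual double-buffer implementation:

import java.util.concurrent.atomic.AtomicBoolean;

final class DoubleBufferStartSketch {
  private final AtomicBoolean isRunning = new AtomicBoolean(false);
  private final Thread daemon = new Thread(this::flushLoop, "flush-daemon");

  /** Set the flag first, then start the thread, so the loop never sees a stale 'false'. */
  DoubleBufferStartSketch start() {
    isRunning.set(true);
    daemon.start();
    return this;
  }

  void stop() throws InterruptedException {
    isRunning.set(false);
    daemon.join();
  }

  private void flushLoop() {
    while (isRunning.get()) {
      // flush buffered transactions ... (omitted in this sketch)
      Thread.yield();
    }
  }

  public static void main(String[] args) throws InterruptedException {
    DoubleBufferStartSketch buffer = new DoubleBufferStartSketch().start();
    Thread.sleep(10);
    buffer.stop();
  }
}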
import org.apache.ratis.proto.RaftProtos; import org.apache.ratis.proto.RaftProtos.StateMachineLogEntryProto; @@ -88,7 +89,6 @@ public class OzoneManagerStateMachine extends BaseStateMachine { new SimpleStateMachineStorage(); private final OzoneManager ozoneManager; private RequestHandler handler; - private RaftGroupId raftGroupId; private volatile OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer; private final ExecutorService executorService; private final ExecutorService installSnapshotExecutor; @@ -134,8 +134,8 @@ public void initialize(RaftServer server, RaftGroupId id, RaftStorage raftStorage) throws IOException { getLifeCycle().startAndTransition(() -> { super.initialize(server, id, raftStorage); - this.raftGroupId = id; storage.init(raftStorage); + LOG.info("{}: initialize {} with {}", getId(), id, getLastAppliedTermIndex()); }); } @@ -143,8 +143,9 @@ public void initialize(RaftServer server, RaftGroupId id, public synchronized void reinitialize() throws IOException { loadSnapshotInfoFromDB(); if (getLifeCycleState() == LifeCycle.State.PAUSED) { - unpause(getLastAppliedTermIndex().getIndex(), - getLastAppliedTermIndex().getTerm()); + final TermIndex lastApplied = getLastAppliedTermIndex(); + unpause(lastApplied.getIndex(), lastApplied.getTerm()); + LOG.info("{}: reinitialize {} with {}", getId(), getGroupId(), lastApplied); } } @@ -160,6 +161,7 @@ public void notifyLeaderChanged(RaftGroupMemberId groupMemberId, RaftPeerId newLeaderId) { // Initialize OMHAMetrics ozoneManager.omHAMetricsInit(newLeaderId.toString()); + LOG.info("{}: leader changed to {}", groupMemberId, newLeaderId); } /** Notified by Ratis for non-StateMachine term-index update. */ @@ -263,7 +265,7 @@ public TransactionContext startTransaction( messageContent); Preconditions.checkArgument(raftClientRequest.getRaftGroupId().equals( - raftGroupId)); + getGroupId())); try { handler.validateRequest(omRequest); } catch (IOException ioe) { @@ -293,6 +295,10 @@ public TransactionContext preAppendTransaction(TransactionContext trx) OzoneManagerPrepareState prepareState = ozoneManager.getPrepareState(); + if (LOG.isDebugEnabled()) { + LOG.debug("{}: preAppendTransaction {}", getId(), TermIndex.valueOf(trx.getLogEntry())); + } + if (cmdType == OzoneManagerProtocolProtos.Type.Prepare) { // Must authenticate prepare requests here, since we must determine // whether or not to apply the prepare gate before proceeding with the @@ -303,8 +309,7 @@ public TransactionContext preAppendTransaction(TransactionContext trx) if (ozoneManager.getAclsEnabled() && !ozoneManager.isAdmin(userGroupInformation)) { String message = "Access denied for user " + userGroupInformation - + ". " - + "Superuser privilege is required to prepare ozone managers."; + + ". Superuser privilege is required to prepare upgrade/downgrade."; OMException cause = new OMException(message, OMException.ResultCodes.ACCESS_DENIED); // Leader should not step down because of this failure. @@ -341,6 +346,7 @@ public CompletableFuture applyTransaction(TransactionContext trx) { : OMRatisHelper.convertByteStringToOMRequest( trx.getStateMachineLogEntry().getLogData()); final TermIndex termIndex = TermIndex.valueOf(trx.getLogEntry()); + LOG.debug("{}: applyTransaction {}", getId(), termIndex); // In the current approach we have one single global thread executor. // with single thread. 
Right now this is being done for correctness, as // applyTransaction will be run on multiple OM's we want to execute the @@ -427,12 +433,14 @@ public synchronized void pause() { */ public synchronized void unpause(long newLastAppliedSnaphsotIndex, long newLastAppliedSnapShotTermIndex) { - LOG.info("OzoneManagerStateMachine is un-pausing"); if (statePausedCount.decrementAndGet() == 0) { getLifeCycle().startAndTransition(() -> { this.ozoneManagerDoubleBuffer = buildDoubleBufferForRatis(); this.setLastAppliedTermIndex(TermIndex.valueOf( newLastAppliedSnapShotTermIndex, newLastAppliedSnaphsotIndex)); + LOG.info("{}: OzoneManagerStateMachine un-pause completed. " + + "newLastAppliedSnaphsotIndex: {}, newLastAppliedSnapShotTermIndex: {}", + getId(), newLastAppliedSnaphsotIndex, newLastAppliedSnapShotTermIndex); }); } } @@ -482,15 +490,15 @@ private synchronized long takeSnapshotImpl() throws IOException { final TermIndex applied = getLastAppliedTermIndex(); final TermIndex notified = getLastNotifiedTermIndex(); final TermIndex snapshot = applied.compareTo(notified) > 0 ? applied : notified; - LOG.info(" applied = {}", applied); - LOG.info(" skipped = {}", lastSkippedIndex); - LOG.info("notified = {}", notified); - LOG.info("snapshot = {}", snapshot); + long startTime = Time.monotonicNow(); final TransactionInfo transactionInfo = TransactionInfo.valueOf(snapshot); ozoneManager.setTransactionInfo(transactionInfo); ozoneManager.getMetadataManager().getTransactionInfoTable().put(TRANSACTION_INFO_KEY, transactionInfo); ozoneManager.getMetadataManager().getStore().flushDB(); + LOG.info("{}: taking snapshot. applied = {}, skipped = {}, " + + "notified = {}, current snapshot index = {}, took {} ms", + getId(), applied, lastSkippedIndex, notified, snapshot, Time.monotonicNow() - startTime); return snapshot.getIndex(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java index ffaedaa06a99..30e14bc017e2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java @@ -78,6 +78,7 @@ import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotCreateRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotDeleteRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotMoveDeletedKeysRequest; +import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotMoveTableKeysRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotPurgeRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotRenameRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotSetPropertyRequest; @@ -109,7 +110,7 @@ import org.slf4j.LoggerFactory; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import static org.apache.hadoop.ozone.OzoneConsts.OM_RATIS_SNAPSHOT_DIR; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RATIS_SNAPSHOT_DIR; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_DIR; import static org.apache.hadoop.ozone.om.OzoneManagerUtils.getBucketLayout; @@ -232,6 +233,8 @@ public static OMClientRequest createClientRequest(OMRequest omRequest, return new OMSnapshotRenameRequest(omRequest); case SnapshotMoveDeletedKeys: return new 
OMSnapshotMoveDeletedKeysRequest(omRequest); + case SnapshotMoveTableKeys: + return new OMSnapshotMoveTableKeysRequest(omRequest); case SnapshotPurge: return new OMSnapshotPurgeRequest(omRequest); case SetSnapshotProperty: @@ -404,9 +407,9 @@ private static OMClientRequest getOMAclRequest(OMRequest omRequest, } /** - * Convert exception result to {@link OzoneManagerProtocolProtos.Status}. + * Convert exception result to {@link org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status}. * @param exception - * @return OzoneManagerProtocolProtos.Status + * @return Status */ public static Status exceptionToResponseStatus(Exception exception) { if (exception instanceof OMException) { @@ -436,8 +439,7 @@ public static Status exceptionToResponseStatus(Exception exception) { */ public static TransactionInfo getTrxnInfoFromCheckpoint( OzoneConfiguration conf, Path dbPath) throws Exception { - return HAUtils - .getTrxnInfoFromCheckpoint(conf, dbPath, new OMDBDefinition()); + return HAUtils.getTrxnInfoFromCheckpoint(conf, dbPath, OMDBDefinition.get()); } /** @@ -482,7 +484,7 @@ public static String getOMRatisSnapshotDirectory(ConfigurationSource conf) { OZONE_OM_RATIS_SNAPSHOT_DIR, OZONE_METADATA_DIRS); File metaDirPath = ServerUtils.getOzoneMetaDirPath(conf); snapshotDir = Paths.get(metaDirPath.getPath(), - OM_RATIS_SNAPSHOT_DIR).toString(); + OZONE_RATIS_SNAPSHOT_DIR).toString(); } return snapshotDir; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java index 25a204ded271..17f9663ae1f2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java @@ -438,7 +438,6 @@ public InetAddress getRemoteAddress() throws IOException { * Return String created from OMRequest userInfo. If userInfo is not * set, returns null. 
* @return String - * @throws IOException */ @VisibleForTesting public String getHostName() { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java index 9ae6b7e5d508..f73255da1173 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java @@ -45,7 +45,6 @@ public interface RequestAuditor { * @param auditMap * @param throwable * @param userInfo - * @return */ OMAuditLogger.Builder buildAuditMessage( AuditAction op, Map auditMap, Throwable throwable, UserInfo userInfo); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java index 53d4c83c3a96..802cfa54e604 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java @@ -257,7 +257,7 @@ dirKeyInfo, missingParentInfos, result, getBucketLayout(), * @param bucketInfo * @param omPathInfo * @param trxnLogIndex - * @return + * @return {@code List} * @throws IOException */ public static List getAllParentInfo(OzoneManager ozoneManager, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java index 3e7549b176e2..8f2a768c5257 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java @@ -1050,7 +1050,7 @@ public static long getParentID(long volumeId, long bucketId, String keyName, * @param volumeName - volume name. * @param bucketName - bucket name. * @param keyName - key name. 
- * @return + * @return {@code long} * @throws IOException */ public static long getParentId(OMMetadataManager omMetadataManager, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java index dd08ff171654..29ed5d9fc7b5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java @@ -24,15 +24,17 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.UUID; + import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.ratis.server.protocol.TermIndex; -import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -45,8 +47,10 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import static org.apache.hadoop.hdds.HddsUtils.fromProtobuf; import static org.apache.hadoop.ozone.OzoneConsts.DELETED_HSYNC_KEY; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.validatePreviousSnapshotId; /** * Handles purging of keys from OM DB. @@ -66,19 +70,34 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn List purgeRequests = purgeDirsRequest.getDeletedPathList(); - - SnapshotInfo fromSnapshotInfo = null; Set> lockSet = new HashSet<>(); Map, OmBucketInfo> volBucketInfoMap = new HashMap<>(); - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); Map openKeyInfoMap = new HashMap<>(); - OMMetrics omMetrics = ozoneManager.getMetrics(); + OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + final SnapshotInfo fromSnapshotInfo; try { - if (fromSnapshot != null) { - fromSnapshotInfo = SnapshotUtils.getSnapshotInfo(ozoneManager, fromSnapshot); + fromSnapshotInfo = fromSnapshot != null ? SnapshotUtils.getSnapshotInfo(ozoneManager, + fromSnapshot) : null; + // Checking if this request is an old request or new one. + if (purgeDirsRequest.hasExpectedPreviousSnapshotID()) { + // Validating previous snapshot since while purging deletes, a snapshot create request could make this purge + // directory request invalid on AOS since the deletedDirectory would be in the newly created snapshot. Adding + // subdirectories could lead to not being able to reclaim sub-files and subdirectories since the + // file/directory would be present in the newly created snapshot. + // Validating previous snapshot can ensure the chain hasn't changed. 
+ UUID expectedPreviousSnapshotId = purgeDirsRequest.getExpectedPreviousSnapshotID().hasUuid() + ? fromProtobuf(purgeDirsRequest.getExpectedPreviousSnapshotID().getUuid()) : null; + validatePreviousSnapshotId(fromSnapshotInfo, omMetadataManager.getSnapshotChainManager(), + expectedPreviousSnapshotId); } - + } catch (IOException e) { + LOG.error("Error occurred while performing OMDirectoriesPurge. ", e); + return new OMDirectoriesPurgeResponseWithFSO(createErrorOMResponse(omResponse, e)); + } + try { for (OzoneManagerProtocolProtos.PurgePathRequest path : purgeRequests) { for (OzoneManagerProtocolProtos.KeyInfo key : path.getMarkDeletedSubDirsList()) { @@ -170,12 +189,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn } } - OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( - getOmRequest()); - OMClientResponse omClientResponse = new OMDirectoriesPurgeResponseWithFSO( + return new OMDirectoriesPurgeResponseWithFSO( omResponse.build(), purgeRequests, ozoneManager.isRatisEnabled(), getBucketLayout(), volBucketInfoMap, fromSnapshotInfo, openKeyInfoMap); - - return omClientResponse; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index cb507cd0f347..378e0cb12ce3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -458,7 +458,6 @@ protected List getOmKeyLocationInfos( * @param omMetrics om metrics * @param exception exception trace * @param omKeyInfo omKeyInfo - * @param result result * @param result stores the result of the execution */ @SuppressWarnings("parameternumber") diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java index b370c286e0fc..f40adb7495f8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java @@ -246,7 +246,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn * @param keyName - key name. * @param uploadID - Multi part upload ID for this key. 
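Both purge requests now carry an expectedPreviousSnapshotID and bail out when the snapshot chain has moved on since the purge was prepared. The core of that guard is an ID comparison against the chain's current previous snapshot; a simplified sketch of the idea (not Ozone's SnapshotUtils.validatePreviousSnapshotId, whose exact behaviour may differ):

import java.io.IOException;
import java.util.Objects;
import java.util.UUID;

final class PreviousSnapshotValidationSketch {
  /**
   * Fail if the previous snapshot recorded when the purge was prepared no longer
   * matches the chain's current previous snapshot (e.g. a snapshot was created in between).
   */
  static void validatePreviousSnapshot(UUID expectedPreviousSnapshotId,
      UUID currentPreviousSnapshotId) throws IOException {
    if (!Objects.equals(expectedPreviousSnapshotId, currentPreviousSnapshotId)) {
      throw new IOException("Snapshot chain changed: expected previous snapshot "
          + expectedPreviousSnapshotId + " but found " + currentPreviousSnapshotId);
    }
  }

  public static void main(String[] args) throws IOException {
    UUID id = UUID.randomUUID();
    validatePreviousSnapshot(id, id);          // passes
    validatePreviousSnapshot(null, null);      // passes: no previous snapshot on either side
    try {
      validatePreviousSnapshot(id, UUID.randomUUID());
    } catch (IOException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}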
* @param omMetadataManager - * @return + * @return {@code String} * @throws IOException */ @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java index 14c80bb7a93b..a5e8cb145255 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.om.OzoneManager; @@ -42,6 +43,10 @@ import org.slf4j.LoggerFactory; import java.util.List; +import java.util.UUID; + +import static org.apache.hadoop.hdds.HddsUtils.fromProtobuf; +import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.validatePreviousSnapshotId; /** * Handles purging of keys from OM DB. @@ -58,30 +63,44 @@ public OMKeyPurgeRequest(OMRequest omRequest) { @Override public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { PurgeKeysRequest purgeKeysRequest = getOmRequest().getPurgeKeysRequest(); - List bucketDeletedKeysList = purgeKeysRequest - .getDeletedKeysList(); - List keysToUpdateList = purgeKeysRequest - .getKeysToUpdateList(); - String fromSnapshot = purgeKeysRequest.hasSnapshotTableKey() ? - purgeKeysRequest.getSnapshotTableKey() : null; - List keysToBePurgedList = new ArrayList<>(); + List bucketDeletedKeysList = purgeKeysRequest.getDeletedKeysList(); + List keysToUpdateList = purgeKeysRequest.getKeysToUpdateList(); + String fromSnapshot = purgeKeysRequest.hasSnapshotTableKey() ? purgeKeysRequest.getSnapshotTableKey() : null; OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( getOmRequest()); - OMClientResponse omClientResponse = null; - for (DeletedKeys bucketWithDeleteKeys : bucketDeletedKeysList) { - for (String deletedKey : bucketWithDeleteKeys.getKeysList()) { - keysToBePurgedList.add(deletedKey); + + final SnapshotInfo fromSnapshotInfo; + try { + fromSnapshotInfo = fromSnapshot != null ? SnapshotUtils.getSnapshotInfo(ozoneManager, + fromSnapshot) : null; + // Checking if this request is an old request or new one. + if (purgeKeysRequest.hasExpectedPreviousSnapshotID()) { + // Validating previous snapshot since while purging deletes, a snapshot create request could make this purge + // key request invalid on AOS since the deletedKey would be in the newly created snapshot. This would add an + // redundant tombstone entry in the deletedTable. It is better to skip the transaction. + UUID expectedPreviousSnapshotId = purgeKeysRequest.getExpectedPreviousSnapshotID().hasUuid() + ? fromProtobuf(purgeKeysRequest.getExpectedPreviousSnapshotID().getUuid()) : null; + validatePreviousSnapshotId(fromSnapshotInfo, omMetadataManager.getSnapshotChainManager(), + expectedPreviousSnapshotId); } + } catch (IOException e) { + LOG.error("Error occurred while performing OmKeyPurge. 
", e); + return new OMKeyPurgeResponse(createErrorOMResponse(omResponse, e)); } - final SnapshotInfo fromSnapshotInfo; - try { - fromSnapshotInfo = fromSnapshot == null ? null : SnapshotUtils.getSnapshotInfo(ozoneManager, fromSnapshot); - } catch (IOException ex) { - return new OMKeyPurgeResponse(createErrorOMResponse(omResponse, ex)); + List keysToBePurgedList = new ArrayList<>(); + + for (DeletedKeys bucketWithDeleteKeys : bucketDeletedKeysList) { + keysToBePurgedList.addAll(bucketWithDeleteKeys.getKeysList()); + } + + if (keysToBePurgedList.isEmpty()) { + return new OMKeyPurgeResponse(createErrorOMResponse(omResponse, + new OMException("None of the keys can be purged be purged since a new snapshot was created for all the " + + "buckets, making this request invalid", OMException.ResultCodes.KEY_DELETION_ERROR))); } // Setting transaction info for snapshot, this is to prevent duplicate purge requests to OM from background @@ -95,10 +114,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn } catch (IOException e) { return new OMKeyPurgeResponse(createErrorOMResponse(omResponse, e)); } - omClientResponse = new OMKeyPurgeResponse(omResponse.build(), keysToBePurgedList, fromSnapshotInfo, - keysToUpdateList); - return omClientResponse; + return new OMKeyPurgeResponse(omResponse.build(), + keysToBePurgedList, fromSnapshotInfo, keysToUpdateList); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java index 72365221d3bd..e57b6d99fd4e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java @@ -395,7 +395,7 @@ private Map buildAuditMap( * level, e.g. source is /vol1/buck1/dir1/key1 and dest is /vol1/buck1). * * @param request - * @return + * @return {@code String} * @throws OMException */ @Override @@ -410,7 +410,7 @@ protected String extractDstKey(RenameKeyRequest request) throws OMException { * Returns the validated and normalized source key name. * * @param keyArgs - * @return + * @return {@code String} * @throws OMException */ @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index 09e5d8bca060..88c5ad914054 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -611,7 +611,7 @@ protected void getFileEncryptionInfoForMpuKey(KeyArgs keyArgs, /** * Get FileEncryptionInfoProto from KeyArgs. * @param keyArgs - * @return + * @return FileEncryptionInfo */ protected FileEncryptionInfo getFileEncryptionInfo(KeyArgs keyArgs) { FileEncryptionInfo encryptionInfo = null; @@ -623,7 +623,7 @@ protected FileEncryptionInfo getFileEncryptionInfo(KeyArgs keyArgs) { /** * Check bucket quota in bytes. - * @paran metadataManager + * @param metadataManager * @param omBucketInfo * @param allocateSize * @throws IOException @@ -911,7 +911,7 @@ private OmKeyInfo prepareMultipartFileInfo( * @param keyName - key name. * @param uploadID - Multi part upload ID for this key. 
* @param omMetadataManager - * @return + * @return {@code String} * @throws IOException */ protected String getDBMultipartOpenKey(String volumeName, String bucketName, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java index 58fdb1232d31..18055bdda40c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java @@ -80,9 +80,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn OmResponseUtil.getOMResponseBuilder(getOmRequest()); try { // Check the snapshot exists. - SnapshotUtils.getSnapshotInfo(ozoneManager, fromSnapshot.getTableKey()); + SnapshotInfo snapshotInfo = SnapshotUtils.getSnapshotInfo(ozoneManager, fromSnapshot.getTableKey()); - nextSnapshot = SnapshotUtils.getNextActiveSnapshot(fromSnapshot, snapshotChainManager, ozoneManager); + nextSnapshot = SnapshotUtils.getNextSnapshot(ozoneManager, snapshotChainManager, snapshotInfo); // Get next non-deleted snapshot. List nextDBKeysList = moveDeletedKeysRequest.getNextDBKeysList(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java new file mode 100644 index 000000000000..0eb0d3cd166f --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java @@ -0,0 +1,184 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.apache.hadoop.ozone.om.request.snapshot; + +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.TransactionInfo; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotMoveTableKeysResponse; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; +import org.apache.hadoop.ozone.om.upgrade.DisallowedUntilLayoutVersion; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveKeyInfos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveTableKeysRequest; +import org.apache.ratis.server.protocol.TermIndex; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.apache.hadoop.hdds.HddsUtils.fromProtobuf; +import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.FILESYSTEM_SNAPSHOT; + +/** + * Handles OMSnapshotMoveTableKeysRequest Request. + * This is an OM internal request. Does not need @RequireSnapshotFeatureState. + */ +public class OMSnapshotMoveTableKeysRequest extends OMClientRequest { + + private static final Logger LOG = LoggerFactory.getLogger(OMSnapshotMoveTableKeysRequest.class); + + public OMSnapshotMoveTableKeysRequest(OMRequest omRequest) { + super(omRequest); + } + + @Override + public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { + OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); + SnapshotChainManager snapshotChainManager = omMetadataManager.getSnapshotChainManager(); + SnapshotMoveTableKeysRequest moveTableKeysRequest = getOmRequest().getSnapshotMoveTableKeysRequest(); + SnapshotInfo fromSnapshot = SnapshotUtils.getSnapshotInfo(ozoneManager, + snapshotChainManager, fromProtobuf(moveTableKeysRequest.getFromSnapshotID())); + String bucketKeyPrefix = omMetadataManager.getBucketKeyPrefix(fromSnapshot.getVolumeName(), + fromSnapshot.getBucketName()); + String bucketKeyPrefixFSO = omMetadataManager.getBucketKeyPrefixFSO(fromSnapshot.getVolumeName(), + fromSnapshot.getBucketName()); + + Set keys = new HashSet<>(); + List deletedKeys = new ArrayList<>(moveTableKeysRequest.getDeletedKeysList().size()); + + //validate deleted key starts with bucket prefix.[///] + for (SnapshotMoveKeyInfos deletedKey : moveTableKeysRequest.getDeletedKeysList()) { + // Filter only deleted keys with at least one keyInfo per key. 
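// The loops in preExecute below reduce to one pattern per table: every key must start with the
// snapshot's bucket prefix and must not repeat within the request. A minimal standalone sketch of that
// pattern, assuming the helper name validateKeys and plain IllegalArgumentException in place of the
// OMException/ResultCodes used by the actual request:
static void validateKeys(List<String> keys, String bucketPrefix) {
  Set<String> seen = new HashSet<>();
  for (String key : keys) {
    if (!key.startsWith(bucketPrefix)) {
      // Reject keys outside the bucket scope of the snapshot being moved.
      throw new IllegalArgumentException("Key " + key + " doesn't start with prefix " + bucketPrefix);
    }
    if (!seen.add(key)) {
      // Set#add returns false for duplicates, so one pass catches repeated keys.
      throw new IllegalArgumentException("Duplicate key " + key + " in request");
    }
  }
}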
+ if (!deletedKey.getKeyInfosList().isEmpty()) { + deletedKeys.add(deletedKey); + if (!deletedKey.getKey().startsWith(bucketKeyPrefix)) { + throw new OMException("Deleted Key: " + deletedKey + " doesn't start with prefix " + bucketKeyPrefix, + OMException.ResultCodes.INVALID_KEY_NAME); + } + if (keys.contains(deletedKey.getKey())) { + throw new OMException("Duplicate Deleted Key: " + deletedKey + " in request", + OMException.ResultCodes.INVALID_REQUEST); + } else { + keys.add(deletedKey.getKey()); + } + } + } + + keys.clear(); + List renamedKeysList = new ArrayList<>(moveTableKeysRequest.getRenamedKeysList().size()); + //validate rename key starts with bucket prefix.[///] + for (HddsProtos.KeyValue renamedKey : moveTableKeysRequest.getRenamedKeysList()) { + if (renamedKey.hasKey() && renamedKey.hasValue()) { + renamedKeysList.add(renamedKey); + if (!renamedKey.getKey().startsWith(bucketKeyPrefix)) { + throw new OMException("Rename Key: " + renamedKey + " doesn't start with prefix " + bucketKeyPrefix, + OMException.ResultCodes.INVALID_KEY_NAME); + } + if (keys.contains(renamedKey.getKey())) { + throw new OMException("Duplicate rename Key: " + renamedKey + " in request", + OMException.ResultCodes.INVALID_REQUEST); + } else { + keys.add(renamedKey.getKey()); + } + } + } + keys.clear(); + + // Filter only deleted dirs with only one keyInfo per key. + List deletedDirs = new ArrayList<>(moveTableKeysRequest.getDeletedDirsList().size()); + //validate deleted key starts with bucket FSO path prefix.[///] + for (SnapshotMoveKeyInfos deletedDir : moveTableKeysRequest.getDeletedDirsList()) { + // Filter deleted directories with exactly one keyInfo per key. + if (deletedDir.getKeyInfosList().size() == 1) { + deletedDirs.add(deletedDir); + if (!deletedDir.getKey().startsWith(bucketKeyPrefixFSO)) { + throw new OMException("Deleted dir: " + deletedDir + " doesn't start with prefix " + + bucketKeyPrefixFSO, OMException.ResultCodes.INVALID_KEY_NAME); + } + if (keys.contains(deletedDir.getKey())) { + throw new OMException("Duplicate deleted dir Key: " + deletedDir + " in request", + OMException.ResultCodes.INVALID_REQUEST); + } else { + keys.add(deletedDir.getKey()); + } + } + } + return getOmRequest().toBuilder().setSnapshotMoveTableKeysRequest( + moveTableKeysRequest.toBuilder().clearDeletedDirs().clearDeletedKeys().clearRenamedKeys() + .addAllDeletedKeys(deletedKeys).addAllDeletedDirs(deletedDirs) + .addAllRenamedKeys(renamedKeysList).build()).build(); + } + + @Override + @DisallowedUntilLayoutVersion(FILESYSTEM_SNAPSHOT) + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); + SnapshotChainManager snapshotChainManager = omMetadataManager.getSnapshotChainManager(); + + SnapshotMoveTableKeysRequest moveTableKeysRequest = getOmRequest().getSnapshotMoveTableKeysRequest(); + + OMClientResponse omClientResponse; + OzoneManagerProtocolProtos.OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(getOmRequest()); + try { + SnapshotInfo fromSnapshot = SnapshotUtils.getSnapshotInfo(ozoneManager, + snapshotChainManager, fromProtobuf(moveTableKeysRequest.getFromSnapshotID())); + // If there is no snapshot in the chain after the current snapshot move the keys to Active Object Store. + SnapshotInfo nextSnapshot = SnapshotUtils.getNextSnapshot(ozoneManager, snapshotChainManager, fromSnapshot); + + // If next snapshot is not active then ignore move. 
Since this could be a redundant operations. + if (nextSnapshot != null && nextSnapshot.getSnapshotStatus() != SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE) { + throw new OMException("Next snapshot : " + nextSnapshot + " in chain is not active.", + OMException.ResultCodes.INVALID_SNAPSHOT_ERROR); + } + + // Update lastTransactionInfo for fromSnapshot and the nextSnapshot. + fromSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(fromSnapshot.getTableKey()), + CacheValue.get(termIndex.getIndex(), fromSnapshot)); + if (nextSnapshot != null) { + nextSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(nextSnapshot.getTableKey()), + CacheValue.get(termIndex.getIndex(), nextSnapshot)); + } + omClientResponse = new OMSnapshotMoveTableKeysResponse(omResponse.build(), fromSnapshot, nextSnapshot, + moveTableKeysRequest.getDeletedKeysList(), moveTableKeysRequest.getDeletedDirsList(), + moveTableKeysRequest.getRenamedKeysList()); + } catch (IOException ex) { + omClientResponse = new OMSnapshotMoveTableKeysResponse(createErrorOMResponse(omResponse, ex)); + } + return omClientResponse; + } +} + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java index 6602f52514b5..38c51d4de5c0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java @@ -103,9 +103,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn "Snapshot purge request.", snapTableKey); continue; } - - SnapshotInfo nextSnapshot = - SnapshotUtils.getNextActiveSnapshot(fromSnapshot, snapshotChainManager, ozoneManager); + SnapshotInfo nextSnapshot = SnapshotUtils.getNextSnapshot(ozoneManager, snapshotChainManager, fromSnapshot); // Step 1: Update the deep clean flag for the next active snapshot updateSnapshotInfoAndCache(nextSnapshot, omMetadataManager, trxnLogIndex); @@ -116,7 +114,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn .addCacheEntry(new CacheKey<>(fromSnapshot.getTableKey()), CacheValue.get(trxnLogIndex)); updatedSnapshotInfos.remove(fromSnapshot.getTableKey()); } - + // Update the snapshotInfo lastTransactionInfo. 
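// The loop that follows repeats the same two-step sequence already used in OMSnapshotMoveTableKeysRequest
// above: stamp the snapshot with the current Ratis term/index and refresh the snapshotInfoTable cache so
// the pending change is visible before it is flushed. A sketch of a helper capturing that sequence; the
// name stampLastTransaction and the OmMetadataManagerImpl parameter type are assumptions, while the calls
// mirror the surrounding code:
private static void stampLastTransaction(OmMetadataManagerImpl omMetadataManager,
    SnapshotInfo snapshotInfo, TermIndex termIndex) {
  // Record the last transaction applied to this snapshot; later flush checks compare against it.
  snapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString());
  // Keep the in-memory snapshotInfoTable cache consistent with the update at this log index.
  omMetadataManager.getSnapshotInfoTable().addCacheEntry(
      new CacheKey<>(snapshotInfo.getTableKey()),
      CacheValue.get(termIndex.getIndex(), snapshotInfo));
}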
for (SnapshotInfo snapshotInfo : updatedSnapshotInfos.values()) { snapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(snapshotInfo.getTableKey()), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java index 904b082e2d4c..df74edfb1c8a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java @@ -129,7 +129,6 @@ protected static PersistedUserVolumeInfo addVolumeToOwnerList( * @param dbVolumeKey * @param dbUserKey * @param transactionLogIndex - * @throws IOException */ protected static void createVolume( final OMMetadataManager omMetadataManager, OmVolumeArgs omVolumeArgs, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java index 28c3e3d758e2..782063d32446 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java @@ -78,6 +78,10 @@ public OMDirectoriesPurgeResponseWithFSO(@Nonnull OMResponse omResponse, this.openKeyInfoMap = openKeyInfoMap; } + public OMDirectoriesPurgeResponseWithFSO(OMResponse omResponse) { + super(omResponse); + } + @Override public void addToDBBatch(OMMetadataManager metadataManager, BatchOperation batchOp) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java index f39d5827a0cc..7d1b7f237b2c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java @@ -40,6 +40,7 @@ import java.util.List; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE; +import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.createMergedRepeatedOmKeyInfoFromDeletedTableEntry; /** * Response for OMSnapshotMoveDeletedKeysRequest. 
@@ -199,8 +200,7 @@ private void processKeys(BatchOperation batchOp, } for (SnapshotMoveKeyInfos dBKey : nextDBKeysList) { - RepeatedOmKeyInfo omKeyInfos = - createRepeatedOmKeyInfo(dBKey, metadataManager); + RepeatedOmKeyInfo omKeyInfos = createMergedRepeatedOmKeyInfoFromDeletedTableEntry(dBKey, metadataManager); if (omKeyInfos == null) { continue; } @@ -223,36 +223,5 @@ public static RepeatedOmKeyInfo createRepeatedOmKeyInfo( return result; } - - private RepeatedOmKeyInfo createRepeatedOmKeyInfo( - SnapshotMoveKeyInfos snapshotMoveKeyInfos, - OMMetadataManager metadataManager) throws IOException { - String dbKey = snapshotMoveKeyInfos.getKey(); - List keyInfoList = snapshotMoveKeyInfos.getKeyInfosList(); - // When older version of keys are moved to the next snapshot's deletedTable - // The newer version might also be in the next snapshot's deletedTable and - // it might overwrite. This is to avoid that and also avoid having - // orphans blocks. - RepeatedOmKeyInfo result = metadataManager.getDeletedTable().get(dbKey); - - for (KeyInfo keyInfo : keyInfoList) { - OmKeyInfo omKeyInfo = OmKeyInfo.getFromProtobuf(keyInfo); - if (result == null) { - result = new RepeatedOmKeyInfo(omKeyInfo); - } else if (!isSameAsLatestOmKeyInfo(omKeyInfo, result)) { - result.addOmKeyInfo(omKeyInfo); - } - } - - return result; - } - - private boolean isSameAsLatestOmKeyInfo(OmKeyInfo omKeyInfo, - RepeatedOmKeyInfo result) { - int size = result.getOmKeyInfoList().size(); - assert size > 0; - OmKeyInfo keyInfoFromRepeated = result.getOmKeyInfoList().get(size - 1); - return omKeyInfo.equals(keyInfoFromRepeated); - } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveTableKeysResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveTableKeysResponse.java new file mode 100644 index 000000000000..b06570afb14f --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveTableKeysResponse.java @@ -0,0 +1,162 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ +package org.apache.hadoop.ozone.om.response.snapshot; + +import jakarta.annotation.Nonnull; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveKeyInfos; + +import java.io.IOException; +import java.util.List; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE; +import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.createMergedRepeatedOmKeyInfoFromDeletedTableEntry; + +/** + * Response for OMSnapshotMoveDeletedKeysRequest. + */ +@CleanupTableInfo(cleanupTables = {SNAPSHOT_INFO_TABLE}) +public class OMSnapshotMoveTableKeysResponse extends OMClientResponse { + + private SnapshotInfo fromSnapshot; + private SnapshotInfo nextSnapshot; + private List deletedKeys; + private List renameKeysList; + private List deletedDirs; + + public OMSnapshotMoveTableKeysResponse(OMResponse omResponse, + @Nonnull SnapshotInfo fromSnapshot, SnapshotInfo nextSnapshot, + List deletedKeys, + List deletedDirs, + List renamedKeys) { + super(omResponse); + this.fromSnapshot = fromSnapshot; + this.nextSnapshot = nextSnapshot; + this.deletedKeys = deletedKeys; + this.renameKeysList = renamedKeys; + this.deletedDirs = deletedDirs; + } + + /** + * For when the request is not successful. + * For a successful request, the other constructor should be used. + */ + public OMSnapshotMoveTableKeysResponse(@Nonnull OMResponse omResponse) { + super(omResponse); + checkStatusNotOK(); + } + + @Override + protected void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { + OmSnapshotManager omSnapshotManager = ((OmMetadataManagerImpl) omMetadataManager) + .getOzoneManager().getOmSnapshotManager(); + + try (ReferenceCounted rcOmFromSnapshot = + omSnapshotManager.getSnapshot(fromSnapshot.getSnapshotId())) { + + OmSnapshot fromOmSnapshot = rcOmFromSnapshot.get(); + + if (nextSnapshot != null) { + try (ReferenceCounted + rcOmNextSnapshot = omSnapshotManager.getSnapshot(nextSnapshot.getSnapshotId())) { + + OmSnapshot nextOmSnapshot = rcOmNextSnapshot.get(); + RDBStore nextSnapshotStore = (RDBStore) nextOmSnapshot.getMetadataManager().getStore(); + // Init Batch Operation for snapshot db. + try (BatchOperation writeBatch = nextSnapshotStore.initBatchOperation()) { + addKeysToNextSnapshot(writeBatch, nextOmSnapshot.getMetadataManager()); + nextSnapshotStore.commitBatchOperation(writeBatch); + nextSnapshotStore.getDb().flushWal(true); + nextSnapshotStore.getDb().flush(); + } + } + } else { + // Handle the case where there is no next Snapshot. + addKeysToNextSnapshot(batchOperation, omMetadataManager); + } + + // Update From Snapshot Deleted Table. 
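// The next-snapshot branch above and the from-snapshot update that follows share one write pattern
// against a snapshot's own RocksDB instance: open a batch, apply the table mutations, commit, then force
// the WAL and memtables to disk so the moved entries survive a restart. A sketch of that pattern; the
// helper name applyToSnapshotStore and the BatchMutation callback are assumptions introduced for the
// example, while the RDBStore calls mirror the code around this point:
@FunctionalInterface
interface BatchMutation {
  void apply(BatchOperation batch) throws IOException;
}

private static void applyToSnapshotStore(OmSnapshot snapshot, BatchMutation mutation) throws IOException {
  RDBStore store = (RDBStore) snapshot.getMetadataManager().getStore();
  try (BatchOperation batch = store.initBatchOperation()) {
    mutation.apply(batch);              // stage all table changes in the batch
    store.commitBatchOperation(batch);  // apply the batch atomically
    store.getDb().flushWal(true);       // sync the write-ahead log
    store.getDb().flush();              // flush memtables so the change is durable on disk
  }
}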
+ RDBStore fromSnapshotStore = (RDBStore) fromOmSnapshot.getMetadataManager().getStore(); + try (BatchOperation fromSnapshotBatchOp = fromSnapshotStore.initBatchOperation()) { + deleteKeysFromSnapshot(fromSnapshotBatchOp, fromOmSnapshot.getMetadataManager()); + fromSnapshotStore.commitBatchOperation(fromSnapshotBatchOp); + fromSnapshotStore.getDb().flushWal(true); + fromSnapshotStore.getDb().flush(); + } + } + + // Flush snapshot info to rocksDB. + omMetadataManager.getSnapshotInfoTable().putWithBatch(batchOperation, fromSnapshot.getTableKey(), fromSnapshot); + if (nextSnapshot != null) { + omMetadataManager.getSnapshotInfoTable().putWithBatch(batchOperation, nextSnapshot.getTableKey(), nextSnapshot); + } + } + + private void deleteKeysFromSnapshot(BatchOperation batchOp, OMMetadataManager fromSnapshotMetadataManager) + throws IOException { + for (SnapshotMoveKeyInfos deletedOmKeyInfo : deletedKeys) { + // Delete keys from current snapshot that are moved to next snapshot. + fromSnapshotMetadataManager.getDeletedTable().deleteWithBatch(batchOp, deletedOmKeyInfo.getKey()); + } + + // Delete rename keys from current snapshot that are moved to next snapshot. + for (HddsProtos.KeyValue renameEntry : renameKeysList) { + fromSnapshotMetadataManager.getSnapshotRenamedTable().deleteWithBatch(batchOp, renameEntry.getKey()); + } + + // Delete deletedDir from current snapshot that are moved to next snapshot. + for (SnapshotMoveKeyInfos deletedDirInfo : deletedDirs) { + fromSnapshotMetadataManager.getDeletedDirTable().deleteWithBatch(batchOp, deletedDirInfo.getKey()); + } + + } + + private void addKeysToNextSnapshot(BatchOperation batchOp, OMMetadataManager metadataManager) throws IOException { + + // Add renamed keys to the next snapshot or active DB. + for (HddsProtos.KeyValue renameEntry : renameKeysList) { + metadataManager.getSnapshotRenamedTable().putWithBatch(batchOp, renameEntry.getKey(), renameEntry.getValue()); + } + // Add deleted keys to the next snapshot or active DB. + for (SnapshotMoveKeyInfos deletedKeyInfo : deletedKeys) { + RepeatedOmKeyInfo omKeyInfos = createMergedRepeatedOmKeyInfoFromDeletedTableEntry(deletedKeyInfo, + metadataManager); + metadataManager.getDeletedTable().putWithBatch(batchOp, deletedKeyInfo.getKey(), omKeyInfos); + } + // Add deleted dir keys to the next snapshot or active DB. 
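// createMergedRepeatedOmKeyInfoFromDeletedTableEntry, used in the deleted-keys loop above, takes over the
// merge logic this patch removes from OMSnapshotMoveDeletedKeysResponse: moved key versions are appended
// onto whatever entry the target deletedTable already holds, and an exact repeat of the latest version is
// skipped so nothing is overwritten and no blocks are orphaned. A sketch of that merge under the assumed
// name mergeIntoExisting (the real SnapshotUtils helper also reads the existing entry from the table):
static RepeatedOmKeyInfo mergeIntoExisting(RepeatedOmKeyInfo existing, List<OmKeyInfo> movedVersions) {
  RepeatedOmKeyInfo result = existing;  // null when the target table has no entry for this key yet
  for (OmKeyInfo keyInfo : movedVersions) {
    if (result == null) {
      result = new RepeatedOmKeyInfo(keyInfo);
    } else {
      List<OmKeyInfo> versions = result.getOmKeyInfoList();
      // Skip only an exact duplicate of the newest version; older versions must all be preserved.
      if (!keyInfo.equals(versions.get(versions.size() - 1))) {
        result.addOmKeyInfo(keyInfo);
      }
    }
  }
  return result;
}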
+ for (SnapshotMoveKeyInfos deletedDirInfo : deletedDirs) { + metadataManager.getDeletedDirTable().putWithBatch(batchOp, deletedDirInfo.getKey(), + OmKeyInfo.getFromProtobuf(deletedDirInfo.getKeyInfosList().get(0))); + } + } +} + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java index 2c2d16bf14c7..7559cf9a7291 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java @@ -19,6 +19,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.ServiceException; import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.utils.BackgroundService; @@ -32,13 +33,11 @@ import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; -import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeletedKeys; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -73,7 +72,7 @@ public abstract class AbstractKeyDeletingService extends BackgroundService private final OzoneManager ozoneManager; private final ScmBlockLocationProtocol scmClient; - private static ClientId clientId = ClientId.randomId(); + private final ClientId clientId = ClientId.randomId(); private final AtomicLong deletedDirsCount; private final AtomicLong movedDirsCount; private final AtomicLong movedFilesCount; @@ -97,7 +96,7 @@ public AbstractKeyDeletingService(String serviceName, long interval, protected int processKeyDeletes(List keyBlocksList, KeyManager manager, HashMap keysToModify, - String snapTableKey) throws IOException { + String snapTableKey, UUID expectedPreviousSnapshotId) throws IOException { long startTime = Time.monotonicNow(); int delCount = 0; @@ -120,7 +119,7 @@ protected int processKeyDeletes(List keyBlocksList, startTime = Time.monotonicNow(); if (isRatisEnabled()) { delCount = submitPurgeKeysRequest(blockDeletionResults, - keysToModify, snapTableKey); + keysToModify, snapTableKey, expectedPreviousSnapshotId); } else { // TODO: Once HA and non-HA paths are merged, we should have // only one code path here. 
Purge keys should go through an @@ -172,7 +171,7 @@ private int deleteAllKeys(List results, * @param keysToModify Updated list of RepeatedOmKeyInfo */ private int submitPurgeKeysRequest(List results, - HashMap keysToModify, String snapTableKey) { + HashMap keysToModify, String snapTableKey, UUID expectedPreviousSnapshotId) { Map, List> purgeKeysMapPerBucket = new HashMap<>(); @@ -203,6 +202,12 @@ private int submitPurgeKeysRequest(List results, if (snapTableKey != null) { purgeKeysRequest.setSnapshotTableKey(snapTableKey); } + OzoneManagerProtocolProtos.NullableUUID.Builder expectedPreviousSnapshotNullableUUID = + OzoneManagerProtocolProtos.NullableUUID.newBuilder(); + if (expectedPreviousSnapshotId != null) { + expectedPreviousSnapshotNullableUUID.setUuid(HddsUtils.toProtobuf(expectedPreviousSnapshotId)); + } + purgeKeysRequest.setExpectedPreviousSnapshotID(expectedPreviousSnapshotNullableUUID.build()); // Add keys to PurgeKeysRequest bucket wise. for (Map.Entry, List> entry : @@ -274,13 +279,21 @@ private void addToMap(Map, List> map, String object } protected void submitPurgePaths(List requests, - String snapTableKey) { + String snapTableKey, + UUID expectedPreviousSnapshotId) { OzoneManagerProtocolProtos.PurgeDirectoriesRequest.Builder purgeDirRequest = OzoneManagerProtocolProtos.PurgeDirectoriesRequest.newBuilder(); if (snapTableKey != null) { purgeDirRequest.setSnapshotTableKey(snapTableKey); } + OzoneManagerProtocolProtos.NullableUUID.Builder expectedPreviousSnapshotNullableUUID = + OzoneManagerProtocolProtos.NullableUUID.newBuilder(); + if (expectedPreviousSnapshotId != null) { + expectedPreviousSnapshotNullableUUID.setUuid(HddsUtils.toProtobuf(expectedPreviousSnapshotId)); + } + purgeDirRequest.setExpectedPreviousSnapshotID(expectedPreviousSnapshotNullableUUID.build()); + purgeDirRequest.addAllDeletedPath(requests); OzoneManagerProtocolProtos.OMRequest omRequest = @@ -386,7 +399,8 @@ public long optimizeDirDeletesAndSubmitRequest(long remainNum, List> allSubDirList, List purgePathRequestList, String snapTableKey, long startTime, - int remainingBufLimit, KeyManager keyManager) { + int remainingBufLimit, KeyManager keyManager, + UUID expectedPreviousSnapshotId) { // Optimization to handle delete sub-dir and keys to remove quickly // This case will be useful to handle when depth of directory is high @@ -408,6 +422,8 @@ public long optimizeDirDeletesAndSubmitRequest(long remainNum, } consumedSize += request.getSerializedSize(); purgePathRequestList.add(request); + // reduce remain count for self, sub-files, and sub-directories + remainNum = remainNum - 1; remainNum = remainNum - request.getDeletedSubFilesCount(); remainNum = remainNum - request.getMarkDeletedSubDirsCount(); // Count up the purgeDeletedDir, subDirs and subFiles @@ -426,7 +442,7 @@ public long optimizeDirDeletesAndSubmitRequest(long remainNum, } if (!purgePathRequestList.isEmpty()) { - submitPurgePaths(purgePathRequestList, snapTableKey); + submitPurgePaths(purgePathRequestList, snapTableKey, expectedPreviousSnapshotId); } if (dirNum != 0 || subDirNum != 0 || subFileNum != 0) { @@ -549,25 +565,6 @@ protected boolean isBufferLimitCrossed( return cLimit + increment >= maxLimit; } - protected SnapshotInfo getPreviousActiveSnapshot(SnapshotInfo snapInfo, SnapshotChainManager chainManager) - throws IOException { - SnapshotInfo currSnapInfo = snapInfo; - while (chainManager.hasPreviousPathSnapshot( - currSnapInfo.getSnapshotPath(), currSnapInfo.getSnapshotId())) { - - UUID prevPathSnapshot = chainManager.previousPathSnapshot( 
- currSnapInfo.getSnapshotPath(), currSnapInfo.getSnapshotId()); - String tableKey = chainManager.getTableKey(prevPathSnapshot); - SnapshotInfo prevSnapInfo = SnapshotUtils.getSnapshotInfo(ozoneManager, tableKey); - if (prevSnapInfo.getSnapshotStatus() == - SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE) { - return prevSnapInfo; - } - currSnapInfo = prevSnapInfo; - } - return null; - } - protected boolean isKeyReclaimable( Table previousKeyTable, Table renamedTable, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java index c8703c3c4c62..09f4a8f8a3d7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java @@ -33,16 +33,20 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PurgePathRequest; import org.apache.hadoop.util.Time; -import org.apache.ratis.protocol.ClientId; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -70,8 +74,6 @@ public class DirectoryDeletingService extends AbstractKeyDeletingService { public static final Logger LOG = LoggerFactory.getLogger(DirectoryDeletingService.class); - private static ClientId clientId = ClientId.randomId(); - // Use only a single thread for DirDeletion. Multiple threads would read // or write to same tables and can send deletion requests for same key // multiple times. @@ -82,6 +84,7 @@ public class DirectoryDeletingService extends AbstractKeyDeletingService { private final long pathLimitPerTask; private final int ratisByteLimit; private final AtomicBoolean suspended; + private AtomicBoolean isRunningOnAOS; public DirectoryDeletingService(long interval, TimeUnit unit, long serviceTimeout, OzoneManager ozoneManager, @@ -98,6 +101,7 @@ public DirectoryDeletingService(long interval, TimeUnit unit, // always go to 90% of max limit for request as other header will be added this.ratisByteLimit = (int) (limit * 0.9); this.suspended = new AtomicBoolean(false); + this.isRunningOnAOS = new AtomicBoolean(false); } private boolean shouldRun() { @@ -108,6 +112,10 @@ private boolean shouldRun() { return getOzoneManager().isLeaderReady() && !suspended.get(); } + public boolean isRunningOnAOS() { + return isRunningOnAOS.get(); + } + /** * Suspend the service. 
*/ @@ -127,11 +135,16 @@ public void resume() { @Override public BackgroundTaskQueue getTasks() { BackgroundTaskQueue queue = new BackgroundTaskQueue(); - queue.add(new DirectoryDeletingService.DirDeletingTask()); + queue.add(new DirectoryDeletingService.DirDeletingTask(this)); return queue; } - private class DirDeletingTask implements BackgroundTask { + private final class DirDeletingTask implements BackgroundTask { + private final DirectoryDeletingService directoryDeletingService; + + private DirDeletingTask(DirectoryDeletingService service) { + this.directoryDeletingService = service; + } @Override public int getPriority() { @@ -144,6 +157,7 @@ public BackgroundTaskResult call() { if (LOG.isDebugEnabled()) { LOG.debug("Running DirectoryDeletingService"); } + isRunningOnAOS.set(true); getRunCount().incrementAndGet(); long dirNum = 0L; long subDirNum = 0L; @@ -155,9 +169,15 @@ public BackgroundTaskResult call() { = new ArrayList<>((int) remainNum); Table.KeyValue pendingDeletedDirInfo; + try (TableIterator> deleteTableIterator = getOzoneManager().getMetadataManager(). getDeletedDirTable().iterator()) { + // This is to avoid race condition b/w purge request and snapshot chain updation. For AOS taking the global + // snapshotId since AOS could process multiple buckets in one iteration. + UUID expectedPreviousSnapshotId = + ((OmMetadataManagerImpl)getOzoneManager().getMetadataManager()).getSnapshotChainManager() + .getLatestGlobalSnapshotId(); long startTime = Time.monotonicNow(); while (remainNum > 0 && deleteTableIterator.hasNext()) { @@ -189,6 +209,8 @@ public BackgroundTaskResult call() { } consumedSize += request.getSerializedSize(); purgePathRequestList.add(request); + // reduce remain count for self, sub-files, and sub-directories + remainNum = remainNum - 1; remainNum = remainNum - request.getDeletedSubFilesCount(); remainNum = remainNum - request.getMarkDeletedSubDirsCount(); // Count up the purgeDeletedDir, subDirs and subFiles @@ -204,14 +226,17 @@ public BackgroundTaskResult call() { remainNum, dirNum, subDirNum, subFileNum, allSubDirList, purgePathRequestList, null, startTime, ratisByteLimit - consumedSize, - getOzoneManager().getKeyManager()); + getOzoneManager().getKeyManager(), expectedPreviousSnapshotId); } catch (IOException e) { LOG.error("Error while running delete directories and files " + "background task. Will retry at next run.", e); } + isRunningOnAOS.set(false); + synchronized (directoryDeletingService) { + this.directoryDeletingService.notify(); + } } - // place holder by returning empty results of this call back. return BackgroundTaskResult.EmptyTaskResult.newResult(); } @@ -224,12 +249,23 @@ private boolean previousSnapshotHasDir( getOzoneManager().getOmSnapshotManager(); OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl) getOzoneManager().getMetadataManager(); - + SnapshotInfo previousSnapshotInfo = SnapshotUtils.getLatestSnapshotInfo(deletedDirInfo.getVolumeName(), + deletedDirInfo.getBucketName(), getOzoneManager(), metadataManager.getSnapshotChainManager()); + if (previousSnapshotInfo == null) { + return false; + } + // previous snapshot is not active or it has not been flushed to disk then don't process the key in this + // iteration. 
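// The condition that follows is the safety gate for reclaiming against the previous snapshot: it must
// exist, be ACTIVE, and have all of its changes flushed to its RocksDB before its tables can be trusted
// for the "is this directory still referenced?" lookup. A compact sketch of the predicate; the name
// isPreviousSnapshotUsable (and the assumption that the flush check may throw IOException) are
// illustrative, while the calls mirror the code below:
private boolean isPreviousSnapshotUsable(SnapshotInfo previousSnapshotInfo) throws IOException {
  return previousSnapshotInfo != null
      && previousSnapshotInfo.getSnapshotStatus() == SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE
      && OmSnapshotManager.areSnapshotChangesFlushedToDB(
          getOzoneManager().getMetadataManager(), previousSnapshotInfo);
}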
+ if (previousSnapshotInfo.getSnapshotStatus() != SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE || + !OmSnapshotManager.areSnapshotChangesFlushedToDB(getOzoneManager().getMetadataManager(), + previousSnapshotInfo)) { + return true; + } try (ReferenceCounted rcLatestSnapshot = - metadataManager.getLatestActiveSnapshot( + omSnapshotManager.getSnapshot( deletedDirInfo.getVolumeName(), deletedDirInfo.getBucketName(), - omSnapshotManager)) { + previousSnapshotInfo.getName())) { if (rcLatestSnapshot != null) { String dbRenameKey = metadataManager @@ -250,8 +286,14 @@ private boolean previousSnapshotHasDir( String prevDbKey = prevDirTableDBKey == null ? metadataManager.getOzoneDeletePathDirKey(key) : prevDirTableDBKey; OmDirectoryInfo prevDirInfo = prevDirTable.get(prevDbKey); - return prevDirInfo != null && - prevDirInfo.getObjectID() == deletedDirInfo.getObjectID(); + //Checking if the previous snapshot in the chain hasn't changed while checking if the deleted directory is + // present in the previous snapshot. If the chain has changed, the deleted directory could have been moved + // to the newly created snapshot. + SnapshotInfo newPreviousSnapshotInfo = SnapshotUtils.getLatestSnapshotInfo(deletedDirInfo.getVolumeName(), + deletedDirInfo.getBucketName(), getOzoneManager(), metadataManager.getSnapshotChainManager()); + return (!Objects.equals(Optional.ofNullable(newPreviousSnapshotInfo).map(SnapshotInfo::getSnapshotId), + Optional.ofNullable(previousSnapshotInfo).map(SnapshotInfo::getSnapshotId))) || (prevDirInfo != null && + prevDirInfo.getObjectID() == deletedDirInfo.getObjectID()); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java index 5e622cb17019..9a4f74eba59c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java @@ -23,7 +23,9 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; +import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; @@ -44,6 +46,7 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest; @@ -92,11 +95,15 @@ public class KeyDeletingService extends AbstractKeyDeletingService { private final Map exclusiveReplicatedSizeMap; private final Set completedExclusiveSizeSet; private final Map snapshotSeekMap; + private AtomicBoolean isRunningOnAOS; + private final boolean deepCleanSnapshots; + private final SnapshotChainManager snapshotChainManager; public KeyDeletingService(OzoneManager ozoneManager, ScmBlockLocationProtocol scmClient, KeyManager manager, long serviceInterval, - long serviceTimeout, ConfigurationSource conf) { + long serviceTimeout, ConfigurationSource conf, + boolean deepCleanSnapshots) { 
super(KeyDeletingService.class.getSimpleName(), serviceInterval, TimeUnit.MILLISECONDS, KEY_DELETING_CORE_POOL_SIZE, serviceTimeout, ozoneManager, scmClient); @@ -111,6 +118,9 @@ public KeyDeletingService(OzoneManager ozoneManager, this.exclusiveReplicatedSizeMap = new HashMap<>(); this.completedExclusiveSizeSet = new HashSet<>(); this.snapshotSeekMap = new HashMap<>(); + this.isRunningOnAOS = new AtomicBoolean(false); + this.deepCleanSnapshots = deepCleanSnapshots; + this.snapshotChainManager = ((OmMetadataManagerImpl)manager.getMetadataManager()).getSnapshotChainManager(); } /** @@ -123,10 +133,14 @@ public AtomicLong getDeletedKeyCount() { return deletedKeyCount; } + public boolean isRunningOnAOS() { + return isRunningOnAOS.get(); + } + @Override public BackgroundTaskQueue getTasks() { BackgroundTaskQueue queue = new BackgroundTaskQueue(); - queue.add(new KeyDeletingTask()); + queue.add(new KeyDeletingTask(this)); return queue; } @@ -169,7 +183,12 @@ public void setKeyLimitPerTask(int keyLimitPerTask) { * the blocks info in its deletedBlockLog), it removes these keys from the * DB. */ - private class KeyDeletingTask implements BackgroundTask { + private final class KeyDeletingTask implements BackgroundTask { + private final KeyDeletingService deletingService; + + private KeyDeletingTask(KeyDeletingService service) { + this.deletingService = service; + } @Override public int getPriority() { @@ -183,7 +202,7 @@ public BackgroundTaskResult call() { if (shouldRun()) { final long run = getRunCount().incrementAndGet(); LOG.debug("Running KeyDeletingService {}", run); - + isRunningOnAOS.set(true); int delCount = 0; try { // TODO: [SNAPSHOT] HDDS-7968. Reclaim eligible key blocks in @@ -191,7 +210,9 @@ public BackgroundTaskResult call() { // doesn't have enough entries left. // OM would have to keep track of which snapshot the key is coming // from if the above would be done inside getPendingDeletionKeys(). - + // This is to avoid race condition b/w purge request and snapshot chain update. For AOS taking the global + // snapshotId since AOS could process multiple buckets in one iteration. + UUID expectedPreviousSnapshotId = snapshotChainManager.getLatestGlobalSnapshotId(); PendingKeysDeletion pendingKeysDeletion = manager .getPendingDeletionKeys(getKeyLimitPerTask()); List keyBlocksList = pendingKeysDeletion @@ -199,7 +220,7 @@ public BackgroundTaskResult call() { if (keyBlocksList != null && !keyBlocksList.isEmpty()) { delCount = processKeyDeletes(keyBlocksList, getOzoneManager().getKeyManager(), - pendingKeysDeletion.getKeysToModify(), null); + pendingKeysDeletion.getKeysToModify(), null, expectedPreviousSnapshotId); deletedKeyCount.addAndGet(delCount); } } catch (IOException e) { @@ -208,7 +229,7 @@ public BackgroundTaskResult call() { } try { - if (delCount < keyLimitPerTask) { + if (deepCleanSnapshots && delCount < keyLimitPerTask) { processSnapshotDeepClean(delCount); } } catch (Exception e) { @@ -217,6 +238,11 @@ public BackgroundTaskResult call() { } } + isRunningOnAOS.set(false); + synchronized (deletingService) { + this.deletingService.notify(); + } + // By design, no one cares about the results of this call back. 
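// The flag reset and notify above pair with SnapshotDeletingService#waitForKeyDeletingService later in
// this patch: the waiter loops on isRunningOnAOS() inside a synchronized block and calls wait(timeout),
// while the task clears the flag and notifies once its pass over the active object store is done. A
// self-contained illustration of that handshake; AosRunGate and its method names are invented for the
// example and are not Ozone APIs:
final class AosRunGate {
  private final java.util.concurrent.atomic.AtomicBoolean runningOnAos =
      new java.util.concurrent.atomic.AtomicBoolean(false);

  void runOnePass(Runnable work) {
    runningOnAos.set(true);
    try {
      work.run();                // process active object store entries
    } finally {
      runningOnAos.set(false);
      synchronized (this) {
        notifyAll();             // wake any waiter blocked until this pass finished
      }
    }
  }

  synchronized void awaitIdle(long timeoutMillis) throws InterruptedException {
    while (runningOnAos.get()) {
      wait(timeoutMillis);       // re-check after every wakeup to guard against spurious wakeups
    }
  }
}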
return EmptyTaskResult.newResult(); } @@ -239,15 +265,23 @@ private void processSnapshotDeepClean(int delCount) while (delCount < keyLimitPerTask && iterator.hasNext()) { List keysToPurge = new ArrayList<>(); HashMap keysToModify = new HashMap<>(); - SnapshotInfo currSnapInfo = iterator.next().getValue(); - + SnapshotInfo currSnapInfo = snapshotInfoTable.get(iterator.next().getKey()); // Deep clean only on active snapshot. Deleted Snapshots will be // cleaned up by SnapshotDeletingService. - if (currSnapInfo.getSnapshotStatus() != SNAPSHOT_ACTIVE || + if (currSnapInfo == null || currSnapInfo.getSnapshotStatus() != SNAPSHOT_ACTIVE || currSnapInfo.getDeepClean()) { continue; } + SnapshotInfo prevSnapInfo = SnapshotUtils.getPreviousSnapshot(getOzoneManager(), snapChainManager, + currSnapInfo); + if (prevSnapInfo != null && + (prevSnapInfo.getSnapshotStatus() != SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE || + !OmSnapshotManager.areSnapshotChangesFlushedToDB(getOzoneManager().getMetadataManager(), + prevSnapInfo))) { + continue; + } + try (ReferenceCounted rcCurrOmSnapshot = omSnapshotManager.getSnapshot( currSnapInfo.getVolumeName(), @@ -276,11 +310,13 @@ private void processSnapshotDeepClean(int delCount) } String snapshotBucketKey = dbBucketKey + OzoneConsts.OM_KEY_PREFIX; - SnapshotInfo previousSnapshot = getPreviousActiveSnapshot(currSnapInfo, snapChainManager); + SnapshotInfo previousSnapshot = SnapshotUtils.getPreviousSnapshot(getOzoneManager(), snapChainManager, + currSnapInfo); SnapshotInfo previousToPrevSnapshot = null; if (previousSnapshot != null) { - previousToPrevSnapshot = getPreviousActiveSnapshot(previousSnapshot, snapChainManager); + previousToPrevSnapshot = SnapshotUtils.getPreviousSnapshot(getOzoneManager(), snapChainManager, + previousSnapshot); } Table previousKeyTable = null; @@ -409,7 +445,8 @@ private void processSnapshotDeepClean(int delCount) if (!keysToPurge.isEmpty()) { processKeyDeletes(keysToPurge, currOmSnapshot.getKeyManager(), - keysToModify, currSnapInfo.getTableKey()); + keysToModify, currSnapInfo.getTableKey(), + Optional.ofNullable(previousSnapshot).map(SnapshotInfo::getSnapshotId).orElse(null)); } } finally { IOUtils.closeQuietly(rcPrevOmSnapshot, rcPrevToPrevOmSnapshot); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java index f85bd781b050..edc6c7a16296 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java @@ -20,54 +20,49 @@ import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.ServiceException; -import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.utils.BackgroundTask; import org.apache.hadoop.hdds.utils.BackgroundTaskQueue; import org.apache.hadoop.hdds.utils.BackgroundTaskResult; import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.ClientVersion; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.common.BlockGroup; import 
org.apache.hadoop.ozone.lock.BootstrapStateHandler; +import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PurgePathRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveDeletedKeysRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveKeyInfos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveTableKeysRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotPurgeRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; -import org.apache.hadoop.util.Time; import org.apache.ratis.protocol.ClientId; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; +import java.util.Iterator; import java.util.List; +import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; -import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.hdds.HddsUtils.toProtobuf; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_KEY_DELETING_LIMIT_PER_TASK; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_KEY_DELETING_LIMIT_PER_TASK_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.SNAPSHOT_DELETING_LIMIT_PER_TASK; @@ -93,16 +88,17 @@ public class SnapshotDeletingService extends AbstractKeyDeletingService { private final AtomicBoolean suspended; private final OzoneConfiguration conf; private final AtomicLong successRunCount; - private final long snapshotDeletionPerTask; - private final int keyLimitPerSnapshot; + private final int keyLimitPerTask; + private final int snapshotDeletionPerTask; private final int ratisByteLimit; + private final long serviceTimeout; public SnapshotDeletingService(long interval, long serviceTimeout, - OzoneManager ozoneManager, ScmBlockLocationProtocol scmClient) + OzoneManager ozoneManager) throws IOException { super(SnapshotDeletingService.class.getSimpleName(), interval, TimeUnit.MILLISECONDS, SNAPSHOT_DELETING_CORE_POOL_SIZE, - serviceTimeout, ozoneManager, scmClient); + serviceTimeout, ozoneManager, null); this.ozoneManager = ozoneManager; this.omSnapshotManager = ozoneManager.getOmSnapshotManager(); OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) @@ -111,8 +107,7 @@ public 
SnapshotDeletingService(long interval, long serviceTimeout, this.successRunCount = new AtomicLong(0); this.suspended = new AtomicBoolean(false); this.conf = ozoneManager.getConfiguration(); - this.snapshotDeletionPerTask = conf - .getLong(SNAPSHOT_DELETING_LIMIT_PER_TASK, + this.snapshotDeletionPerTask = conf.getInt(SNAPSHOT_DELETING_LIMIT_PER_TASK, SNAPSHOT_DELETING_LIMIT_PER_TASK_DEFAULT); int limit = (int) conf.getStorageSize( OMConfigKeys.OZONE_OM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT, @@ -120,9 +115,35 @@ public SnapshotDeletingService(long interval, long serviceTimeout, StorageUnit.BYTES); // always go to 90% of max limit for request as other header will be added this.ratisByteLimit = (int) (limit * 0.9); - this.keyLimitPerSnapshot = conf.getInt( + this.keyLimitPerTask = conf.getInt( OZONE_SNAPSHOT_KEY_DELETING_LIMIT_PER_TASK, OZONE_SNAPSHOT_KEY_DELETING_LIMIT_PER_TASK_DEFAULT); + this.serviceTimeout = serviceTimeout; + } + + // Wait for a notification from KeyDeletingService if the key deletion is running. This is to ensure, merging of + // entries do not start while the AOS is still processing the deleted keys. + @VisibleForTesting + public void waitForKeyDeletingService() throws InterruptedException { + KeyDeletingService keyDeletingService = getOzoneManager().getKeyManager().getDeletingService(); + synchronized (keyDeletingService) { + while (keyDeletingService.isRunningOnAOS()) { + keyDeletingService.wait(serviceTimeout); + } + } + } + + // Wait for a notification from DirectoryDeletingService if the directory deletion is running. This is to ensure, + // merging of entries do not start while the AOS is still processing the deleted keys. + @VisibleForTesting + public void waitForDirDeletingService() throws InterruptedException { + DirectoryDeletingService directoryDeletingService = getOzoneManager().getKeyManager() + .getDirDeletingService(); + synchronized (directoryDeletingService) { + while (directoryDeletingService.isRunningOnAOS()) { + directoryDeletingService.wait(serviceTimeout); + } + } } private class SnapshotDeletingTask implements BackgroundTask { @@ -136,316 +157,89 @@ public BackgroundTaskResult call() throws InterruptedException { getRunCount().incrementAndGet(); - ReferenceCounted rcOmSnapshot = null; - ReferenceCounted rcOmPreviousSnapshot = null; - - Table snapshotInfoTable = - ozoneManager.getMetadataManager().getSnapshotInfoTable(); - List purgeSnapshotKeys = new ArrayList<>(); - try (TableIterator> iterator = snapshotInfoTable.iterator()) { - + try { + int remaining = keyLimitPerTask; + Iterator iterator = chainManager.iterator(true); + List snapshotsToBePurged = new ArrayList<>(); long snapshotLimit = snapshotDeletionPerTask; - - while (iterator.hasNext() && snapshotLimit > 0) { - SnapshotInfo snapInfo = iterator.next().getValue(); - - // Only Iterate in deleted snapshot + while (iterator.hasNext() && snapshotLimit > 0 && remaining > 0) { + SnapshotInfo snapInfo = SnapshotUtils.getSnapshotInfo(ozoneManager, chainManager, iterator.next()); if (shouldIgnoreSnapshot(snapInfo)) { continue; } - - // Note: Can refactor this to use try-with-resources. - // Handling RC decrements manually for now to minimize conflicts. 
- rcOmSnapshot = omSnapshotManager.getSnapshot( - snapInfo.getVolumeName(), - snapInfo.getBucketName(), - snapInfo.getName()); - OmSnapshot omSnapshot = rcOmSnapshot.get(); - - Table snapshotDeletedTable = - omSnapshot.getMetadataManager().getDeletedTable(); - Table snapshotDeletedDirTable = - omSnapshot.getMetadataManager().getDeletedDirTable(); - - Table renamedTable = - omSnapshot.getMetadataManager().getSnapshotRenamedTable(); - - long volumeId = ozoneManager.getMetadataManager() - .getVolumeId(snapInfo.getVolumeName()); - // Get bucketInfo for the snapshot bucket to get bucket layout. - String dbBucketKey = ozoneManager.getMetadataManager().getBucketKey( - snapInfo.getVolumeName(), snapInfo.getBucketName()); - OmBucketInfo bucketInfo = ozoneManager.getMetadataManager() - .getBucketTable().get(dbBucketKey); - - if (bucketInfo == null) { - // Decrement ref count - rcOmSnapshot.close(); - rcOmSnapshot = null; - throw new IllegalStateException("Bucket " + "/" + - snapInfo.getVolumeName() + "/" + snapInfo.getBucketName() + - " is not found. BucketInfo should not be null for snapshotted" + - " bucket. The OM is in unexpected state."); - } - - String snapshotBucketKey = dbBucketKey + OzoneConsts.OM_KEY_PREFIX; - String dbBucketKeyForDir = ozoneManager.getMetadataManager() - .getBucketKey(Long.toString(volumeId), - Long.toString(bucketInfo.getObjectID())) + OM_KEY_PREFIX; - - if (isSnapshotReclaimable(snapshotDeletedTable, - snapshotDeletedDirTable, snapshotBucketKey, dbBucketKeyForDir)) { - purgeSnapshotKeys.add(snapInfo.getTableKey()); - // Decrement ref count - rcOmSnapshot.close(); - rcOmSnapshot = null; + LOG.info("Started Snapshot Deletion Processing for snapshot : {}", snapInfo.getTableKey()); + SnapshotInfo nextSnapshot = SnapshotUtils.getNextSnapshot(ozoneManager, chainManager, snapInfo); + // Continue if the next snapshot is not active. This is to avoid unnecessary copies from one snapshot to + // another. + if (nextSnapshot != null && + nextSnapshot.getSnapshotStatus() != SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE) { continue; } - //TODO: [SNAPSHOT] Add lock to deletedTable and Active DB. - SnapshotInfo previousSnapshot = getPreviousActiveSnapshot(snapInfo, chainManager); - Table previousKeyTable = null; - Table previousDirTable = null; - OmSnapshot omPreviousSnapshot = null; - - // Split RepeatedOmKeyInfo and update current snapshot deletedKeyTable - // and next snapshot deletedKeyTable. - if (previousSnapshot != null) { - rcOmPreviousSnapshot = omSnapshotManager.getSnapshot( - previousSnapshot.getVolumeName(), - previousSnapshot.getBucketName(), - previousSnapshot.getName()); - omPreviousSnapshot = rcOmPreviousSnapshot.get(); - - previousKeyTable = omPreviousSnapshot - .getMetadataManager().getKeyTable(bucketInfo.getBucketLayout()); - previousDirTable = omPreviousSnapshot - .getMetadataManager().getDirectoryTable(); + // nextSnapshot = null means entries would be moved to AOS. + if (nextSnapshot == null) { + waitForKeyDeletingService(); + waitForDirDeletingService(); } - - // Move key to either next non deleted snapshot's deletedTable - // or keep it in current snapshot deleted table. 
- List toReclaimList = new ArrayList<>(); - List toNextDBList = new ArrayList<>(); - // A list of renamed keys/files/dirs - List renamedList = new ArrayList<>(); - List dirsToMove = new ArrayList<>(); - - long remainNum = handleDirectoryCleanUp(snapshotDeletedDirTable, - previousDirTable, renamedTable, dbBucketKeyForDir, snapInfo, - omSnapshot, dirsToMove, renamedList); - int deletionCount = 0; - - try (TableIterator> deletedIterator = snapshotDeletedTable - .iterator()) { - - List keysToPurge = new ArrayList<>(); - deletedIterator.seek(snapshotBucketKey); - - while (deletedIterator.hasNext() && - deletionCount < remainNum) { - Table.KeyValue - deletedKeyValue = deletedIterator.next(); - String deletedKey = deletedKeyValue.getKey(); - - // Exit if it is out of the bucket scope. - if (!deletedKey.startsWith(snapshotBucketKey)) { - // If snapshot deletedKeyTable doesn't have any - // entry in the snapshot scope it can be reclaimed - break; - } - - RepeatedOmKeyInfo repeatedOmKeyInfo = deletedKeyValue.getValue(); - - SnapshotMoveKeyInfos.Builder toReclaim = SnapshotMoveKeyInfos - .newBuilder() - .setKey(deletedKey); - SnapshotMoveKeyInfos.Builder toNextDb = SnapshotMoveKeyInfos - .newBuilder() - .setKey(deletedKey); - HddsProtos.KeyValue.Builder renamedKey = HddsProtos.KeyValue - .newBuilder(); - - for (OmKeyInfo keyInfo : repeatedOmKeyInfo.getOmKeyInfoList()) { - splitRepeatedOmKeyInfo(toReclaim, toNextDb, renamedKey, - keyInfo, previousKeyTable, renamedTable, - bucketInfo, volumeId); + try (ReferenceCounted snapshot = omSnapshotManager.getSnapshot( + snapInfo.getVolumeName(), snapInfo.getBucketName(), snapInfo.getName())) { + KeyManager snapshotKeyManager = snapshot.get().getKeyManager(); + int moveCount = 0; + // Get all entries from deletedKeyTable. + List>> deletedKeyEntries = + snapshotKeyManager.getDeletedKeyEntries(snapInfo.getVolumeName(), snapInfo.getBucketName(), + null, remaining); + moveCount += deletedKeyEntries.size(); + // Get all entries from deletedDirTable. + List> deletedDirEntries = snapshotKeyManager.getDeletedDirEntries( + snapInfo.getVolumeName(), snapInfo.getBucketName(), remaining - moveCount); + moveCount += deletedDirEntries.size(); + // Get all entries from snapshotRenamedTable. + List> renameEntries = snapshotKeyManager.getRenamesKeyEntries( + snapInfo.getVolumeName(), snapInfo.getBucketName(), null, remaining - moveCount); + moveCount += renameEntries.size(); + if (moveCount > 0) { + List deletedKeys = new ArrayList<>(deletedKeyEntries.size()); + List deletedDirs = new ArrayList<>(deletedDirEntries.size()); + List renameKeys = new ArrayList<>(renameEntries.size()); + + // Convert deletedKeyEntries to SnapshotMoveKeyInfos. + for (Table.KeyValue> deletedEntry : deletedKeyEntries) { + deletedKeys.add(SnapshotMoveKeyInfos.newBuilder().setKey(deletedEntry.getKey()) + .addAllKeyInfos(deletedEntry.getValue() + .stream().map(val -> val.getProtobuf(ClientVersion.CURRENT_VERSION)) + .collect(Collectors.toList())).build()); } - // If all the KeyInfos are reclaimable in RepeatedOmKeyInfo - // then no need to update current snapshot deletedKeyTable. - if (!(toReclaim.getKeyInfosCount() == - repeatedOmKeyInfo.getOmKeyInfoList().size())) { - toReclaimList.add(toReclaim.build()); - toNextDBList.add(toNextDb.build()); - } else { - // The key can be reclaimed here. 
- List blocksForKeyDelete = omSnapshot - .getMetadataManager() - .getBlocksForKeyDelete(deletedKey); - if (blocksForKeyDelete != null) { - keysToPurge.addAll(blocksForKeyDelete); - } + // Convert deletedDirEntries to SnapshotMoveKeyInfos. + for (Table.KeyValue deletedDirEntry : deletedDirEntries) { + deletedDirs.add(SnapshotMoveKeyInfos.newBuilder().setKey(deletedDirEntry.getKey()) + .addKeyInfos(deletedDirEntry.getValue().getProtobuf(ClientVersion.CURRENT_VERSION)).build()); } - if (renamedKey.hasKey() && renamedKey.hasValue()) { - renamedList.add(renamedKey.build()); + // Convert renamedEntries to KeyValue. + for (Table.KeyValue renameEntry : renameEntries) { + renameKeys.add(HddsProtos.KeyValue.newBuilder().setKey(renameEntry.getKey()) + .setValue(renameEntry.getValue()).build()); } - deletionCount++; + submitSnapshotMoveDeletedKeys(snapInfo, deletedKeys, renameKeys, deletedDirs); + remaining -= moveCount; + } else { + snapshotsToBePurged.add(snapInfo.getTableKey()); } - - // Delete keys From deletedTable - processKeyDeletes(keysToPurge, omSnapshot.getKeyManager(), - null, snapInfo.getTableKey()); - successRunCount.incrementAndGet(); - } catch (IOException ex) { - LOG.error("Error while running Snapshot Deleting Service for " + - "snapshot " + snapInfo.getTableKey() + " with snapshotId " + - snapInfo.getSnapshotId() + ". Processed " + deletionCount + - " keys and " + (keyLimitPerSnapshot - remainNum) + - " directories and files", ex); } + successRunCount.incrementAndGet(); snapshotLimit--; - // Submit Move request to OM. - submitSnapshotMoveDeletedKeys(snapInfo, toReclaimList, - toNextDBList, renamedList, dirsToMove); - - // Properly decrement ref count for rcOmPreviousSnapshot - if (rcOmPreviousSnapshot != null) { - rcOmPreviousSnapshot.close(); - rcOmPreviousSnapshot = null; - } + } + if (!snapshotsToBePurged.isEmpty()) { + submitSnapshotPurgeRequest(snapshotsToBePurged); } } catch (IOException e) { LOG.error("Error while running Snapshot Deleting Service", e); - } finally { - // Decrement ref counts - if (rcOmPreviousSnapshot != null) { - rcOmPreviousSnapshot.close(); - } - if (rcOmSnapshot != null) { - rcOmSnapshot.close(); - } } - submitSnapshotPurgeRequest(purgeSnapshotKeys); - return BackgroundTaskResult.EmptyTaskResult.newResult(); } - private boolean isSnapshotReclaimable( - Table snapshotDeletedTable, - Table snapshotDeletedDirTable, - String snapshotBucketKey, String dbBucketKeyForDir) throws IOException { - - boolean isDirTableCleanedUp = false; - boolean isKeyTableCleanedUp = false; - try (TableIterator> iterator = snapshotDeletedTable.iterator();) { - iterator.seek(snapshotBucketKey); - // If the next entry doesn't start with snapshotBucketKey then - // deletedKeyTable is already cleaned up. - isKeyTableCleanedUp = !iterator.hasNext() || !iterator.next().getKey() - .startsWith(snapshotBucketKey); - } - - try (TableIterator> - iterator = snapshotDeletedDirTable.iterator()) { - iterator.seek(dbBucketKeyForDir); - // If the next entry doesn't start with dbBucketKeyForDir then - // deletedDirTable is already cleaned up. 
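Once the three lists are gathered, the task either ships them as a single SnapshotMoveTableKeys request and charges the count against the run's budget, or, when nothing is left to move for the bucket prefix, queues the snapshot's table key so one batched SnapshotPurge request covers every drained snapshot at the end of the run. Sketched with the variables from the hunk above:

    // Sketch only: move-or-purge decision per snapshot, then one purge for the batch.
    if (moveCount > 0) {
      submitSnapshotMoveDeletedKeys(snapInfo, deletedKeys, renameKeys, deletedDirs);
      remaining -= moveCount;                          // budget shared across snapshots in this run
    } else {
      snapshotsToBePurged.add(snapInfo.getTableKey()); // fully drained, safe to purge
    }
    // After the snapshot loop:
    if (!snapshotsToBePurged.isEmpty()) {
      submitSnapshotPurgeRequest(snapshotsToBePurged); // single request for all drained snapshots
    }
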
- isDirTableCleanedUp = !iterator.hasNext() || !iterator.next().getKey() - .startsWith(dbBucketKeyForDir); - } - - return (isDirTableCleanedUp || snapshotDeletedDirTable.isEmpty()) && - (isKeyTableCleanedUp || snapshotDeletedTable.isEmpty()); - } - - @SuppressWarnings("checkstyle:ParameterNumber") - private long handleDirectoryCleanUp( - Table snapshotDeletedDirTable, - Table previousDirTable, - Table renamedTable, - String dbBucketKeyForDir, SnapshotInfo snapInfo, - OmSnapshot omSnapshot, List dirsToMove, - List renamedList) { - - long dirNum = 0L; - long subDirNum = 0L; - long subFileNum = 0L; - long remainNum = keyLimitPerSnapshot; - int consumedSize = 0; - List purgePathRequestList = new ArrayList<>(); - List> allSubDirList - = new ArrayList<>(keyLimitPerSnapshot); - try (TableIterator> deletedDirIterator = - snapshotDeletedDirTable.iterator()) { - - long startTime = Time.monotonicNow(); - deletedDirIterator.seek(dbBucketKeyForDir); - - while (deletedDirIterator.hasNext()) { - Table.KeyValue deletedDir = - deletedDirIterator.next(); - String deletedDirKey = deletedDir.getKey(); - - // Exit for dirs out of snapshot scope. - if (!deletedDirKey.startsWith(dbBucketKeyForDir)) { - break; - } - - if (isDirReclaimable(deletedDir, previousDirTable, - renamedTable, renamedList)) { - // Reclaim here - PurgePathRequest request = prepareDeleteDirRequest( - remainNum, deletedDir.getValue(), deletedDir.getKey(), - allSubDirList, omSnapshot.getKeyManager()); - if (isBufferLimitCrossed(ratisByteLimit, consumedSize, - request.getSerializedSize())) { - if (purgePathRequestList.size() != 0) { - // if message buffer reaches max limit, avoid sending further - remainNum = 0; - break; - } - // if directory itself is having a lot of keys / files, - // reduce capacity to minimum level - remainNum = MIN_ERR_LIMIT_PER_TASK; - request = prepareDeleteDirRequest( - remainNum, deletedDir.getValue(), deletedDir.getKey(), - allSubDirList, omSnapshot.getKeyManager()); - } - consumedSize += request.getSerializedSize(); - purgePathRequestList.add(request); - remainNum = remainNum - request.getDeletedSubFilesCount(); - remainNum = remainNum - request.getMarkDeletedSubDirsCount(); - // Count up the purgeDeletedDir, subDirs and subFiles - if (request.getDeletedDir() != null - && !request.getDeletedDir().isEmpty()) { - dirNum++; - } - subDirNum += request.getMarkDeletedSubDirsCount(); - subFileNum += request.getDeletedSubFilesCount(); - } else { - dirsToMove.add(deletedDir.getKey()); - } - } - - remainNum = optimizeDirDeletesAndSubmitRequest(remainNum, dirNum, - subDirNum, subFileNum, allSubDirList, purgePathRequestList, - snapInfo.getTableKey(), startTime, ratisByteLimit - consumedSize, - omSnapshot.getKeyManager()); - } catch (IOException e) { - LOG.error("Error while running delete directories and files for " + - "snapshot " + snapInfo.getTableKey() + " in snapshot deleting " + - "background task. 
Will retry at next run.", e); - } - - return remainNum; - } - private void submitSnapshotPurgeRequest(List purgeSnapshotKeys) { if (!purgeSnapshotKeys.isEmpty()) { SnapshotPurgeRequest snapshotPurgeRequest = SnapshotPurgeRequest @@ -463,92 +257,36 @@ private void submitSnapshotPurgeRequest(List purgeSnapshotKeys) { } } - @SuppressWarnings("checkstyle:ParameterNumber") - private void splitRepeatedOmKeyInfo(SnapshotMoveKeyInfos.Builder toReclaim, - SnapshotMoveKeyInfos.Builder toNextDb, - HddsProtos.KeyValue.Builder renamedKey, OmKeyInfo keyInfo, - Table previousKeyTable, - Table renamedTable, - OmBucketInfo bucketInfo, long volumeId) throws IOException { - - if (isKeyReclaimable(previousKeyTable, renamedTable, - keyInfo, bucketInfo, volumeId, renamedKey)) { - // Update in current db's deletedKeyTable - toReclaim.addKeyInfos(keyInfo - .getProtobuf(ClientVersion.CURRENT_VERSION)); - } else { - // Move to next non deleted snapshot's deleted table - toNextDb.addKeyInfos(keyInfo.getProtobuf( - ClientVersion.CURRENT_VERSION)); - } - } - - private boolean isDirReclaimable( - Table.KeyValue deletedDir, - Table previousDirTable, - Table renamedTable, - List renamedList) throws IOException { - - if (previousDirTable == null) { - return true; - } - - String deletedDirDbKey = deletedDir.getKey(); - OmKeyInfo deletedDirInfo = deletedDir.getValue(); - String dbRenameKey = ozoneManager.getMetadataManager().getRenameKey( - deletedDirInfo.getVolumeName(), deletedDirInfo.getBucketName(), - deletedDirInfo.getObjectID()); - - /* - snapshotRenamedTable: /volumeName/bucketName/objectID -> - /volumeId/bucketId/parentId/dirName - */ - String dbKeyBeforeRename = renamedTable.getIfExist(dbRenameKey); - String prevDbKey = null; - - if (dbKeyBeforeRename != null) { - prevDbKey = dbKeyBeforeRename; - HddsProtos.KeyValue renamedDir = HddsProtos.KeyValue - .newBuilder() - .setKey(dbRenameKey) - .setValue(dbKeyBeforeRename) - .build(); - renamedList.add(renamedDir); - } else { - // In OMKeyDeleteResponseWithFSO OzonePathKey is converted to - // OzoneDeletePathKey. Changing it back to check the previous DirTable. 
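The removed splitRepeatedOmKeyInfo and isDirReclaimable methods show what the service no longer decides inline: whether a deleted key or directory still exists in the previous snapshot. With this change the service ships raw table entries and the reclaim decision presumably happens where SnapshotMoveTableKeys is applied. For reference, the core of the dropped directory check was a rename-table lookup, roughly:

    // Sketch only: the directory-reclaimability check removed above. snapshotRenamedTable
    // maps /volumeName/bucketName/objectID -> /volumeId/bucketId/parentId/dirName.
    String renameKey = metadataManager.getRenameKey(
        dirInfo.getVolumeName(), dirInfo.getBucketName(), dirInfo.getObjectID());
    String keyBeforeRename = renamedTable.getIfExist(renameKey);
    String prevDbKey = keyBeforeRename != null
        ? keyBeforeRename                                            // directory was renamed
        : metadataManager.getOzoneDeletePathDirKey(deletedDirDbKey); // convert delete-path key back
    OmDirectoryInfo prevDir = previousDirTable.get(prevDbKey);
    boolean reclaimable = prevDir == null
        || prevDir.getObjectID() != dirInfo.getObjectID();           // same path, different object
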
- prevDbKey = ozoneManager.getMetadataManager() - .getOzoneDeletePathDirKey(deletedDirDbKey); - } - - OmDirectoryInfo prevDirectoryInfo = previousDirTable.get(prevDbKey); - if (prevDirectoryInfo == null) { - return true; - } - - return prevDirectoryInfo.getObjectID() != deletedDirInfo.getObjectID(); - } - - public void submitSnapshotMoveDeletedKeys(SnapshotInfo snapInfo, - List toReclaimList, - List toNextDBList, - List renamedList, - List dirsToMove) throws InterruptedException { + private void submitSnapshotMoveDeletedKeys(SnapshotInfo snapInfo, + List deletedKeys, + List renamedList, + List dirsToMove) { - SnapshotMoveDeletedKeysRequest.Builder moveDeletedKeysBuilder = - SnapshotMoveDeletedKeysRequest.newBuilder() - .setFromSnapshot(snapInfo.getProtobuf()); + SnapshotMoveTableKeysRequest.Builder moveDeletedKeysBuilder = SnapshotMoveTableKeysRequest.newBuilder() + .setFromSnapshotID(toProtobuf(snapInfo.getSnapshotId())); - SnapshotMoveDeletedKeysRequest moveDeletedKeys = moveDeletedKeysBuilder - .addAllReclaimKeys(toReclaimList) - .addAllNextDBKeys(toNextDBList) + SnapshotMoveTableKeysRequest moveDeletedKeys = moveDeletedKeysBuilder + .addAllDeletedKeys(deletedKeys) .addAllRenamedKeys(renamedList) - .addAllDeletedDirsToMove(dirsToMove) + .addAllDeletedDirs(dirsToMove) .build(); + if (isBufferLimitCrossed(ratisByteLimit, 0, moveDeletedKeys.getSerializedSize())) { + int remaining = MIN_ERR_LIMIT_PER_TASK; + deletedKeys = deletedKeys.subList(0, Math.min(remaining, deletedKeys.size())); + remaining -= deletedKeys.size(); + renamedList = renamedList.subList(0, Math.min(remaining, renamedList.size())); + remaining -= renamedList.size(); + dirsToMove = dirsToMove.subList(0, Math.min(remaining, dirsToMove.size())); + moveDeletedKeys = moveDeletedKeysBuilder + .addAllDeletedKeys(deletedKeys) + .addAllRenamedKeys(renamedList) + .addAllDeletedDirs(dirsToMove) + .build(); + } OMRequest omRequest = OMRequest.newBuilder() - .setCmdType(Type.SnapshotMoveDeletedKeys) - .setSnapshotMoveDeletedKeysRequest(moveDeletedKeys) + .setCmdType(Type.SnapshotMoveTableKeys) + .setSnapshotMoveTableKeysRequest(moveDeletedKeys) .setClientId(clientId.toString()) .build(); @@ -557,20 +295,26 @@ public void submitSnapshotMoveDeletedKeys(SnapshotInfo snapInfo, } } - public void submitRequest(OMRequest omRequest) { + private void submitRequest(OMRequest omRequest) { try { OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, getRunCount().get()); } catch (ServiceException e) { - LOG.error("Snapshot Deleting request failed. " + - "Will retry at next run.", e); + LOG.error("Request: {} fired by SnapshotDeletingService failed. Will retry in the next run", omRequest, e); } } } + /** + * Checks if a given snapshot has been deleted and all the changes made to snapshot have been flushed to disk. + * @param snapInfo SnapshotInfo corresponding to the snapshot. + * @return true if the snapshot is still active or changes to snapshot have not been flushed to disk otherwise false. + * @throws IOException + */ @VisibleForTesting - boolean shouldIgnoreSnapshot(SnapshotInfo snapInfo) { + boolean shouldIgnoreSnapshot(SnapshotInfo snapInfo) throws IOException { SnapshotInfo.SnapshotStatus snapshotStatus = snapInfo.getSnapshotStatus(); - return snapshotStatus != SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED; + return snapshotStatus != SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED || + !OmSnapshotManager.areSnapshotChangesFlushedToDB(getOzoneManager().getMetadataManager(), snapInfo); } // TODO: Move this util class. 
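submitSnapshotMoveDeletedKeys now guards the Ratis payload: if the serialized request crosses ratisByteLimit, the three lists are trimmed to a MIN_ERR_LIMIT_PER_TASK budget and the request is rebuilt. The hunk reuses the original builder when rebuilding, so the sketch below clears the repeated fields before re-adding; whether the production code needs the same is left to review. shouldIgnoreSnapshot also gains a second condition, skipping a DELETED snapshot until its changes are flushed to the DB.

    // Sketch only: size-guarded rebuild of the move request. Field and helper names
    // follow the hunk; clearing the repeated fields first is this sketch's own choice
    // to avoid appending to the already-populated builder.
    if (isBufferLimitCrossed(ratisByteLimit, 0, moveDeletedKeys.getSerializedSize())) {
      int budget = MIN_ERR_LIMIT_PER_TASK;
      deletedKeys = deletedKeys.subList(0, Math.min(budget, deletedKeys.size()));
      budget -= deletedKeys.size();
      renamedList = renamedList.subList(0, Math.min(budget, renamedList.size()));
      budget -= renamedList.size();
      dirsToMove = dirsToMove.subList(0, Math.min(budget, dirsToMove.size()));
      moveDeletedKeys = moveDeletedKeysBuilder
          .clearDeletedKeys().clearRenamedKeys().clearDeletedDirs()
          .addAllDeletedKeys(deletedKeys)
          .addAllRenamedKeys(renamedList)
          .addAllDeletedDirs(dirsToMove)
          .build();
    }
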
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java index 26d5d24a8a03..e7133e625896 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java @@ -61,6 +61,7 @@ import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.getDirectoryInfo; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getOzonePathKeyForFso; +import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getPreviousSnapshot; /** * Snapshot BG Service for deleted directory deep clean and exclusive size @@ -143,11 +144,11 @@ public BackgroundTaskResult call() { > iterator = snapshotInfoTable.iterator()) { while (iterator.hasNext()) { - SnapshotInfo currSnapInfo = iterator.next().getValue(); + SnapshotInfo currSnapInfo = snapshotInfoTable.get(iterator.next().getKey()); // Expand deleted dirs only on active snapshot. Deleted Snapshots // will be cleaned up by SnapshotDeletingService. - if (currSnapInfo.getSnapshotStatus() != SNAPSHOT_ACTIVE || + if (currSnapInfo == null || currSnapInfo.getSnapshotStatus() != SNAPSHOT_ACTIVE || currSnapInfo.getDeepCleanedDeletedDir()) { continue; } @@ -173,7 +174,7 @@ public BackgroundTaskResult call() { "unexpected state."); } - SnapshotInfo previousSnapshot = getPreviousActiveSnapshot(currSnapInfo, snapChainManager); + SnapshotInfo previousSnapshot = getPreviousSnapshot(getOzoneManager(), snapChainManager, currSnapInfo); SnapshotInfo previousToPrevSnapshot = null; Table previousKeyTable = null; @@ -190,7 +191,7 @@ public BackgroundTaskResult call() { .getKeyTable(bucketInfo.getBucketLayout()); prevRenamedTable = omPreviousSnapshot .getMetadataManager().getSnapshotRenamedTable(); - previousToPrevSnapshot = getPreviousActiveSnapshot(previousSnapshot, snapChainManager); + previousToPrevSnapshot = getPreviousSnapshot(getOzoneManager(), snapChainManager, previousSnapshot); } Table previousToPrevKeyTable = null; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java index 05b0e5b0cdc5..b400fb6ed769 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java @@ -77,7 +77,7 @@ public static Object getINode(Path file) throws IOException { * sst compaction backup directory) * * @param truncateLength - Length of initial path to trim in file path. - * @param hardLinkFiles - Map of link->file paths. + * @param hardLinkFiles - Map of link->file paths. * @return Path to the file of links created. 
*/ public static Path createHardLinkList(int truncateLength, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java index db6d9b7b9084..6393f12066c4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java @@ -284,7 +284,8 @@ private boolean initNativeLibraryForEfficientDiff(final OzoneConfiguration conf) try { return ManagedRawSSTFileReader.loadLibrary(); } catch (NativeLibraryNotLoadedException e) { - LOG.error("Native Library for raw sst file reading loading failed.", e); + LOG.warn("Native Library for raw sst file reading loading failed." + + " Fallback to performing a full diff instead. {}", e.getMessage()); return false; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java index e0f40dabd8a7..201a9fe0c9c9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java @@ -24,8 +24,11 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDBException; import org.slf4j.Logger; @@ -33,9 +36,13 @@ import java.io.File; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import java.util.NoSuchElementException; import java.util.HashMap; import java.util.Map; +import java.util.Objects; +import java.util.Optional; import java.util.UUID; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; @@ -86,6 +93,13 @@ public static SnapshotInfo getSnapshotInfo(final OzoneManager ozoneManager, return snapshotInfo; } + public static SnapshotInfo getSnapshotInfo(OzoneManager ozoneManager, + SnapshotChainManager chainManager, + UUID snapshotId) throws IOException { + String tableKey = chainManager.getTableKey(snapshotId); + return SnapshotUtils.getSnapshotInfo(ozoneManager, tableKey); + } + public static void dropColumnFamilyHandle( final ManagedRocksDB rocksDB, final ColumnFamilyHandle columnFamilyHandle) { @@ -139,36 +153,24 @@ public static void checkSnapshotActive(SnapshotInfo snapInfo, } /** - * Get the next non deleted snapshot in the snapshot chain. + * Get the next snapshot in the snapshot chain. 
*/ - public static SnapshotInfo getNextActiveSnapshot(SnapshotInfo snapInfo, - SnapshotChainManager chainManager, OzoneManager ozoneManager) + public static SnapshotInfo getNextSnapshot(OzoneManager ozoneManager, + SnapshotChainManager chainManager, + SnapshotInfo snapInfo) throws IOException { - // If the snapshot is deleted in the previous run, then the in-memory // SnapshotChainManager might throw NoSuchElementException as the snapshot // is removed in-memory but OMDoubleBuffer has not flushed yet. if (snapInfo == null) { - throw new OMException("Snapshot Info is null. Cannot get the next snapshot", INVALID_SNAPSHOT_ERROR); + throw new OMException("Provided Snapshot Info argument is null. Cannot get the next snapshot for a null value", + INVALID_SNAPSHOT_ERROR); } - try { - while (chainManager.hasNextPathSnapshot(snapInfo.getSnapshotPath(), + if (chainManager.hasNextPathSnapshot(snapInfo.getSnapshotPath(), snapInfo.getSnapshotId())) { - - UUID nextPathSnapshot = - chainManager.nextPathSnapshot( - snapInfo.getSnapshotPath(), snapInfo.getSnapshotId()); - - String tableKey = chainManager.getTableKey(nextPathSnapshot); - SnapshotInfo nextSnapshotInfo = getSnapshotInfo(ozoneManager, tableKey); - - if (nextSnapshotInfo.getSnapshotStatus().equals( - SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE)) { - return nextSnapshotInfo; - } - - snapInfo = nextSnapshotInfo; + UUID nextPathSnapshot = chainManager.nextPathSnapshot(snapInfo.getSnapshotPath(), snapInfo.getSnapshotId()); + return getSnapshotInfo(ozoneManager, chainManager, nextPathSnapshot); } } catch (NoSuchElementException ex) { LOG.error("The snapshot {} is not longer in snapshot chain, It " + @@ -178,6 +180,41 @@ public static SnapshotInfo getNextActiveSnapshot(SnapshotInfo snapInfo, return null; } + /** + * Get the previous snapshot in the snapshot chain. + */ + public static SnapshotInfo getPreviousSnapshot(OzoneManager ozoneManager, + SnapshotChainManager chainManager, + SnapshotInfo snapInfo) + throws IOException { + UUID previousSnapshotId = getPreviousSnapshotId(snapInfo, chainManager); + return previousSnapshotId == null ? null : getSnapshotInfo(ozoneManager, chainManager, previousSnapshotId); + } + + /** + * Get the previous snapshot in the snapshot chain. + */ + private static UUID getPreviousSnapshotId(SnapshotInfo snapInfo, SnapshotChainManager chainManager) + throws IOException { + // If the snapshot is deleted in the previous run, then the in-memory + // SnapshotChainManager might throw NoSuchElementException as the snapshot + // is removed in-memory but OMDoubleBuffer has not flushed yet. + if (snapInfo == null) { + throw new OMException("Provided Snapshot Info argument is null. Cannot get the previous snapshot for a null " + + "value", INVALID_SNAPSHOT_ERROR); + } + try { + if (chainManager.hasPreviousPathSnapshot(snapInfo.getSnapshotPath(), + snapInfo.getSnapshotId())) { + return chainManager.previousPathSnapshot(snapInfo.getSnapshotPath(), + snapInfo.getSnapshotId()); + } + } catch (NoSuchElementException ignored) { + + } + return null; + } + /** * Return a map column family to prefix for the keys in the table for * the given volume and bucket. @@ -207,7 +244,7 @@ public static Map getColumnFamilyToKeyPrefixMap( *

    * Note: Currently, this is only intended to be a special use case in * Snapshot. If this is used elsewhere, consider moving this to - * @link OMMetadataManager}. + * {@link OMMetadataManager}. * * @param volumeName volume name * @param bucketName bucket name @@ -242,4 +279,74 @@ public static String getOzonePathKeyForFso(OMMetadataManager metadataManager, final long bucketId = metadataManager.getBucketId(volumeName, bucketName); return OM_KEY_PREFIX + volumeId + OM_KEY_PREFIX + bucketId + OM_KEY_PREFIX; } + + /** + * Returns merged repeatedKeyInfo entry with the existing deleted entry in the table. + * @param snapshotMoveKeyInfos keyInfos to be added. + * @param metadataManager metadataManager for a store. + * @return RepeatedOmKeyInfo + * @throws IOException + */ + public static RepeatedOmKeyInfo createMergedRepeatedOmKeyInfoFromDeletedTableEntry( + OzoneManagerProtocolProtos.SnapshotMoveKeyInfos snapshotMoveKeyInfos, OMMetadataManager metadataManager) throws + IOException { + String dbKey = snapshotMoveKeyInfos.getKey(); + List keyInfoList = new ArrayList<>(); + for (OzoneManagerProtocolProtos.KeyInfo info : snapshotMoveKeyInfos.getKeyInfosList()) { + OmKeyInfo fromProtobuf = OmKeyInfo.getFromProtobuf(info); + keyInfoList.add(fromProtobuf); + } + // When older version of keys are moved to the next snapshot's deletedTable + // The newer version might also be in the next snapshot's deletedTable and + // it might overwrite the existing value which inturn could lead to orphan block in the system. + // Checking the keyInfoList with the last n versions of the omKeyInfo versions would ensure all versions are + // present in the list and would also avoid redundant additions to the list if the last n versions match, which + // can happen on om transaction replay on snapshotted rocksdb. + RepeatedOmKeyInfo result = metadataManager.getDeletedTable().get(dbKey); + if (result == null) { + result = new RepeatedOmKeyInfo(keyInfoList); + } else if (!isSameAsLatestOmKeyInfo(keyInfoList, result)) { + keyInfoList.forEach(result::addOmKeyInfo); + } + return result; + } + + private static boolean isSameAsLatestOmKeyInfo(List omKeyInfos, + RepeatedOmKeyInfo result) { + int size = result.getOmKeyInfoList().size(); + if (size >= omKeyInfos.size()) { + return omKeyInfos.equals(result.getOmKeyInfoList().subList(size - omKeyInfos.size(), size)); + } + return false; + } + + public static SnapshotInfo getLatestSnapshotInfo(String volumeName, String bucketName, + OzoneManager ozoneManager, + SnapshotChainManager snapshotChainManager) throws IOException { + Optional latestPathSnapshot = Optional.ofNullable( + getLatestPathSnapshotId(volumeName, bucketName, snapshotChainManager)); + return latestPathSnapshot.isPresent() ? + getSnapshotInfo(ozoneManager, snapshotChainManager, latestPathSnapshot.get()) : null; + } + + public static UUID getLatestPathSnapshotId(String volumeName, String bucketName, + SnapshotChainManager snapshotChainManager) throws IOException { + String snapshotPath = volumeName + OM_KEY_PREFIX + bucketName; + return snapshotChainManager.getLatestPathSnapshotId(snapshotPath); + } + + // Validates the previous path snapshotId for given a snapshotInfo. In case snapshotInfo is + // null, the snapshotInfo would be considered as AOS and previous snapshot becomes the latest snapshot in the global + // snapshot chain. Would throw OMException if validation fails otherwise function would pass. 
+ public static void validatePreviousSnapshotId(SnapshotInfo snapshotInfo, + SnapshotChainManager snapshotChainManager, + UUID expectedPreviousSnapshotId) throws IOException { + UUID previousSnapshotId = snapshotInfo == null ? snapshotChainManager.getLatestGlobalSnapshotId() : + SnapshotUtils.getPreviousSnapshotId(snapshotInfo, snapshotChainManager); + if (!Objects.equals(expectedPreviousSnapshotId, previousSnapshotId)) { + throw new OMException("Snapshot validation failed. Expected previous snapshotId : " + + expectedPreviousSnapshotId + " but was " + previousSnapshotId, + OMException.ResultCodes.INVALID_REQUEST); + } + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index 576fac48c736..5682b040e859 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -130,7 +130,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSafeModeRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSafeModeResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3VolumeContextResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServerDefaultsResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotDiffRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotDiffResponse; @@ -376,12 +375,6 @@ public OMResponse handleReadRequest(OMRequest request) { getSnapshotInfo(request.getSnapshotInfoRequest()); responseBuilder.setSnapshotInfoResponse(snapshotInfoResponse); break; - case GetServerDefaults: - responseBuilder.setServerDefaultsResponse( - ServerDefaultsResponse.newBuilder() - .setServerDefaults(impl.getServerDefaults().getProtobuf()) - .build()); - break; case GetQuotaRepairStatus: OzoneManagerProtocolProtos.GetQuotaRepairStatusResponse quotaRepairStatusRsp = getQuotaRepairStatus(request.getGetQuotaRepairStatusRequest()); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java index e60362a1ebb3..76546f2e480b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java @@ -68,7 +68,7 @@ default OMClientResponse handleWriteRequest(OMRequest omRequest, TermIndex termI } /** - * Implementation of {@link #handleWriteRequest(OMRequest, TermIndex, OzoneManagerDoubleBuffer)}. + * Implementation of {@link #handleWriteRequest}. 
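createMergedRepeatedOmKeyInfoFromDeletedTableEntry merges incoming key versions into any entry already present in the target deletedTable, but skips the append when the incoming versions are already the newest suffix of the stored list, which is what a transaction replay against a snapshotted RocksDB produces. The same idea in isolation, as a hedged sketch with assumed parameter shapes:

    // Sketch only: suffix-aware merge used to keep replays from duplicating key versions.
    static RepeatedOmKeyInfo merge(RepeatedOmKeyInfo existing, List<OmKeyInfo> incoming) {
      if (existing == null) {
        return new RepeatedOmKeyInfo(incoming);
      }
      List<OmKeyInfo> stored = existing.getOmKeyInfoList();
      int n = stored.size();
      boolean alreadyLatest = n >= incoming.size()
          && incoming.equals(stored.subList(n - incoming.size(), n));
      if (!alreadyLatest) {
        incoming.forEach(existing::addOmKeyInfo);   // append only when not already the tail
      }
      return existing;
    }
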
* * @param omRequest the write request * @param termIndex - ratis transaction term and index diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java index edffd5ed74eb..c7a14bb6eedc 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; @@ -52,6 +53,7 @@ public final class OmTestManagers { private final BucketManager bucketManager; private final PrefixManager prefixManager; private final ScmBlockLocationProtocol scmBlockClient; + private final OzoneClient rpcClient; public OzoneManager getOzoneManager() { return om; @@ -77,6 +79,9 @@ public KeyManager getKeyManager() { public ScmBlockLocationProtocol getScmBlockClient() { return scmBlockClient; } + public OzoneClient getRpcClient() { + return rpcClient; + } public OmTestManagers(OzoneConfiguration conf) throws AuthenticationException, IOException, InterruptedException, TimeoutException { @@ -121,7 +126,8 @@ public OmTestManagers(OzoneConfiguration conf, waitFor(() -> om.getOmRatisServer().checkLeaderStatus() == RaftServerStatus.LEADER_AND_READY, 10, 10_000); - writeClient = OzoneClientFactory.getRpcClient(conf) + rpcClient = OzoneClientFactory.getRpcClient(conf); + writeClient = rpcClient .getObjectStore().getClientProxy().getOzoneManagerClient(); metadataManager = (OmMetadataManagerImpl) HddsWhiteboxTestUtils .getInternalState(om, "metadataManager"); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java index 36245dc8741d..9ae85b0fcb62 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java @@ -45,11 +45,10 @@ public void testDBDefinition() throws Exception { OzoneConfiguration configuration = new OzoneConfiguration(); File metaDir = folder.toFile(); DBStore store = OmMetadataManagerImpl.loadDB(configuration, metaDir); - OMDBDefinition dbDef = new OMDBDefinition(); // Get list of tables from DB Definitions final Collection> columnFamilyDefinitions - = dbDef.getColumnFamilies(); + = OMDBDefinition.get().getColumnFamilies(); final int countOmDefTables = columnFamilyDefinitions.size(); ArrayList missingDBDefTables = new ArrayList<>(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java index c807c04688d6..eff23a18e6e2 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java @@ -30,9 +30,11 @@ import 
java.util.List; import java.util.Map; import java.util.UUID; +import java.util.stream.Collectors; import javax.xml.bind.DatatypeConverter; import org.apache.commons.lang3.RandomStringUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.client.BlockID; @@ -40,6 +42,7 @@ import org.apache.hadoop.hdds.client.ReplicationConfigValidator; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.ozone.ClientVersion; @@ -109,6 +112,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.logging.log4j.util.Strings; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doCallRealMethod; @@ -749,17 +753,17 @@ public static OMRequest.Builder newCreateBucketRequest( .setClientId(UUID.randomUUID().toString()); } - public static List< HddsProtos.KeyValue> getMetadataList() { - List metadataList = new ArrayList<>(); - metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key1").setValue( + public static List< KeyValue> getMetadataList() { + List metadataList = new ArrayList<>(); + metadataList.add(KeyValue.newBuilder().setKey("key1").setValue( "value1").build()); - metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key2").setValue( + metadataList.add(KeyValue.newBuilder().setKey("key2").setValue( "value2").build()); return metadataList; } - public static HddsProtos.KeyValue fsoMetadata() { - return HddsProtos.KeyValue.newBuilder() + public static KeyValue fsoMetadata() { + return KeyValue.newBuilder() .setKey(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS) .setValue(Boolean.FALSE.toString()) .build(); @@ -1050,7 +1054,7 @@ public static OMRequest createCommitPartMPURequest(String volumeName, .setMultipartNumber(partNumber) .setMultipartUploadID(multipartUploadID) .addAllKeyLocations(new ArrayList<>()) - .addMetadata(HddsProtos.KeyValue.newBuilder() + .addMetadata(KeyValue.newBuilder() .setKey(OzoneConsts.ETAG) .setValue(DatatypeConverter.printHexBinary( new DigestInputStream( @@ -1321,6 +1325,69 @@ public static OMRequest createSnapshotRequest(String volumeName, .build(); } + public static OMRequest moveSnapshotTableKeyRequest(UUID snapshotId, + List>> deletedKeys, + List>> deletedDirs, + List> renameKeys) { + List deletedMoveKeys = new ArrayList<>(); + for (Pair> deletedKey : deletedKeys) { + OzoneManagerProtocolProtos.SnapshotMoveKeyInfos snapshotMoveKeyInfos = + OzoneManagerProtocolProtos.SnapshotMoveKeyInfos.newBuilder() + .setKey(deletedKey.getKey()) + .addAllKeyInfos( + deletedKey.getValue().stream() + .map(omKeyInfo -> omKeyInfo.getProtobuf(ClientVersion.CURRENT_VERSION)).collect(Collectors.toList())) + .build(); + deletedMoveKeys.add(snapshotMoveKeyInfos); + } + + List deletedDirMoveKeys = new ArrayList<>(); + for (Pair> deletedKey : deletedDirs) { + OzoneManagerProtocolProtos.SnapshotMoveKeyInfos snapshotMoveKeyInfos = + OzoneManagerProtocolProtos.SnapshotMoveKeyInfos.newBuilder() + .setKey(deletedKey.getKey()) + .addAllKeyInfos( + deletedKey.getValue().stream() + .map(omKeyInfo -> omKeyInfo.getProtobuf(ClientVersion.CURRENT_VERSION)) + .collect(Collectors.toList())) + .build(); + 
deletedDirMoveKeys.add(snapshotMoveKeyInfos); + } + + List renameKeyList = new ArrayList<>(); + for (Pair renameKey : renameKeys) { + KeyValue.Builder keyValue = KeyValue.newBuilder(); + keyValue.setKey(renameKey.getKey()); + if (!Strings.isBlank(renameKey.getValue())) { + keyValue.setValue(renameKey.getValue()); + } + renameKeyList.add(keyValue.build()); + } + + + OzoneManagerProtocolProtos.SnapshotMoveTableKeysRequest snapshotMoveTableKeysRequest = + OzoneManagerProtocolProtos.SnapshotMoveTableKeysRequest.newBuilder() + .setFromSnapshotID(HddsUtils.toProtobuf(snapshotId)) + .addAllDeletedKeys(deletedMoveKeys) + .addAllDeletedDirs(deletedDirMoveKeys) + .addAllRenamedKeys(renameKeyList) + .build(); + + OzoneManagerProtocolProtos.UserInfo userInfo = + OzoneManagerProtocolProtos.UserInfo.newBuilder() + .setUserName("user") + .setHostName("host") + .setRemoteAddress("remote-address") + .build(); + + return OMRequest.newBuilder() + .setSnapshotMoveTableKeysRequest(snapshotMoveTableKeysRequest) + .setCmdType(Type.SnapshotMoveTableKeys) + .setClientId(UUID.randomUUID().toString()) + .setUserInfo(userInfo) + .build(); + } + /** * Create OMRequest for Rename Snapshot. * diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java index 806c1b90f7f3..af904382256b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java @@ -19,15 +19,8 @@ package org.apache.hadoop.ozone.om.request.snapshot; import org.apache.hadoop.hdds.client.RatisReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.TransactionInfo; -import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.AuditMessage; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -38,18 +31,15 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponse; import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponseWithFSO; -import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; -import java.io.File; import java.io.IOException; import java.util.UUID; @@ -65,69 +55,19 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static 
org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyString; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.framework; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; /** * Tests OMSnapshotCreateRequest class, which handles CreateSnapshot request. */ -public class TestOMSnapshotCreateRequest { - @TempDir - private File anotherTempDir; - - private OzoneManager ozoneManager; - private OMMetrics omMetrics; - private OmMetadataManagerImpl omMetadataManager; - private BatchOperation batchOperation; - - private String volumeName; - private String bucketName; +public class TestOMSnapshotCreateRequest extends TestSnapshotRequestAndResponse { private String snapshotName1; private String snapshotName2; @BeforeEach public void setup() throws Exception { - ozoneManager = mock(OzoneManager.class); - omMetrics = OMMetrics.create(); - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - anotherTempDir.getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, - ozoneManager); - when(ozoneManager.getMetrics()).thenReturn(omMetrics); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - when(ozoneManager.isRatisEnabled()).thenReturn(true); - when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); - when(ozoneManager.isAdmin(any())).thenReturn(false); - when(ozoneManager.isOwner(any(), any())).thenReturn(false); - when(ozoneManager.getBucketOwner(any(), any(), - any(), any())).thenReturn("dummyBucketOwner"); - OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); - when(lvm.isAllowed(anyString())).thenReturn(true); - when(ozoneManager.getVersionManager()).thenReturn(lvm); - AuditLogger auditLogger = mock(AuditLogger.class); - when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); - doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - - volumeName = UUID.randomUUID().toString(); - bucketName = UUID.randomUUID().toString(); snapshotName1 = UUID.randomUUID().toString(); snapshotName2 = UUID.randomUUID().toString(); - OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - } - - @AfterEach - public void stop() { - omMetrics.unRegister(); - framework().clearInlineMocks(); - if (batchOperation != null) { - batchOperation.close(); - } } @ValueSource(strings = { @@ -140,9 +80,9 @@ public void stop() { }) @ParameterizedTest public void testPreExecute(String snapshotName) throws Exception { - when(ozoneManager.isOwner(any(), any())).thenReturn(true); - OMRequest omRequest = createSnapshotRequest(volumeName, - bucketName, snapshotName); + when(getOzoneManager().isOwner(any(), any())).thenReturn(true); + OMRequest omRequest = createSnapshotRequest(getVolumeName(), + getBucketName(), snapshotName); doPreExecute(omRequest); } @@ -158,9 +98,9 @@ public void testPreExecute(String snapshotName) throws Exception { }) @ParameterizedTest public void testPreExecuteFailure(String snapshotName) { - when(ozoneManager.isOwner(any(), any())).thenReturn(true); - OMRequest omRequest = createSnapshotRequest(volumeName, - bucketName, snapshotName); + when(getOzoneManager().isOwner(any(), any())).thenReturn(true); + OMRequest omRequest = createSnapshotRequest(getVolumeName(), + getBucketName(), snapshotName); OMException omException = 
assertThrows(OMException.class, () -> doPreExecute(omRequest)); assertTrue(omException.getMessage() @@ -170,8 +110,8 @@ public void testPreExecuteFailure(String snapshotName) { @Test public void testPreExecuteBadOwner() { // Owner is not set for the request. - OMRequest omRequest = createSnapshotRequest(volumeName, - bucketName, snapshotName1); + OMRequest omRequest = createSnapshotRequest(getVolumeName(), + getBucketName(), snapshotName1); OMException omException = assertThrows(OMException.class, () -> doPreExecute(omRequest)); @@ -181,29 +121,29 @@ public void testPreExecuteBadOwner() { @Test public void testValidateAndUpdateCache() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); - OMRequest omRequest = createSnapshotRequest(volumeName, - bucketName, snapshotName1); + when(getOzoneManager().isAdmin(any())).thenReturn(true); + OMRequest omRequest = createSnapshotRequest(getVolumeName(), + getBucketName(), snapshotName1); OMSnapshotCreateRequest omSnapshotCreateRequest = doPreExecute(omRequest); - String key = getTableKey(volumeName, bucketName, snapshotName1); - String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + String key = getTableKey(getVolumeName(), getBucketName(), snapshotName1); + String bucketKey = getOmMetadataManager().getBucketKey(getVolumeName(), getBucketName()); // Add a 1000-byte key to the bucket OmKeyInfo key1 = addKey("key-testValidateAndUpdateCache", 12345L); addKeyToTable(key1); - OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get( + OmBucketInfo omBucketInfo = getOmMetadataManager().getBucketTable().get( bucketKey); long bucketDataSize = key1.getDataSize(); long bucketUsedBytes = omBucketInfo.getUsedBytes(); assertEquals(key1.getReplicatedSize(), bucketUsedBytes); // Value in cache should be null as of now. - assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); // Run validateAndUpdateCache. 
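TestOMSnapshotCreateRequest (and TestOMSnapshotDeleteRequest below) now extend TestSnapshotRequestAndResponse instead of wiring up their own mocks, and reach the shared state through getters such as getOzoneManager(), getOmMetadataManager(), getOmMetrics(), getBatchOperation(), getVolumeName() and getBucketName(). The fixture itself is not part of this hunk; a rough guess at its shape, only to make the accessor usage readable:

    // Assumed sketch of the shared fixture; the real TestSnapshotRequestAndResponse
    // is defined elsewhere in the patch and may differ.
    public class TestSnapshotRequestAndResponse {
      private OzoneManager ozoneManager;            // mocked OM with snapshot stubs
      private OMMetrics omMetrics;
      private OmMetadataManagerImpl omMetadataManager;
      private BatchOperation batchOperation;
      private String volumeName;
      private String bucketName;

      public OzoneManager getOzoneManager() { return ozoneManager; }
      public OMMetrics getOmMetrics() { return omMetrics; }
      public OmMetadataManagerImpl getOmMetadataManager() { return omMetadataManager; }
      public BatchOperation getBatchOperation() { return batchOperation; }
      public String getVolumeName() { return volumeName; }
      public String getBucketName() { return bucketName; }
    }
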
OMClientResponse omClientResponse = - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); + omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 1); assertNotNull(omClientResponse.getOMResponse()); @@ -227,21 +167,21 @@ public void testValidateAndUpdateCache() throws Exception { // Get value from cache SnapshotInfo snapshotInfoInCache = - omMetadataManager.getSnapshotInfoTable().get(key); + getOmMetadataManager().getSnapshotInfoTable().get(key); assertNotNull(snapshotInfoInCache); assertEquals(snapshotInfoFromProto, snapshotInfoInCache); assertEquals(snapshotInfoInCache.getLastTransactionInfo(), TransactionInfo.valueOf(TransactionInfo.getTermIndex(1L)).toByteString()); - assertEquals(0, omMetrics.getNumSnapshotCreateFails()); - assertEquals(1, omMetrics.getNumSnapshotActive()); - assertEquals(1, omMetrics.getNumSnapshotCreates()); + assertEquals(0, getOmMetrics().getNumSnapshotCreateFails()); + assertEquals(1, getOmMetrics().getNumSnapshotActive()); + assertEquals(1, getOmMetrics().getNumSnapshotCreates()); } @Test public void testEntryRenamedKeyTable() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); + when(getOzoneManager().isAdmin(any())).thenReturn(true); Table snapshotRenamedTable = - omMetadataManager.getSnapshotRenamedTable(); + getOmMetadataManager().getSnapshotRenamedTable(); renameKey("key1", "key2", 0); renameDir("dir1", "dir2", 5); @@ -251,17 +191,17 @@ public void testEntryRenamedKeyTable() throws Exception { // Create snapshot createSnapshot(snapshotName1); - String snapKey = getTableKey(volumeName, - bucketName, snapshotName1); + String snapKey = getTableKey(getVolumeName(), + getBucketName(), snapshotName1); SnapshotInfo snapshotInfo = - omMetadataManager.getSnapshotInfoTable().get(snapKey); + getOmMetadataManager().getSnapshotInfoTable().get(snapKey); assertNotNull(snapshotInfo); renameKey("key3", "key4", 10); renameDir("dir3", "dir4", 15); // Rename table should have two entries as rename is within snapshot scope. 
- assertEquals(2, omMetadataManager + assertEquals(2, getOmMetadataManager() .countRowsInTable(snapshotRenamedTable)); // Create snapshot to clear snapshotRenamedTable @@ -271,33 +211,33 @@ public void testEntryRenamedKeyTable() throws Exception { @Test public void testEntryExists() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); + when(getOzoneManager().isAdmin(any())).thenReturn(true); - String key = getTableKey(volumeName, bucketName, snapshotName1); + String key = getTableKey(getVolumeName(), getBucketName(), snapshotName1); OMRequest omRequest = - createSnapshotRequest(volumeName, bucketName, snapshotName1); + createSnapshotRequest(getVolumeName(), getBucketName(), snapshotName1); OMSnapshotCreateRequest omSnapshotCreateRequest = doPreExecute(omRequest); - assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); + omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 1); - assertNotNull(omMetadataManager.getSnapshotInfoTable().get(key)); + assertNotNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); // Now try to create again to verify error - omRequest = createSnapshotRequest(volumeName, bucketName, snapshotName1); + omRequest = createSnapshotRequest(getVolumeName(), getBucketName(), snapshotName1); omSnapshotCreateRequest = doPreExecute(omRequest); OMClientResponse omClientResponse = - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 2); + omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 2); OMResponse omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse.getCreateSnapshotResponse()); assertEquals(OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS, omResponse.getStatus()); - assertEquals(1, omMetrics.getNumSnapshotCreateFails()); - assertEquals(1, omMetrics.getNumSnapshotActive()); - assertEquals(2, omMetrics.getNumSnapshotCreates()); + assertEquals(1, getOmMetrics().getNumSnapshotCreateFails()); + assertEquals(1, getOmMetrics().getNumSnapshotActive()); + assertEquals(2, getOmMetrics().getNumSnapshotCreates()); } private void renameKey(String fromKey, String toKey, long offset) @@ -316,15 +256,15 @@ private void renameKey(String fromKey, String toKey, long offset) new OMKeyRenameResponse(omResponse, fromKeyInfo.getKeyName(), toKeyInfo.getKeyName(), toKeyInfo); - omKeyRenameResponse.addToDBBatch(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); + omKeyRenameResponse.addToDBBatch(getOmMetadataManager(), getBatchOperation()); + getOmMetadataManager().getStore().commitBatchOperation(getBatchOperation()); } private void renameDir(String fromKey, String toKey, long offset) throws Exception { String fromKeyParentName = UUID.randomUUID().toString(); - OmKeyInfo fromKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, fromKeyParentName, RatisReplicationConfig.getInstance(THREE)) + OmKeyInfo fromKeyParent = OMRequestTestUtils.createOmKeyInfo(getVolumeName(), + getBucketName(), fromKeyParentName, RatisReplicationConfig.getInstance(THREE)) .setObjectID(100L) .build(); @@ -342,32 +282,32 @@ private void renameDir(String fromKey, String toKey, long offset) new OMKeyRenameResponseWithFSO(omResponse, getDBKeyName(fromKeyInfo), getDBKeyName(toKeyInfo), fromKeyParent, null, toKeyInfo, null, true, BucketLayout.FILE_SYSTEM_OPTIMIZED); - omKeyRenameResponse.addToDBBatch(omMetadataManager, 
batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); + omKeyRenameResponse.addToDBBatch(getOmMetadataManager(), getBatchOperation()); + getOmMetadataManager().getStore().commitBatchOperation(getBatchOperation()); } protected String getDBKeyName(OmKeyInfo keyInfo) throws IOException { - return omMetadataManager.getOzonePathKey( - omMetadataManager.getVolumeId(volumeName), - omMetadataManager.getBucketId(volumeName, bucketName), + return getOmMetadataManager().getOzonePathKey( + getOmMetadataManager().getVolumeId(getVolumeName()), + getOmMetadataManager().getBucketId(getVolumeName(), getBucketName()), keyInfo.getParentObjectID(), keyInfo.getKeyName()); } private void createSnapshot(String snapName) throws Exception { OMRequest omRequest = createSnapshotRequest( - volumeName, bucketName, snapName); + getVolumeName(), getBucketName(), snapName); OMSnapshotCreateRequest omSnapshotCreateRequest = doPreExecute(omRequest); //create entry OMClientResponse omClientResponse = - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); - omClientResponse.checkAndUpdateDB(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); + omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 1); + omClientResponse.checkAndUpdateDB(getOmMetadataManager(), getBatchOperation()); + getOmMetadataManager().getStore().commitBatchOperation(getBatchOperation()); } private OMSnapshotCreateRequest doPreExecute( OMRequest originalRequest) throws Exception { - return doPreExecute(originalRequest, ozoneManager); + return doPreExecute(originalRequest, getOzoneManager()); } /** @@ -384,15 +324,15 @@ public static OMSnapshotCreateRequest doPreExecute( } private OmKeyInfo addKey(String keyName, long objectId) { - return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + return OMRequestTestUtils.createOmKeyInfo(getVolumeName(), getBucketName(), keyName, RatisReplicationConfig.getInstance(THREE)).setObjectID(objectId) .build(); } protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception { OMRequestTestUtils.addKeyToTable(false, true, keyInfo, 0, 0L, - omMetadataManager); - return omMetadataManager.getOzoneKey(keyInfo.getVolumeName(), + getOmMetadataManager()); + return getOmMetadataManager().getOzoneKey(keyInfo.getVolumeName(), keyInfo.getBucketName(), keyInfo.getKeyName()); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java index 5a8bb5d7c0d0..4c5dc2e77f0f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java @@ -19,33 +19,21 @@ package org.apache.hadoop.ozone.om.request.snapshot; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.AuditMessage; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotManager; -import org.apache.hadoop.ozone.om.OzoneManager; import 
org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; import org.apache.hadoop.util.Time; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; -import java.io.File; import java.util.UUID; import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; @@ -61,10 +49,6 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyString; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.framework; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; /** @@ -72,60 +56,15 @@ * Mostly mirrors TestOMSnapshotCreateRequest. * testEntryNotExist() and testEntryExists() are unique. */ -public class TestOMSnapshotDeleteRequest { - @TempDir - private File folder; +public class TestOMSnapshotDeleteRequest extends TestSnapshotRequestAndResponse { - private OzoneManager ozoneManager; - private OMMetrics omMetrics; - private OmMetadataManagerImpl omMetadataManager; - - private String volumeName; - private String bucketName; private String snapshotName; @BeforeEach public void setup() throws Exception { - ozoneManager = mock(OzoneManager.class); - omMetrics = OMMetrics.create(); - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, - ozoneManager); - when(ozoneManager.getMetrics()).thenReturn(omMetrics); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - when(ozoneManager.isRatisEnabled()).thenReturn(true); - when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); - when(ozoneManager.isAdmin(any())).thenReturn(false); - when(ozoneManager.isOwner(any(), any())).thenReturn(false); - when(ozoneManager.getBucketOwner(any(), any(), - any(), any())).thenReturn("dummyBucketOwner"); - OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); - when(lvm.isAllowed(anyString())).thenReturn(true); - when(ozoneManager.getVersionManager()).thenReturn(lvm); - AuditLogger auditLogger = mock(AuditLogger.class); - when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); - doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); - - OmSnapshotManager omSnapshotManager = mock(OmSnapshotManager.class); - when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); - - volumeName = UUID.randomUUID().toString(); - bucketName = UUID.randomUUID().toString(); snapshotName = UUID.randomUUID().toString(); - OMRequestTestUtils.addVolumeAndBucketToDB( - volumeName, bucketName, omMetadataManager); - } - @AfterEach - 
public void stop() { - omMetrics.unRegister(); - framework().clearInlineMocks(); - } - - @ValueSource(strings = { // '-' is allowed. "9cdf0e8a-6946-41ad-a2d1-9eb724fab126", @@ -136,9 +75,9 @@ public void stop() { }) @ParameterizedTest public void testPreExecute(String deleteSnapshotName) throws Exception { - when(ozoneManager.isOwner(any(), any())).thenReturn(true); - OMRequest omRequest = deleteSnapshotRequest(volumeName, - bucketName, deleteSnapshotName); + when(getOzoneManager().isOwner(any(), any())).thenReturn(true); + OMRequest omRequest = deleteSnapshotRequest(getVolumeName(), + getBucketName(), deleteSnapshotName); doPreExecute(omRequest); } @@ -154,9 +93,9 @@ public void testPreExecute(String deleteSnapshotName) throws Exception { }) @ParameterizedTest public void testPreExecuteFailure(String deleteSnapshotName) { - when(ozoneManager.isOwner(any(), any())).thenReturn(true); - OMRequest omRequest = deleteSnapshotRequest(volumeName, - bucketName, deleteSnapshotName); + when(getOzoneManager().isOwner(any(), any())).thenReturn(true); + OMRequest omRequest = deleteSnapshotRequest(getVolumeName(), + getBucketName(), deleteSnapshotName); OMException omException = assertThrows(OMException.class, () -> doPreExecute(omRequest)); assertTrue(omException.getMessage() @@ -166,8 +105,8 @@ public void testPreExecuteFailure(String deleteSnapshotName) { @Test public void testPreExecuteBadOwner() { // Owner is not set for the request. - OMRequest omRequest = deleteSnapshotRequest(volumeName, - bucketName, snapshotName); + OMRequest omRequest = deleteSnapshotRequest(getVolumeName(), + getBucketName(), snapshotName); OMException omException = assertThrows(OMException.class, () -> doPreExecute(omRequest)); @@ -177,27 +116,27 @@ public void testPreExecuteBadOwner() { @Test public void testValidateAndUpdateCache() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); + when(getOzoneManager().isAdmin(any())).thenReturn(true); OMRequest omRequest = - deleteSnapshotRequest(volumeName, bucketName, snapshotName); + deleteSnapshotRequest(getVolumeName(), getBucketName(), snapshotName); OMSnapshotDeleteRequest omSnapshotDeleteRequest = doPreExecute(omRequest); - String key = SnapshotInfo.getTableKey(volumeName, bucketName, snapshotName); + String key = SnapshotInfo.getTableKey(getVolumeName(), getBucketName(), snapshotName); // As we have not still called validateAndUpdateCache, get() should // return null. 
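The delete-request test still drives state through the table cache rather than committed rows: an ACTIVE SnapshotInfo is seeded with addCacheEntry, validateAndUpdateCache is run, and the same key is then expected to read back as SNAPSHOT_DELETED. The seeding pattern in isolation, with placeholder names:

    // Sketch only: cache-seeding pattern used by the delete test; names are placeholders.
    SnapshotInfo info = SnapshotInfo.newInstance(volumeName, bucketName, snapshotName, null, Time.now());
    omMetadataManager.getSnapshotInfoTable().addCacheEntry(
        new CacheKey<>(SnapshotInfo.getTableKey(volumeName, bucketName, snapshotName)),
        CacheValue.get(1L, info));   // visible to reads until the double buffer flushes
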
- assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); // add key to cache - SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(volumeName, bucketName, + SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(getVolumeName(), getBucketName(), snapshotName, null, Time.now()); assertEquals(SNAPSHOT_ACTIVE, snapshotInfo.getSnapshotStatus()); - omMetadataManager.getSnapshotInfoTable().addCacheEntry( + getOmMetadataManager().getSnapshotInfoTable().addCacheEntry( new CacheKey<>(key), CacheValue.get(1L, snapshotInfo)); // Trigger validateAndUpdateCache OMClientResponse omClientResponse = - omSnapshotDeleteRequest.validateAndUpdateCache(ozoneManager, 2L); + omSnapshotDeleteRequest.validateAndUpdateCache(getOzoneManager(), 2L); OMResponse omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse); @@ -207,14 +146,14 @@ public void testValidateAndUpdateCache() throws Exception { assertEquals(OK, omResponse.getStatus()); // check cache - snapshotInfo = omMetadataManager.getSnapshotInfoTable().get(key); + snapshotInfo = getOmMetadataManager().getSnapshotInfoTable().get(key); assertNotNull(snapshotInfo); assertEquals(SNAPSHOT_DELETED, snapshotInfo.getSnapshotStatus()); - assertEquals(0, omMetrics.getNumSnapshotCreates()); + assertEquals(0, getOmMetrics().getNumSnapshotCreates()); // Expected -1 because no snapshot was created before. - assertEquals(-1, omMetrics.getNumSnapshotActive()); - assertEquals(1, omMetrics.getNumSnapshotDeleted()); - assertEquals(0, omMetrics.getNumSnapshotDeleteFails()); + assertEquals(-1, getOmMetrics().getNumSnapshotActive()); + assertEquals(1, getOmMetrics().getNumSnapshotDeleted()); + assertEquals(0, getOmMetrics().getNumSnapshotDeleteFails()); } /** @@ -222,25 +161,25 @@ public void testValidateAndUpdateCache() throws Exception { */ @Test public void testEntryNotExist() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); + when(getOzoneManager().isAdmin(any())).thenReturn(true); OMRequest omRequest = deleteSnapshotRequest( - volumeName, bucketName, snapshotName); + getVolumeName(), getBucketName(), snapshotName); OMSnapshotDeleteRequest omSnapshotDeleteRequest = doPreExecute(omRequest); - String key = SnapshotInfo.getTableKey(volumeName, bucketName, snapshotName); + String key = SnapshotInfo.getTableKey(getVolumeName(), getBucketName(), snapshotName); // Entry does not exist - assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); // Trigger delete snapshot validateAndUpdateCache OMClientResponse omClientResponse = - omSnapshotDeleteRequest.validateAndUpdateCache(ozoneManager, 1L); + omSnapshotDeleteRequest.validateAndUpdateCache(getOzoneManager(), 1L); OMResponse omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse.getDeleteSnapshotResponse()); assertEquals(Status.FILE_NOT_FOUND, omResponse.getStatus()); - assertEquals(0, omMetrics.getNumSnapshotActive()); - assertEquals(0, omMetrics.getNumSnapshotDeleted()); - assertEquals(1, omMetrics.getNumSnapshotDeleteFails()); + assertEquals(0, getOmMetrics().getNumSnapshotActive()); + assertEquals(0, getOmMetrics().getNumSnapshotDeleted()); + assertEquals(1, getOmMetrics().getNumSnapshotDeleteFails()); } /** @@ -249,50 +188,50 @@ public void testEntryNotExist() throws Exception { */ @Test public void testEntryExist() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); - String key = 
SnapshotInfo.getTableKey(volumeName, bucketName, snapshotName); + when(getOzoneManager().isAdmin(any())).thenReturn(true); + String key = SnapshotInfo.getTableKey(getVolumeName(), getBucketName(), snapshotName); OMRequest omRequest1 = - createSnapshotRequest(volumeName, bucketName, snapshotName); + createSnapshotRequest(getVolumeName(), getBucketName(), snapshotName); OMSnapshotCreateRequest omSnapshotCreateRequest = - TestOMSnapshotCreateRequest.doPreExecute(omRequest1, ozoneManager); + TestOMSnapshotCreateRequest.doPreExecute(omRequest1, getOzoneManager()); - assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); // Create snapshot entry - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1L); + omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 1L); SnapshotInfo snapshotInfo = - omMetadataManager.getSnapshotInfoTable().get(key); + getOmMetadataManager().getSnapshotInfoTable().get(key); assertNotNull(snapshotInfo); assertEquals(SNAPSHOT_ACTIVE, snapshotInfo.getSnapshotStatus()); - assertEquals(1, omMetrics.getNumSnapshotActive()); + assertEquals(1, getOmMetrics().getNumSnapshotActive()); OMRequest omRequest2 = - deleteSnapshotRequest(volumeName, bucketName, snapshotName); + deleteSnapshotRequest(getVolumeName(), getBucketName(), snapshotName); OMSnapshotDeleteRequest omSnapshotDeleteRequest = doPreExecute(omRequest2); // Delete snapshot entry OMClientResponse omClientResponse = - omSnapshotDeleteRequest.validateAndUpdateCache(ozoneManager, 2L); + omSnapshotDeleteRequest.validateAndUpdateCache(getOzoneManager(), 2L); // Response should be successful OMResponse omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse); assertNotNull(omResponse.getDeleteSnapshotResponse()); assertEquals(OK, omResponse.getStatus()); - snapshotInfo = omMetadataManager.getSnapshotInfoTable().get(key); + snapshotInfo = getOmMetadataManager().getSnapshotInfoTable().get(key); // The snapshot entry should still exist in the table, // but marked as DELETED. assertNotNull(snapshotInfo); assertEquals(SNAPSHOT_DELETED, snapshotInfo.getSnapshotStatus()); assertThat(snapshotInfo.getDeletionTime()).isGreaterThan(0L); - assertEquals(0, omMetrics.getNumSnapshotActive()); + assertEquals(0, getOmMetrics().getNumSnapshotActive()); // Now delete snapshot entry again, expect error. - omRequest2 = deleteSnapshotRequest(volumeName, bucketName, snapshotName); + omRequest2 = deleteSnapshotRequest(getVolumeName(), getBucketName(), snapshotName); omSnapshotDeleteRequest = doPreExecute(omRequest2); omClientResponse = - omSnapshotDeleteRequest.validateAndUpdateCache(ozoneManager, 3L); + omSnapshotDeleteRequest.validateAndUpdateCache(getOzoneManager(), 3L); omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse); @@ -300,11 +239,11 @@ public void testEntryExist() throws Exception { assertEquals(Status.FILE_NOT_FOUND, omResponse.getStatus()); // Snapshot entry should still be there. 
- snapshotInfo = omMetadataManager.getSnapshotInfoTable().get(key); + snapshotInfo = getOmMetadataManager().getSnapshotInfoTable().get(key); assertNotNull(snapshotInfo); assertEquals(SNAPSHOT_DELETED, snapshotInfo.getSnapshotStatus()); - assertEquals(0, omMetrics.getNumSnapshotActive()); - assertEquals(1, omMetrics.getNumSnapshotDeleteFails()); + assertEquals(0, getOmMetrics().getNumSnapshotActive()); + assertEquals(1, getOmMetrics().getNumSnapshotDeleteFails()); } private OMSnapshotDeleteRequest doPreExecute( @@ -313,7 +252,7 @@ private OMSnapshotDeleteRequest doPreExecute( new OMSnapshotDeleteRequest(originalRequest); OMRequest modifiedRequest = - omSnapshotDeleteRequest.preExecute(ozoneManager); + omSnapshotDeleteRequest.preExecute(getOzoneManager()); return new OMSnapshotDeleteRequest(modifiedRequest); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotMoveTableKeysRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotMoveTableKeysRequest.java new file mode 100644 index 000000000000..247f322dfcfe --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotMoveTableKeysRequest.java @@ -0,0 +1,264 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.hadoop.ozone.om.request.snapshot; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; +import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST; +import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.addVolumeAndBucketToDB; +import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.deleteSnapshotRequest; +import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.moveSnapshotTableKeyRequest; + +/** + * Class to test OmSnapshotMoveTableKeyRequest. 
+ */ +public class TestOMSnapshotMoveTableKeysRequest extends TestSnapshotRequestAndResponse { + + private String snapshotName1; + private String snapshotName2; + private SnapshotInfo snapshotInfo1; + private SnapshotInfo snapshotInfo2; + + @BeforeEach + public void setup() throws Exception { + snapshotName1 = UUID.randomUUID().toString(); + snapshotName2 = UUID.randomUUID().toString(); + } + + public TestOMSnapshotMoveTableKeysRequest() { + super(true); + } + + private void createSnapshots(boolean createSecondSnapshot) throws Exception { + createSnapshotCheckpoint(getVolumeName(), getBucketName(), snapshotName1); + snapshotInfo1 = SnapshotUtils.getSnapshotInfo(getOzoneManager(), getVolumeName(), getBucketName(), snapshotName1); + if (createSecondSnapshot) { + createSnapshotCheckpoint(getVolumeName(), getBucketName(), snapshotName2); + snapshotInfo2 = SnapshotUtils.getSnapshotInfo(getOzoneManager(), getVolumeName(), getBucketName(), snapshotName2); + } + } + + private SnapshotInfo deleteSnapshot(SnapshotInfo snapshotInfo, long transactionIndex) throws Exception { + OzoneManagerProtocolProtos.OMRequest omRequest = deleteSnapshotRequest(snapshotInfo.getVolumeName(), + snapshotInfo.getBucketName(), snapshotInfo.getName()); + OMSnapshotDeleteRequest omSnapshotDeleteRequest = new OMSnapshotDeleteRequest(omRequest); + omSnapshotDeleteRequest.preExecute(getOzoneManager()); + omSnapshotDeleteRequest.validateAndUpdateCache(getOzoneManager(), transactionIndex); + return SnapshotUtils.getSnapshotInfo(getOzoneManager(), snapshotInfo.getTableKey()); + } + + @Test + public void testValidateAndUpdateCacheWithNextSnapshotInactive() throws Exception { + createSnapshots(true); + snapshotInfo2 = deleteSnapshot(snapshotInfo2, 0); + OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + Collections.emptyList(), Collections.emptyList(), Collections.emptyList()); + OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest( + omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); + OMClientResponse omClientResponse = omSnapshotMoveTableKeysRequest.validateAndUpdateCache(getOzoneManager(), 1); + Assertions.assertFalse(omClientResponse.getOMResponse().getSuccess()); + Assertions.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_SNAPSHOT_ERROR, + omClientResponse.getOMResponse().getStatus()); + } + + @Test + public void testPreExecuteWithInvalidDeletedKeyPrefix() throws Exception { + createSnapshots(true); + String invalidVolumeName = UUID.randomUUID().toString(); + String invalidBucketName = UUID.randomUUID().toString(); + addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); + List>> deletedKeys = + Stream.of(getDeletedKeys(getVolumeName(), getBucketName(), 0, 10, 10, 0), + getDeletedKeys(invalidVolumeName, invalidBucketName, 0, 10, 10, 0)) + .flatMap(List::stream).collect(Collectors.toList()); + OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + deletedKeys, Collections.emptyList(), Collections.emptyList()); + OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + OMException omException = Assertions.assertThrows(OMException.class, + () -> omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); + Assertions.assertEquals(INVALID_KEY_NAME, omException.getResult()); + } + + 
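Note on the new test above: the type parameters of several declarations were lost when this diff was rendered (everything between angle brackets was stripped), which is why lines such as "List>> deletedKeys" and "List> renameKeys" look truncated. Judging from the Pair and OmKeyInfo imports and from how the helper results are flat-mapped, the intended declarations are presumably the ones sketched below. The helpers themselves live in the new TestSnapshotRequestAndResponse fixture and are not shown in this diff, so anything beyond the argument lists visible above is an assumption.

    // Hypothetical reconstruction of the stripped generics; element types are
    // inferred from the Pair/OmKeyInfo imports and the flat-mapping in the tests.
    List<Pair<String, List<OmKeyInfo>>> deletedKeys =
        getDeletedKeys(getVolumeName(), getBucketName(), 0, 10, 10, 0);
    List<Pair<String, List<OmKeyInfo>>> deletedDirs =
        getDeletedDirKeys(getVolumeName(), getBucketName(), 0, 10, 1);
    List<Pair<String, String>> renameKeys =
        getRenameKeys(getVolumeName(), getBucketName(), 0, 10, snapshotName1);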
@Test + public void testPreExecuteWithInvalidDeletedDirPrefix() throws Exception { + createSnapshots(true); + String invalidVolumeName = UUID.randomUUID().toString(); + String invalidBucketName = UUID.randomUUID().toString(); + addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); + List>> deletedDirs = + Stream.of(getDeletedDirKeys(getVolumeName(), getBucketName(), 0, 10, 1), + getDeletedDirKeys(invalidVolumeName, invalidBucketName, 0, 10, 1)) + .flatMap(List::stream).collect(Collectors.toList()); + OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + Collections.emptyList(), deletedDirs, Collections.emptyList()); + OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + OMException omException = Assertions.assertThrows(OMException.class, + () -> omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); + Assertions.assertEquals(INVALID_KEY_NAME, omException.getResult()); + } + + @Test + public void testPreExecuteWithInvalidNumberKeys() throws Exception { + createSnapshots(true); + String invalidVolumeName = UUID.randomUUID().toString(); + String invalidBucketName = UUID.randomUUID().toString(); + addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); + List>> deletedDirs = + Stream.of(getDeletedDirKeys(getVolumeName(), getBucketName(), 0, 10, 1), + getDeletedDirKeys(invalidVolumeName, invalidBucketName, 0, 10, 10)) + .flatMap(List::stream).collect(Collectors.toList()); + List>> deletedKeys = + Stream.of(getDeletedKeys(getVolumeName(), getBucketName(), 0, 10, 10, 0), + getDeletedKeys(invalidVolumeName, invalidBucketName, 0, 10, 0, 0)) + .flatMap(List::stream).collect(Collectors.toList()); + List> renameKeys = getRenameKeys(getVolumeName(), getBucketName(), 0, 10, snapshotName1); + renameKeys.add(Pair.of(getOmMetadataManager().getRenameKey(getVolumeName(), getBucketName(), 11), null)); + OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + deletedKeys, deletedDirs, renameKeys); + OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + omRequest = omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager()); + for (OzoneManagerProtocolProtos.SnapshotMoveKeyInfos deletedDir : + omRequest.getSnapshotMoveTableKeysRequest().getDeletedDirsList()) { + Assertions.assertEquals(1, deletedDir.getKeyInfosList().size()); + } + + for (OzoneManagerProtocolProtos.SnapshotMoveKeyInfos deletedKey : + omRequest.getSnapshotMoveTableKeysRequest().getDeletedKeysList()) { + Assertions.assertNotEquals(0, deletedKey.getKeyInfosList().size()); + } + + for (HddsProtos.KeyValue renameKey : omRequest.getSnapshotMoveTableKeysRequest().getRenamedKeysList()) { + Assertions.assertTrue(renameKey.hasKey() && renameKey.hasValue()); + } + + } + + @Test + public void testPreExecuteWithInvalidRenamePrefix() throws Exception { + createSnapshots(true); + String invalidVolumeName = UUID.randomUUID().toString(); + String invalidBucketName = UUID.randomUUID().toString(); + addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); + List> renameKeys = + Stream.of(getRenameKeys(getVolumeName(), getBucketName(), 0, 10, snapshotName1), + getRenameKeys(invalidVolumeName, invalidBucketName, 0, 10, snapshotName2)).flatMap(List::stream) + .collect(Collectors.toList()); + OzoneManagerProtocolProtos.OMRequest omRequest = 
moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + Collections.emptyList(), Collections.emptyList(), renameKeys); + OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + OMException omException = Assertions.assertThrows(OMException.class, + () -> omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); + Assertions.assertEquals(INVALID_KEY_NAME, omException.getResult()); + } + + @Test + public void testValidateAndUpdateCache() throws Exception { + createSnapshots(true); + String invalidVolumeName = UUID.randomUUID().toString(); + String invalidBucketName = UUID.randomUUID().toString(); + addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); + List>> deletedKeys = getDeletedKeys(getVolumeName(), getBucketName(), 0, 10, 10, 0); + List>> deletedDirs = getDeletedDirKeys(getVolumeName(), getBucketName(), 0, 10, 1); + List> renameKeys = getRenameKeys(getVolumeName(), getBucketName(), 0, 10, snapshotName1); + OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + deletedKeys, deletedDirs, renameKeys); + OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + // perform preExecute. + omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest( + omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); + OMClientResponse omClientResponse = omSnapshotMoveTableKeysRequest.validateAndUpdateCache(getOzoneManager(), 1); + Assertions.assertTrue(omClientResponse.getOMResponse().getSuccess()); + Assertions.assertEquals(OzoneManagerProtocolProtos.Status.OK, + omClientResponse.getOMResponse().getStatus()); + } + + @Test + public void testPreExecuteWithInvalidDuplicateDeletedKey() throws Exception { + createSnapshots(true); + String invalidVolumeName = UUID.randomUUID().toString(); + String invalidBucketName = UUID.randomUUID().toString(); + addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); + List>> deletedKeys = + Stream.of(getDeletedKeys(getVolumeName(), getBucketName(), 0, 10, 10, 0), + getDeletedKeys(getVolumeName(), getBucketName(), 0, 10, 10, 0)).flatMap(List::stream) + .collect(Collectors.toList()); + OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + deletedKeys, Collections.emptyList(), Collections.emptyList()); + OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + OMException omException = Assertions.assertThrows(OMException.class, + () -> omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); + Assertions.assertEquals(INVALID_REQUEST, omException.getResult()); + } + + @Test + public void testPreExecuteWithInvalidDuplicateDeletedDir() throws Exception { + createSnapshots(true); + String invalidVolumeName = UUID.randomUUID().toString(); + String invalidBucketName = UUID.randomUUID().toString(); + addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); + List>> deletedDirs = + Stream.of(getDeletedDirKeys(getVolumeName(), getBucketName(), 0, 10, 1), + getDeletedDirKeys(getVolumeName(), getBucketName(), 0, 10, 1)).flatMap(List::stream) + .collect(Collectors.toList()); + OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + Collections.emptyList(), deletedDirs, Collections.emptyList()); + OMSnapshotMoveTableKeysRequest 
omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + OMException omException = Assertions.assertThrows(OMException.class, + () -> omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); + Assertions.assertEquals(INVALID_REQUEST, omException.getResult()); + } + + @Test + public void testPreExecuteWithInvalidDuplicateRenameKey() throws Exception { + createSnapshots(true); + String invalidVolumeName = UUID.randomUUID().toString(); + String invalidBucketName = UUID.randomUUID().toString(); + addVolumeAndBucketToDB(invalidVolumeName, invalidBucketName, getOmMetadataManager()); + List> renameKeys = + Stream.of(getRenameKeys(getVolumeName(), getBucketName(), 0, 10, snapshotName1), + getRenameKeys(getVolumeName(), getBucketName(), 0, 10, snapshotName1)) + .flatMap(List::stream).collect(Collectors.toList()); + OzoneManagerProtocolProtos.OMRequest omRequest = moveSnapshotTableKeyRequest(snapshotInfo1.getSnapshotId(), + Collections.emptyList(), Collections.emptyList(), renameKeys); + OMSnapshotMoveTableKeysRequest omSnapshotMoveTableKeysRequest = new OMSnapshotMoveTableKeysRequest(omRequest); + OMException omException = Assertions.assertThrows(OMException.class, + () -> omSnapshotMoveTableKeysRequest.preExecute(getOzoneManager())); + Assertions.assertEquals(INVALID_REQUEST, omException.getResult()); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java index d0a5559a87b8..1c44decdfdac 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java @@ -20,43 +20,28 @@ package org.apache.hadoop.ozone.om.request.snapshot; import com.google.protobuf.ByteString; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.om.IOmMetadataReader; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotManager; -import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotCreateResponse; import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotPurgeResponse; -import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotPurgeRequest; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; -import java.io.File; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.Paths; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -71,10 +56,8 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -82,49 +65,16 @@ /** * Tests OMSnapshotPurgeRequest class. */ -public class TestOMSnapshotPurgeRequestAndResponse { - private List checkpointPaths = new ArrayList<>(); - - private OzoneManager ozoneManager; - private OMMetrics omMetrics; - private OMMetadataManager omMetadataManager; - private OmSnapshotManager omSnapshotManager; - private AuditLogger auditLogger; - - private String volumeName; - private String bucketName; +public class TestOMSnapshotPurgeRequestAndResponse extends TestSnapshotRequestAndResponse { + private final List checkpointPaths = new ArrayList<>(); private String keyName; + public TestOMSnapshotPurgeRequestAndResponse() { + super(true); + } + @BeforeEach - void setup(@TempDir File testDir) throws Exception { - ozoneManager = mock(OzoneManager.class); - OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); - when(lvm.isAllowed(anyString())).thenReturn(true); - when(ozoneManager.getVersionManager()).thenReturn(lvm); - when(ozoneManager.isRatisEnabled()).thenReturn(true); - auditLogger = mock(AuditLogger.class); - when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); - omMetrics = OMMetrics.create(); - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - testDir.getAbsolutePath()); - ozoneConfiguration.set(OzoneConfigKeys.OZONE_METADATA_DIRS, - testDir.getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, - ozoneManager); - when(ozoneManager.getMetrics()).thenReturn(omMetrics); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); - when(ozoneManager.isAdmin(any())).thenReturn(true); - when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); - - ReferenceCounted rcOmMetadataReader = - mock(ReferenceCounted.class); - when(ozoneManager.getOmMetadataReader()).thenReturn(rcOmMetadataReader); - omSnapshotManager = new OmSnapshotManager(ozoneManager); - when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); - volumeName = UUID.randomUUID().toString(); - bucketName = UUID.randomUUID().toString(); + public void setup() throws Exception { keyName = UUID.randomUUID().toString(); } @@ -135,17 +85,14 @@ private List createSnapshots(int numSnapshotKeys) throws Exception { Random random = new 
Random(); - // Add volume, bucket and key entries to OM DB. - OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); // Create Snapshot and CheckpointDir List purgeSnapshots = new ArrayList<>(numSnapshotKeys); for (int i = 1; i <= numSnapshotKeys; i++) { String snapshotName = keyName + "-" + random.nextLong(); createSnapshotCheckpoint(snapshotName); - purgeSnapshots.add(SnapshotInfo.getTableKey(volumeName, - bucketName, snapshotName)); + purgeSnapshots.add(SnapshotInfo.getTableKey(getVolumeName(), + getBucketName(), snapshotName)); } return purgeSnapshots; @@ -175,39 +122,7 @@ private OMRequest createPurgeKeysRequest(List purgeSnapshotKeys) { * Create snapshot and checkpoint directory. */ private void createSnapshotCheckpoint(String snapshotName) throws Exception { - createSnapshotCheckpoint(volumeName, bucketName, snapshotName); - } - - private void createSnapshotCheckpoint(String volume, - String bucket, - String snapshotName) throws Exception { - OMRequest omRequest = OMRequestTestUtils - .createSnapshotRequest(volume, bucket, snapshotName); - // Pre-Execute OMSnapshotCreateRequest. - OMSnapshotCreateRequest omSnapshotCreateRequest = - TestOMSnapshotCreateRequest.doPreExecute(omRequest, ozoneManager); - - // validateAndUpdateCache OMSnapshotCreateResponse. - OMSnapshotCreateResponse omClientResponse = (OMSnapshotCreateResponse) - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); - // Add to batch and commit to DB. - try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { - omClientResponse.addToDBBatch(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); - } - - String key = SnapshotInfo.getTableKey(volume, bucket, snapshotName); - SnapshotInfo snapshotInfo = - omMetadataManager.getSnapshotInfoTable().get(key); - assertNotNull(snapshotInfo); - - RDBStore store = (RDBStore) omMetadataManager.getStore(); - String checkpointPrefix = store.getDbLocation().getName(); - Path snapshotDirPath = Paths.get(store.getSnapshotsParentDir(), - checkpointPrefix + snapshotInfo.getCheckpointDir()); - // Check the DB is still there - assertTrue(Files.exists(snapshotDirPath)); - checkpointPaths.add(snapshotDirPath); + checkpointPaths.add(createSnapshotCheckpoint(getVolumeName(), getBucketName(), snapshotName)); } private OMSnapshotPurgeRequest preExecute(OMRequest originalOmRequest) @@ -215,7 +130,7 @@ private OMSnapshotPurgeRequest preExecute(OMRequest originalOmRequest) OMSnapshotPurgeRequest omSnapshotPurgeRequest = new OMSnapshotPurgeRequest(originalOmRequest); OMRequest modifiedOmRequest = omSnapshotPurgeRequest - .preExecute(ozoneManager); + .preExecute(getOzoneManager()); return new OMSnapshotPurgeRequest(modifiedOmRequest); } @@ -227,48 +142,48 @@ private void purgeSnapshots(OMRequest snapshotPurgeRequest) // validateAndUpdateCache for OMSnapshotPurgeRequest. OMSnapshotPurgeResponse omSnapshotPurgeResponse = (OMSnapshotPurgeResponse) - omSnapshotPurgeRequest.validateAndUpdateCache(ozoneManager, 200L); + omSnapshotPurgeRequest.validateAndUpdateCache(getOzoneManager(), 200L); // Commit to DB. 
- try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { - omSnapshotPurgeResponse.checkAndUpdateDB(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); + try (BatchOperation batchOperation = getOmMetadataManager().getStore().initBatchOperation()) { + omSnapshotPurgeResponse.checkAndUpdateDB(getOmMetadataManager(), batchOperation); + getOmMetadataManager().getStore().commitBatchOperation(batchOperation); } } @Test public void testValidateAndUpdateCache() throws Exception { - long initialSnapshotPurgeCount = omMetrics.getNumSnapshotPurges(); - long initialSnapshotPurgeFailCount = omMetrics.getNumSnapshotPurgeFails(); + long initialSnapshotPurgeCount = getOmMetrics().getNumSnapshotPurges(); + long initialSnapshotPurgeFailCount = getOmMetrics().getNumSnapshotPurgeFails(); List snapshotDbKeysToPurge = createSnapshots(10); - assertFalse(omMetadataManager.getSnapshotInfoTable().isEmpty()); + assertFalse(getOmMetadataManager().getSnapshotInfoTable().isEmpty()); OMRequest snapshotPurgeRequest = createPurgeKeysRequest( snapshotDbKeysToPurge); OMSnapshotPurgeRequest omSnapshotPurgeRequest = preExecute(snapshotPurgeRequest); OMSnapshotPurgeResponse omSnapshotPurgeResponse = (OMSnapshotPurgeResponse) - omSnapshotPurgeRequest.validateAndUpdateCache(ozoneManager, 200L); + omSnapshotPurgeRequest.validateAndUpdateCache(getOzoneManager(), 200L); for (String snapshotTableKey: snapshotDbKeysToPurge) { - assertNull(omMetadataManager.getSnapshotInfoTable().get(snapshotTableKey)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(snapshotTableKey)); } - try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { - omSnapshotPurgeResponse.checkAndUpdateDB(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); + try (BatchOperation batchOperation = getOmMetadataManager().getStore().initBatchOperation()) { + omSnapshotPurgeResponse.checkAndUpdateDB(getOmMetadataManager(), batchOperation); + getOmMetadataManager().getStore().commitBatchOperation(batchOperation); } // Check if the entries are deleted. - assertTrue(omMetadataManager.getSnapshotInfoTable().isEmpty()); + assertTrue(getOmMetadataManager().getSnapshotInfoTable().isEmpty()); // Check if all the checkpoints are cleared. 
for (Path checkpoint : checkpointPaths) { assertFalse(Files.exists(checkpoint)); } - assertEquals(initialSnapshotPurgeCount + 1, omMetrics.getNumSnapshotPurges()); - assertEquals(initialSnapshotPurgeFailCount, omMetrics.getNumSnapshotPurgeFails()); + assertEquals(initialSnapshotPurgeCount + 1, getOmMetrics().getNumSnapshotPurges()); + assertEquals(initialSnapshotPurgeFailCount, getOmMetrics().getNumSnapshotPurgeFails()); } /** @@ -276,8 +191,8 @@ public void testValidateAndUpdateCache() throws Exception { */ @Test public void testValidateAndUpdateCacheFailure() throws Exception { - long initialSnapshotPurgeCount = omMetrics.getNumSnapshotPurges(); - long initialSnapshotPurgeFailCount = omMetrics.getNumSnapshotPurgeFails(); + long initialSnapshotPurgeCount = getOmMetrics().getNumSnapshotPurges(); + long initialSnapshotPurgeFailCount = getOmMetrics().getNumSnapshotPurgeFails(); List snapshotDbKeysToPurge = createSnapshots(10); @@ -286,17 +201,17 @@ public void testValidateAndUpdateCacheFailure() throws Exception { when(mockedSnapshotInfoTable.get(anyString())).thenThrow(new IOException("Injected fault error.")); when(mockedMetadataManager.getSnapshotInfoTable()).thenReturn(mockedSnapshotInfoTable); - when(ozoneManager.getMetadataManager()).thenReturn(mockedMetadataManager); + when(getOzoneManager().getMetadataManager()).thenReturn(mockedMetadataManager); OMRequest snapshotPurgeRequest = createPurgeKeysRequest(snapshotDbKeysToPurge); OMSnapshotPurgeRequest omSnapshotPurgeRequest = preExecute(snapshotPurgeRequest); OMSnapshotPurgeResponse omSnapshotPurgeResponse = (OMSnapshotPurgeResponse) - omSnapshotPurgeRequest.validateAndUpdateCache(ozoneManager, 200L); + omSnapshotPurgeRequest.validateAndUpdateCache(getOzoneManager(), 200L); assertEquals(INTERNAL_ERROR, omSnapshotPurgeResponse.getOMResponse().getStatus()); - assertEquals(initialSnapshotPurgeCount, omMetrics.getNumSnapshotPurges()); - assertEquals(initialSnapshotPurgeFailCount + 1, omMetrics.getNumSnapshotPurgeFails()); + assertEquals(initialSnapshotPurgeCount, getOmMetrics().getNumSnapshotPurges()); + assertEquals(initialSnapshotPurgeFailCount + 1, getOmMetrics().getNumSnapshotPurgeFails()); } // TODO: clean up: Do we this test after @@ -309,7 +224,7 @@ public void testSnapshotChainCleanup(int index) throws Exception { // Before purge, check snapshot chain OmMetadataManagerImpl metadataManager = - (OmMetadataManagerImpl) omMetadataManager; + (OmMetadataManagerImpl) getOmMetadataManager(); SnapshotChainManager chainManager = metadataManager .getSnapshotChainManager(); SnapshotInfo snapInfo = metadataManager.getSnapshotInfoTable() @@ -343,8 +258,8 @@ public void testSnapshotChainCleanup(int index) throws Exception { snapInfo.getSnapshotId()); } - long rowsInTableBeforePurge = omMetadataManager - .countRowsInTable(omMetadataManager.getSnapshotInfoTable()); + long rowsInTableBeforePurge = getOmMetadataManager() + .countRowsInTable(getOmMetadataManager().getSnapshotInfoTable()); // Purge Snapshot of the given index. 
List toPurgeList = Collections.singletonList(snapShotToPurge); OMRequest snapshotPurgeRequest = createPurgeKeysRequest( @@ -367,8 +282,8 @@ public void testSnapshotChainCleanup(int index) throws Exception { .getGlobalPreviousSnapshotId(), prevGlobalSnapId); } - assertNotEquals(rowsInTableBeforePurge, omMetadataManager - .countRowsInTable(omMetadataManager.getSnapshotInfoTable())); + assertNotEquals(rowsInTableBeforePurge, getOmMetadataManager() + .countRowsInTable(getOmMetadataManager().getSnapshotInfoTable())); } private static Stream snapshotPurgeCases() { @@ -422,14 +337,14 @@ public void testSnapshotChainInSnapshotInfoTableAfterSnapshotPurge( int toIndex, boolean createInBucketOrder) throws Exception { SnapshotChainManager chainManager = - ((OmMetadataManagerImpl) omMetadataManager).getSnapshotChainManager(); + ((OmMetadataManagerImpl) getOmMetadataManager()).getSnapshotChainManager(); int totalKeys = numberOfBuckets * numberOfKeysPerBucket; List buckets = new ArrayList<>(); for (int i = 0; i < numberOfBuckets; i++) { String bucketNameLocal = "bucket-" + UUID.randomUUID(); - OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketNameLocal, - omMetadataManager); + OMRequestTestUtils.addVolumeAndBucketToDB(getVolumeName(), bucketNameLocal, + getOmMetadataManager()); buckets.add(bucketNameLocal); } @@ -440,17 +355,17 @@ public void testSnapshotChainInSnapshotInfoTableAfterSnapshotPurge( int bucketIndex = createInBucketOrder ? i : j; String bucket = buckets.get(bucketIndex % numberOfBuckets); String snapshotName = UUID.randomUUID().toString(); - createSnapshotCheckpoint(volumeName, bucket, snapshotName); + createSnapshotCheckpoint(getVolumeName(), bucket, snapshotName); String snapshotTableKey = - SnapshotInfo.getTableKey(volumeName, bucket, snapshotName); + SnapshotInfo.getTableKey(getVolumeName(), bucket, snapshotName); SnapshotInfo snapshotInfo = - omMetadataManager.getSnapshotInfoTable().get(snapshotTableKey); + getOmMetadataManager().getSnapshotInfoTable().get(snapshotTableKey); snapshotInfoList.add(snapshotInfo); } } - long numberOfSnapshotBeforePurge = omMetadataManager - .countRowsInTable(omMetadataManager.getSnapshotInfoTable()); + long numberOfSnapshotBeforePurge = getOmMetadataManager() + .countRowsInTable(getOmMetadataManager().getSnapshotInfoTable()); assertEquals(totalKeys, numberOfSnapshotBeforePurge); assertEquals(totalKeys, chainManager.getGlobalSnapshotChain().size()); Map expectedTransactionInfos = new HashMap<>(); @@ -476,7 +391,7 @@ public void testSnapshotChainInSnapshotInfoTableAfterSnapshotPurge( expectedTransactionInfos.put(chainManager.nextPathSnapshot(purgeSnapshotInfo.getSnapshotPath(), snapId), expectedLastTransactionVal); } - String purgeSnapshotKey = SnapshotInfo.getTableKey(volumeName, + String purgeSnapshotKey = SnapshotInfo.getTableKey(getVolumeName(), purgeSnapshotInfo.getBucketName(), purgeSnapshotInfo.getName()); purgeSnapshotKeys.add(purgeSnapshotKey); @@ -489,17 +404,17 @@ public void testSnapshotChainInSnapshotInfoTableAfterSnapshotPurge( for (int i = 0; i < totalKeys; i++) { if (i < fromIndex || i > toIndex) { SnapshotInfo info = snapshotInfoList.get(i); - String snapshotKey = SnapshotInfo.getTableKey(volumeName, + String snapshotKey = SnapshotInfo.getTableKey(getVolumeName(), info.getBucketName(), info.getName()); snapshotInfoListAfterPurge.add( - omMetadataManager.getSnapshotInfoTable().get(snapshotKey)); + getOmMetadataManager().getSnapshotInfoTable().get(snapshotKey)); } } long expectNumberOfSnapshotAfterPurge = totalKeys - (toIndex - 
fromIndex + 1); - long actualNumberOfSnapshotAfterPurge = omMetadataManager - .countRowsInTable(omMetadataManager.getSnapshotInfoTable()); + long actualNumberOfSnapshotAfterPurge = getOmMetadataManager() + .countRowsInTable(getOmMetadataManager().getSnapshotInfoTable()); assertEquals(expectNumberOfSnapshotAfterPurge, actualNumberOfSnapshotAfterPurge); assertEquals(expectNumberOfSnapshotAfterPurge, chainManager @@ -516,7 +431,7 @@ private void validateSnapshotOrderInSnapshotInfoTableAndSnapshotChain( assertEquals(snapshotInfo.getLastTransactionInfo(), expectedTransactionInfos.get(snapshotInfo.getSnapshotId())); } OmMetadataManagerImpl metadataManager = - (OmMetadataManagerImpl) omMetadataManager; + (OmMetadataManagerImpl) getOmMetadataManager(); SnapshotChainManager chainManager = metadataManager .getSnapshotChainManager(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java index ab2bac1bd0e2..a746597288aa 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java @@ -17,17 +17,8 @@ package org.apache.hadoop.ozone.om.request.snapshot; import org.apache.hadoop.hdds.client.RatisReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.AuditMessage; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -35,17 +26,14 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.util.Time; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; -import java.io.File; import java.util.UUID; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; @@ -62,75 +50,19 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.framework; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; /** * Tests OMSnapshotRenameRequest 
class, which handles RenameSnapshot request. */ -public class TestOMSnapshotRenameRequest { - - @TempDir - private File anotherTempDir; - - private OzoneManager ozoneManager; - private OMMetrics omMetrics; - private OmMetadataManagerImpl omMetadataManager; - private BatchOperation batchOperation; - - private String volumeName; - private String bucketName; +public class TestOMSnapshotRenameRequest extends TestSnapshotRequestAndResponse { private String snapshotName1; private String snapshotName2; @BeforeEach public void setup() throws Exception { - ozoneManager = mock(OzoneManager.class); - omMetrics = OMMetrics.create(); - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - anotherTempDir.getAbsolutePath()); - ozoneConfiguration.set(OzoneConfigKeys.OZONE_METADATA_DIRS, - anotherTempDir.getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, - ozoneManager); - when(ozoneManager.getMetrics()).thenReturn(omMetrics); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - when(ozoneManager.isRatisEnabled()).thenReturn(true); - when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); - when(ozoneManager.isAdmin(any())).thenReturn(false); - when(ozoneManager.isOwner(any(), any())).thenReturn(false); - when(ozoneManager.getBucketOwner(any(), any(), - any(), any())).thenReturn("dummyBucketOwner"); - OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); - when(lvm.isAllowed(anyString())).thenReturn(true); - when(ozoneManager.getVersionManager()).thenReturn(lvm); - AuditLogger auditLogger = mock(AuditLogger.class); - when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); - doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); - OmSnapshotManager omSnapshotManager = new OmSnapshotManager(ozoneManager); - when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); - - volumeName = UUID.randomUUID().toString(); - bucketName = UUID.randomUUID().toString(); snapshotName1 = UUID.randomUUID().toString(); snapshotName2 = UUID.randomUUID().toString(); - OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - } - - @AfterEach - public void stop() { - omMetrics.unRegister(); - framework().clearInlineMocks(); - if (batchOperation != null) { - batchOperation.close(); - } } @ValueSource(strings = { @@ -143,11 +75,11 @@ public void stop() { }) @ParameterizedTest public void testPreExecute(String toSnapshotName) throws Exception { - when(ozoneManager.isOwner(any(), any())).thenReturn(true); + when(getOzoneManager().isOwner(any(), any())).thenReturn(true); String currentSnapshotName = "current"; - OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, - bucketName, currentSnapshotName, toSnapshotName); + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(getVolumeName(), + getBucketName(), currentSnapshotName, toSnapshotName); doPreExecute(omRequest); } @@ -167,10 +99,10 @@ public void testPreExecute(String toSnapshotName) throws Exception { }) @ParameterizedTest public void testPreExecuteFailure(String toSnapshotName) { - when(ozoneManager.isOwner(any(), any())).thenReturn(true); + when(getOzoneManager().isOwner(any(), any())).thenReturn(true); String currentSnapshotName = "current"; - OzoneManagerProtocolProtos.OMRequest 
omRequest = renameSnapshotRequest(volumeName, - bucketName, currentSnapshotName, toSnapshotName); + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(getVolumeName(), + getBucketName(), currentSnapshotName, toSnapshotName); OMException omException = assertThrows(OMException.class, () -> doPreExecute(omRequest)); assertTrue(omException.getMessage().contains("Invalid snapshot name: " + toSnapshotName)); @@ -179,8 +111,8 @@ public void testPreExecuteFailure(String toSnapshotName) { @Test public void testPreExecuteBadOwner() { // Owner is not set for the request. - OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, - bucketName, snapshotName1, snapshotName2); + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(getVolumeName(), + getBucketName(), snapshotName1, snapshotName2); OMException omException = assertThrows(OMException.class, () -> doPreExecute(omRequest)); @@ -190,39 +122,39 @@ public void testPreExecuteBadOwner() { @Test public void testValidateAndUpdateCache() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); - OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, - bucketName, snapshotName1, snapshotName2); + when(getOzoneManager().isAdmin(any())).thenReturn(true); + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(getVolumeName(), + getBucketName(), snapshotName1, snapshotName2); OMSnapshotRenameRequest omSnapshotRenameRequest = doPreExecute(omRequest); - String key = getTableKey(volumeName, bucketName, snapshotName1); - String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + String key = getTableKey(getVolumeName(), getBucketName(), snapshotName1); + String bucketKey = getOmMetadataManager().getBucketKey(getVolumeName(), getBucketName()); // Add a 1000-byte key to the bucket OmKeyInfo key1 = addKey("key-testValidateAndUpdateCache", 12345L); addKeyToTable(key1); - OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get( + OmBucketInfo omBucketInfo = getOmMetadataManager().getBucketTable().get( bucketKey); long bucketDataSize = key1.getDataSize(); long bucketUsedBytes = omBucketInfo.getUsedBytes(); assertEquals(key1.getReplicatedSize(), bucketUsedBytes); // Value in cache should be null as of now. - assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(key)); // Add key to cache. - SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(volumeName, bucketName, + SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(getVolumeName(), getBucketName(), snapshotName1, UUID.randomUUID(), Time.now()); snapshotInfo.setReferencedSize(1000L); snapshotInfo.setReferencedReplicatedSize(3 * 1000L); assertEquals(SNAPSHOT_ACTIVE, snapshotInfo.getSnapshotStatus()); - omMetadataManager.getSnapshotInfoTable().addCacheEntry( + getOmMetadataManager().getSnapshotInfoTable().addCacheEntry( new CacheKey<>(key), CacheValue.get(1L, snapshotInfo)); // Run validateAndUpdateCache. 
OMClientResponse omClientResponse = - omSnapshotRenameRequest.validateAndUpdateCache(ozoneManager, 2L); + omSnapshotRenameRequest.validateAndUpdateCache(getOzoneManager(), 2L); assertNotNull(omClientResponse.getOMResponse()); @@ -244,56 +176,56 @@ public void testValidateAndUpdateCache() throws Exception { SnapshotInfo snapshotInfoOldProto = getFromProtobuf(snapshotInfoProto); - String key2 = getTableKey(volumeName, bucketName, snapshotName2); + String key2 = getTableKey(getVolumeName(), getBucketName(), snapshotName2); // Get value from cache SnapshotInfo snapshotInfoNewInCache = - omMetadataManager.getSnapshotInfoTable().get(key2); + getOmMetadataManager().getSnapshotInfoTable().get(key2); assertNotNull(snapshotInfoNewInCache); assertEquals(snapshotInfoOldProto, snapshotInfoNewInCache); assertEquals(snapshotInfo.getSnapshotId(), snapshotInfoNewInCache.getSnapshotId()); SnapshotInfo snapshotInfoOldInCache = - omMetadataManager.getSnapshotInfoTable().get(key); + getOmMetadataManager().getSnapshotInfoTable().get(key); assertNull(snapshotInfoOldInCache); } @Test public void testEntryExists() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); + when(getOzoneManager().isAdmin(any())).thenReturn(true); - String keyNameOld = getTableKey(volumeName, bucketName, snapshotName1); - String keyNameNew = getTableKey(volumeName, bucketName, snapshotName2); + String keyNameOld = getTableKey(getVolumeName(), getBucketName(), snapshotName1); + String keyNameNew = getTableKey(getVolumeName(), getBucketName(), snapshotName2); - assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); - assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameOld)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameNew)); // First make sure we have two snapshots. OzoneManagerProtocolProtos.OMRequest createOmRequest = - createSnapshotRequest(volumeName, bucketName, snapshotName1); + createSnapshotRequest(getVolumeName(), getBucketName(), snapshotName1); OMSnapshotCreateRequest omSnapshotCreateRequest = - TestOMSnapshotCreateRequest.doPreExecute(createOmRequest, ozoneManager); - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); + TestOMSnapshotCreateRequest.doPreExecute(createOmRequest, getOzoneManager()); + omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 1); createOmRequest = - createSnapshotRequest(volumeName, bucketName, snapshotName2); + createSnapshotRequest(getVolumeName(), getBucketName(), snapshotName2); omSnapshotCreateRequest = - TestOMSnapshotCreateRequest.doPreExecute(createOmRequest, ozoneManager); - omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 2); + TestOMSnapshotCreateRequest.doPreExecute(createOmRequest, getOzoneManager()); + omSnapshotCreateRequest.validateAndUpdateCache(getOzoneManager(), 2); - assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); - assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + assertNotNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameOld)); + assertNotNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameNew)); // Now try renaming and get an error. 
OzoneManagerProtocolProtos.OMRequest omRequest = - renameSnapshotRequest(volumeName, bucketName, snapshotName1, snapshotName2); + renameSnapshotRequest(getVolumeName(), getBucketName(), snapshotName1, snapshotName2); OMSnapshotRenameRequest omSnapshotRenameRequest = doPreExecute(omRequest); OMClientResponse omClientResponse = - omSnapshotRenameRequest.validateAndUpdateCache(ozoneManager, 3); + omSnapshotRenameRequest.validateAndUpdateCache(getOzoneManager(), 3); - assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); - assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + assertNotNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameOld)); + assertNotNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameNew)); OzoneManagerProtocolProtos.OMResponse omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse.getRenameSnapshotResponse()); @@ -303,24 +235,24 @@ public void testEntryExists() throws Exception { @Test public void testEntryNotFound() throws Exception { - when(ozoneManager.isAdmin(any())).thenReturn(true); + when(getOzoneManager().isAdmin(any())).thenReturn(true); - String keyNameOld = getTableKey(volumeName, bucketName, snapshotName1); - String keyNameNew = getTableKey(volumeName, bucketName, snapshotName2); + String keyNameOld = getTableKey(getVolumeName(), getBucketName(), snapshotName1); + String keyNameNew = getTableKey(getVolumeName(), getBucketName(), snapshotName2); - assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); - assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameOld)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameNew)); // Now try renaming and get an error. 
OzoneManagerProtocolProtos.OMRequest omRequest = - renameSnapshotRequest(volumeName, bucketName, snapshotName1, snapshotName2); + renameSnapshotRequest(getVolumeName(), getBucketName(), snapshotName1, snapshotName2); OMSnapshotRenameRequest omSnapshotRenameRequest = doPreExecute(omRequest); OMClientResponse omClientResponse = - omSnapshotRenameRequest.validateAndUpdateCache(ozoneManager, 3); + omSnapshotRenameRequest.validateAndUpdateCache(getOzoneManager(), 3); - assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); - assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameOld)); + assertNull(getOmMetadataManager().getSnapshotInfoTable().get(keyNameNew)); OzoneManagerProtocolProtos.OMResponse omResponse = omClientResponse.getOMResponse(); assertNotNull(omResponse.getRenameSnapshotResponse()); @@ -330,7 +262,7 @@ public void testEntryNotFound() throws Exception { private OMSnapshotRenameRequest doPreExecute( OzoneManagerProtocolProtos.OMRequest originalRequest) throws Exception { - return doPreExecute(originalRequest, ozoneManager); + return doPreExecute(originalRequest, getOzoneManager()); } public static OMSnapshotRenameRequest doPreExecute( @@ -344,15 +276,15 @@ public static OMSnapshotRenameRequest doPreExecute( } private OmKeyInfo addKey(String keyName, long objectId) { - return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + return OMRequestTestUtils.createOmKeyInfo(getVolumeName(), getBucketName(), keyName, RatisReplicationConfig.getInstance(THREE)).setObjectID(objectId) .build(); } protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception { OMRequestTestUtils.addKeyToTable(false, true, keyInfo, 0, 0L, - omMetadataManager); - return omMetadataManager.getOzoneKey(keyInfo.getVolumeName(), + getOmMetadataManager()); + return getOmMetadataManager().getOzoneKey(keyInfo.getVolumeName(), keyInfo.getBucketName(), keyInfo.getKeyName()); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java index b5bfc2714b0f..380922f9e225 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java @@ -18,32 +18,23 @@ */ package org.apache.hadoop.ozone.om.request.snapshot; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotSetPropertyResponse; -import 
org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.io.TempDir; -import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -60,37 +51,13 @@ * Tests TestOMSnapshotSetPropertyRequest * TestOMSnapshotSetPropertyResponse class. */ -public class TestOMSnapshotSetPropertyRequestAndResponse { - private BatchOperation batchOperation; - private OzoneManager ozoneManager; - private OMMetadataManager omMetadataManager; - private OMMetrics omMetrics; - private String volumeName; - private String bucketName; +public class TestOMSnapshotSetPropertyRequestAndResponse extends TestSnapshotRequestAndResponse { private String snapName; private long exclusiveSize; private long exclusiveSizeAfterRepl; @BeforeEach - void setup(@TempDir File testDir) throws Exception { - omMetrics = OMMetrics.create(); - ozoneManager = mock(OzoneManager.class); - OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); - when(lvm.isAllowed(anyString())).thenReturn(true); - when(ozoneManager.getVersionManager()).thenReturn(lvm); - when(ozoneManager.isRatisEnabled()).thenReturn(true); - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - testDir.getAbsolutePath()); - ozoneConfiguration.set(OzoneConfigKeys.OZONE_METADATA_DIRS, - testDir.getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, - ozoneManager); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - when(ozoneManager.getMetrics()).thenReturn(omMetrics); - - volumeName = UUID.randomUUID().toString(); - bucketName = UUID.randomUUID().toString(); + void setup() { snapName = UUID.randomUUID().toString(); exclusiveSize = 2000L; exclusiveSizeAfterRepl = 6000L; @@ -98,11 +65,11 @@ void setup(@TempDir File testDir) throws Exception { @Test public void testValidateAndUpdateCache() throws IOException { - long initialSnapshotSetPropertyCount = omMetrics.getNumSnapshotSetProperties(); - long initialSnapshotSetPropertyFailCount = omMetrics.getNumSnapshotSetPropertyFails(); + long initialSnapshotSetPropertyCount = getOmMetrics().getNumSnapshotSetProperties(); + long initialSnapshotSetPropertyFailCount = getOmMetrics().getNumSnapshotSetPropertyFails(); createSnapshotDataForTest(); - assertFalse(omMetadataManager.getSnapshotInfoTable().isEmpty()); + assertFalse(getOmMetadataManager().getSnapshotInfoTable().isEmpty()); List snapshotUpdateSizeRequests = createSnapshotUpdateSizeRequest(); @@ -111,28 +78,27 @@ public void testValidateAndUpdateCache() throws IOException { OMSnapshotSetPropertyRequest omSnapshotSetPropertyRequest = new OMSnapshotSetPropertyRequest(request); OMRequest modifiedOmRequest = omSnapshotSetPropertyRequest - .preExecute(ozoneManager); + .preExecute(getOzoneManager()); omSnapshotSetPropertyRequest = new OMSnapshotSetPropertyRequest(modifiedOmRequest); // Validate and Update Cache OMSnapshotSetPropertyResponse omSnapshotSetPropertyResponse = 
(OMSnapshotSetPropertyResponse) omSnapshotSetPropertyRequest - .validateAndUpdateCache(ozoneManager, 200L); + .validateAndUpdateCache(getOzoneManager(), 200L); // Commit to DB. - batchOperation = omMetadataManager.getStore().initBatchOperation(); - omSnapshotSetPropertyResponse.checkAndUpdateDB(omMetadataManager, - batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); + omSnapshotSetPropertyResponse.checkAndUpdateDB(getOmMetadataManager(), + getBatchOperation()); + getOmMetadataManager().getStore().commitBatchOperation(getBatchOperation()); } assertEquals(initialSnapshotSetPropertyCount + snapshotUpdateSizeRequests.size(), - omMetrics.getNumSnapshotSetProperties()); - assertEquals(initialSnapshotSetPropertyFailCount, omMetrics.getNumSnapshotSetPropertyFails()); + getOmMetrics().getNumSnapshotSetProperties()); + assertEquals(initialSnapshotSetPropertyFailCount, getOmMetrics().getNumSnapshotSetPropertyFails()); // Check if the exclusive size is set. try (TableIterator> - iterator = omMetadataManager.getSnapshotInfoTable().iterator()) { + iterator = getOmMetadataManager().getSnapshotInfoTable().iterator()) { while (iterator.hasNext()) { Table.KeyValue snapshotEntry = iterator.next(); assertCacheValues(snapshotEntry.getKey()); @@ -149,11 +115,11 @@ public void testValidateAndUpdateCache() throws IOException { */ @Test public void testValidateAndUpdateCacheFailure() throws IOException { - long initialSnapshotSetPropertyCount = omMetrics.getNumSnapshotSetProperties(); - long initialSnapshotSetPropertyFailCount = omMetrics.getNumSnapshotSetPropertyFails(); + long initialSnapshotSetPropertyCount = getOmMetrics().getNumSnapshotSetProperties(); + long initialSnapshotSetPropertyFailCount = getOmMetrics().getNumSnapshotSetPropertyFails(); createSnapshotDataForTest(); - assertFalse(omMetadataManager.getSnapshotInfoTable().isEmpty()); + assertFalse(getOmMetadataManager().getSnapshotInfoTable().isEmpty()); List snapshotUpdateSizeRequests = createSnapshotUpdateSizeRequest(); OmMetadataManagerImpl mockedMetadataManager = mock(OmMetadataManagerImpl.class); @@ -161,27 +127,27 @@ public void testValidateAndUpdateCacheFailure() throws IOException { when(mockedSnapshotInfoTable.get(anyString())).thenThrow(new IOException("Injected fault error.")); when(mockedMetadataManager.getSnapshotInfoTable()).thenReturn(mockedSnapshotInfoTable); - when(ozoneManager.getMetadataManager()).thenReturn(mockedMetadataManager); + when(getOzoneManager().getMetadataManager()).thenReturn(mockedMetadataManager); for (OMRequest omRequest: snapshotUpdateSizeRequests) { OMSnapshotSetPropertyRequest omSnapshotSetPropertyRequest = new OMSnapshotSetPropertyRequest(omRequest); - OMRequest modifiedOmRequest = omSnapshotSetPropertyRequest.preExecute(ozoneManager); + OMRequest modifiedOmRequest = omSnapshotSetPropertyRequest.preExecute(getOzoneManager()); omSnapshotSetPropertyRequest = new OMSnapshotSetPropertyRequest(modifiedOmRequest); // Validate and Update Cache OMSnapshotSetPropertyResponse omSnapshotSetPropertyResponse = (OMSnapshotSetPropertyResponse) - omSnapshotSetPropertyRequest.validateAndUpdateCache(ozoneManager, 200L); + omSnapshotSetPropertyRequest.validateAndUpdateCache(getOzoneManager(), 200L); assertEquals(INTERNAL_ERROR, omSnapshotSetPropertyResponse.getOMResponse().getStatus()); } - assertEquals(initialSnapshotSetPropertyCount, omMetrics.getNumSnapshotSetProperties()); + assertEquals(initialSnapshotSetPropertyCount, getOmMetrics().getNumSnapshotSetProperties()); 
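[Editor's note] The failure-path test above injects an IOException from a mocked SnapshotInfoTable so that validateAndUpdateCache returns INTERNAL_ERROR and only the failure metric moves. A stand-alone sketch of that Mockito fault-injection idiom follows; KeyValueLookup is a made-up stand-in for the mocked table, not an Ozone type:

```java
// Illustrative sketch only: forcing an error path with Mockito's thenThrow,
// the same way the test stubs getSnapshotInfoTable().get(anyString()).
import java.io.IOException;

import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public final class FaultInjectionSketch {

  interface KeyValueLookup {
    String get(String key) throws IOException;
  }

  public static void main(String[] args) {
    KeyValueLookup lookup = mock(KeyValueLookup.class);
    try {
      // Every lookup now fails with the injected fault.
      when(lookup.get(anyString())).thenThrow(new IOException("Injected fault error."));
      lookup.get("someKey");
      System.out.println("unexpected: lookup succeeded");
    } catch (IOException expected) {
      // Code under test is expected to translate this into a failed response and
      // bump its failure metric, which is what the assertions above verify.
      System.out.println("caught: " + expected.getMessage());
    }
  }
}
```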
assertEquals(initialSnapshotSetPropertyFailCount + snapshotUpdateSizeRequests.size(), - omMetrics.getNumSnapshotSetPropertyFails()); + getOmMetrics().getNumSnapshotSetPropertyFails()); } private void assertCacheValues(String dbKey) { - CacheValue cacheValue = omMetadataManager + CacheValue cacheValue = getOmMetadataManager() .getSnapshotInfoTable() .getCacheValue(new CacheKey<>(dbKey)); assertEquals(exclusiveSize, cacheValue.getCacheValue().getExclusiveSize()); @@ -193,7 +159,7 @@ private List createSnapshotUpdateSizeRequest() throws IOException { List omRequests = new ArrayList<>(); try (TableIterator> - iterator = omMetadataManager.getSnapshotInfoTable().iterator()) { + iterator = getOmMetadataManager().getSnapshotInfoTable().iterator()) { while (iterator.hasNext()) { String snapDbKey = iterator.next().getKey(); SnapshotSize snapshotSize = SnapshotSize.newBuilder() @@ -220,8 +186,8 @@ private List createSnapshotUpdateSizeRequest() private void createSnapshotDataForTest() throws IOException { // Create 10 Snapshots for (int i = 0; i < 10; i++) { - OMRequestTestUtils.addSnapshotToTableCache(volumeName, bucketName, - snapName + i, omMetadataManager); + OMRequestTestUtils.addSnapshotToTableCache(getVolumeName(), getBucketName(), + snapName + i, getOmMetadataManager()); } } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotMoveTableKeysResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotMoveTableKeysResponse.java new file mode 100644 index 000000000000..d2e2d94ec739 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotMoveTableKeysResponse.java @@ -0,0 +1,199 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ +package org.apache.hadoop.ozone.om.response.snapshot; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.ClientVersion; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; +import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import java.util.stream.LongStream; + +/** + * Test class to test OMSnapshotMoveTableKeysResponse. + */ +public class TestOMSnapshotMoveTableKeysResponse extends TestSnapshotRequestAndResponse { + + private String snapshotName1; + private String snapshotName2; + private SnapshotInfo snapshotInfo1; + private SnapshotInfo snapshotInfo2; + + @BeforeEach + public void setup() throws Exception { + snapshotName1 = UUID.randomUUID().toString(); + snapshotName2 = UUID.randomUUID().toString(); + } + + public TestOMSnapshotMoveTableKeysResponse() { + super(true); + } + + private void createSnapshots(boolean createSecondSnapshot) throws Exception { + addDataToTable(getOmMetadataManager().getSnapshotRenamedTable(), getRenameKeys(getVolumeName(), getBucketName(), 0, + 10, snapshotName1)); + addDataToTable(getOmMetadataManager().getDeletedTable(), getDeletedKeys(getVolumeName(), getBucketName(), 0, + 10, 10, 0).stream() + .map(pair -> Pair.of(pair.getKey(), new RepeatedOmKeyInfo(pair.getRight()))) + .collect(Collectors.toList())); + addDataToTable(getOmMetadataManager().getDeletedDirTable(), + getDeletedDirKeys(getVolumeName(), getBucketName(), 0, 10, 1).stream() + .map(pair -> Pair.of(pair.getKey(), pair.getRight().get(0))).collect(Collectors.toList())); + createSnapshotCheckpoint(getVolumeName(), getBucketName(), snapshotName1); + snapshotInfo1 = SnapshotUtils.getSnapshotInfo(getOzoneManager(), getVolumeName(), getBucketName(), snapshotName1); + addDataToTable(getOmMetadataManager().getSnapshotRenamedTable(), getRenameKeys(getVolumeName(), getBucketName(), 5, + 15, snapshotName2)); + addDataToTable(getOmMetadataManager().getDeletedTable(), getDeletedKeys(getVolumeName(), getBucketName(), 5, + 8, 10, 10).stream() + .map(pair -> Pair.of(pair.getKey(), new RepeatedOmKeyInfo(pair.getRight()))) + .collect(Collectors.toList())); + addDataToTable(getOmMetadataManager().getDeletedTable(), getDeletedKeys(getVolumeName(), getBucketName(), 8, + 15, 10, 0).stream() + .map(pair -> Pair.of(pair.getKey(), new RepeatedOmKeyInfo(pair.getRight()))) + .collect(Collectors.toList())); + addDataToTable(getOmMetadataManager().getDeletedDirTable(), + getDeletedDirKeys(getVolumeName(), getBucketName(), 5, 15, 1).stream() + .map(pair -> 
Pair.of(pair.getKey(), pair.getRight().get(0))).collect(Collectors.toList())); + if (createSecondSnapshot) { + createSnapshotCheckpoint(getVolumeName(), getBucketName(), snapshotName2); + snapshotInfo2 = SnapshotUtils.getSnapshotInfo(getOzoneManager(), getVolumeName(), getBucketName(), snapshotName2); + } + } + + private void addDataToTable(Table table, List> vals) throws IOException { + for (Pair pair : vals) { + table.put(pair.getKey(), pair.getValue()); + } + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testMoveTableKeysToNextSnapshot(boolean nextSnapshotExists) throws Exception { + createSnapshots(nextSnapshotExists); + + try (ReferenceCounted snapshot1 = getOmSnapshotManager().getSnapshot(getVolumeName(), getBucketName(), + snapshotName1); + ReferenceCounted snapshot2 = nextSnapshotExists ? getOmSnapshotManager().getSnapshot( + getVolumeName(), getBucketName(), snapshotName2) : null) { + OmSnapshot snapshot = snapshot1.get(); + List deletedTable = new ArrayList<>(); + List deletedDirTable = new ArrayList<>(); + List renamedTable = new ArrayList<>(); + Map renameEntries = new HashMap<>(); + snapshot.getMetadataManager().getDeletedTable().iterator() + .forEachRemaining(entry -> { + try { + deletedTable.add(OzoneManagerProtocolProtos.SnapshotMoveKeyInfos.newBuilder().setKey(entry.getKey()) + .addAllKeyInfos(entry.getValue().getOmKeyInfoList().stream().map(omKeyInfo -> omKeyInfo.getProtobuf( + ClientVersion.CURRENT_VERSION)).collect(Collectors.toList())).build()); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + + snapshot.getMetadataManager().getDeletedDirTable().iterator() + .forEachRemaining(entry -> { + try { + deletedDirTable.add(OzoneManagerProtocolProtos.SnapshotMoveKeyInfos.newBuilder().setKey(entry.getKey()) + .addKeyInfos(entry.getValue().getProtobuf(ClientVersion.CURRENT_VERSION)).build()); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + snapshot.getMetadataManager().getSnapshotRenamedTable().iterator().forEachRemaining(entry -> { + try { + renamedTable.add(HddsProtos.KeyValue.newBuilder().setKey(entry.getKey()).setValue(entry.getValue()).build()); + renameEntries.put(entry.getKey(), entry.getValue()); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + OMSnapshotMoveTableKeysResponse response = new OMSnapshotMoveTableKeysResponse( + OzoneManagerProtocolProtos.OMResponse.newBuilder().setStatus(OzoneManagerProtocolProtos.Status.OK) + .setCmdType(OzoneManagerProtocolProtos.Type.SnapshotMoveTableKeys).build(), + snapshotInfo1, nextSnapshotExists ? snapshotInfo2 : null, deletedTable, deletedDirTable, renamedTable); + try (BatchOperation batchOperation = getOmMetadataManager().getStore().initBatchOperation()) { + response.addToDBBatch(getOmMetadataManager(), batchOperation); + getOmMetadataManager().getStore().commitBatchOperation(batchOperation); + } + Assertions.assertTrue(snapshot.getMetadataManager().getDeletedTable().isEmpty()); + Assertions.assertTrue(snapshot.getMetadataManager().getDeletedDirTable().isEmpty()); + Assertions.assertTrue(snapshot.getMetadataManager().getSnapshotRenamedTable().isEmpty()); + OMMetadataManager nextMetadataManager = + nextSnapshotExists ? snapshot2.get().getMetadataManager() : getOmMetadataManager(); + AtomicInteger count = new AtomicInteger(); + nextMetadataManager.getDeletedTable().iterator().forEachRemaining(entry -> { + count.getAndIncrement(); + try { + int maxCount = count.get() >= 6 && count.get() <= 8 ? 
20 : 10; + Assertions.assertEquals(maxCount, entry.getValue().getOmKeyInfoList().size()); + List versions = entry.getValue().getOmKeyInfoList().stream().map(OmKeyInfo::getKeyLocationVersions) + .map(omKeyInfo -> omKeyInfo.get(0).getVersion()).collect(Collectors.toList()); + List expectedVersions = new ArrayList<>(); + if (maxCount == 20) { + expectedVersions.addAll(LongStream.range(10, 20).boxed().collect(Collectors.toList())); + } + expectedVersions.addAll(LongStream.range(0, 10).boxed().collect(Collectors.toList())); + Assertions.assertEquals(expectedVersions, versions); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + Assertions.assertEquals(15, count.get()); + count.set(0); + + nextMetadataManager.getDeletedDirTable().iterator().forEachRemaining(entry -> count.getAndIncrement()); + Assertions.assertEquals(15, count.get()); + count.set(0); + nextMetadataManager.getSnapshotRenamedTable().iterator().forEachRemaining(entry -> { + try { + String expectedValue = renameEntries.getOrDefault(entry.getKey(), entry.getValue()); + Assertions.assertEquals(expectedValue, entry.getValue()); + } catch (IOException e) { + throw new RuntimeException(e); + } + count.getAndIncrement(); + }); + Assertions.assertEquals(15, count.get()); + } + + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java index 9d8de4bbb202..075dad5ee03f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java @@ -18,10 +18,8 @@ package org.apache.hadoop.ozone.om.response.volume; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -31,13 +29,8 @@ import org.apache.hadoop.ozone.storage.proto. OzoneManagerStorageProtos.PersistedUserVolumeInfo; import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; -import java.nio.file.Path; import java.util.UUID; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -45,33 +38,12 @@ /** * This class tests OMVolumeCreateResponse. 
*/ -public class TestOMVolumeCreateResponse { - - @TempDir - private Path folder; - - private OMMetadataManager omMetadataManager; - private BatchOperation batchOperation; - - @BeforeEach - public void setup() throws Exception { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.toAbsolutePath().toString()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, null); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - } - - @AfterEach - public void tearDown() { - if (batchOperation != null) { - batchOperation.close(); - } - } +public class TestOMVolumeCreateResponse extends TestOMVolumeResponse { @Test public void testAddToDBBatch() throws Exception { - + OMMetadataManager omMetadataManager = getOmMetadataManager(); + BatchOperation batchOperation = getBatchOperation(); String volumeName = UUID.randomUUID().toString(); String userName = "user1"; PersistedUserVolumeInfo volumeList = PersistedUserVolumeInfo.newBuilder() @@ -79,10 +51,10 @@ public void testAddToDBBatch() throws Exception { .addVolumeNames(volumeName).build(); OMResponse omResponse = OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setSuccess(true) - .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance()) + .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) + .setStatus(OzoneManagerProtocolProtos.Status.OK) + .setSuccess(true) + .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance()) .build(); OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder() @@ -109,7 +81,8 @@ public void testAddToDBBatch() throws Exception { @Test void testAddToDBBatchNoOp() throws Exception { - + OMMetadataManager omMetadataManager = getOmMetadataManager(); + BatchOperation batchOperation = getBatchOperation(); OMResponse omResponse = OMResponse.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) .setStatus(OzoneManagerProtocolProtos.Status.VOLUME_ALREADY_EXISTS) @@ -125,6 +98,4 @@ void testAddToDBBatchNoOp() throws Exception { assertEquals(0, omMetadataManager.countRowsInTable( omMetadataManager.getVolumeTable())); } - - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java index 70dd23a7b047..e4b938811370 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java @@ -18,10 +18,8 @@ package org.apache.hadoop.ozone.om.response.volume; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -30,14 +28,9 @@ .OMResponse; import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos.PersistedUserVolumeInfo; import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.BatchOperation; 
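[Editor's note] This refactor moves the @TempDir directory, the metadata manager, and the batch operation out of each volume-response test and into the TestOMVolumeResponse base class added below, so subclasses fetch them via getOmMetadataManager() and getBatchOperation(). The sketch below shows the open-in-@BeforeEach / close-in-@AfterEach fixture shape being centralized; the BufferedWriter resource is a placeholder, not the Ozone store:

```java
// Illustrative sketch only: a @TempDir-backed resource opened per test and
// closed afterwards, the lifecycle the new base class owns for its subclasses.
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

import java.io.BufferedWriter;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

import static org.junit.jupiter.api.Assertions.assertTrue;

class TempDirFixtureSketch {
  @TempDir
  Path folder;

  private BufferedWriter writer;

  @BeforeEach
  void setup() throws IOException {
    // Every resource is rooted in the per-test temporary directory.
    writer = Files.newBufferedWriter(folder.resolve("store.db"));
  }

  @AfterEach
  void tearDown() throws IOException {
    if (writer != null) {
      writer.close();
    }
  }

  @Test
  void resourceIsUsableInTests() throws IOException {
    writer.write("row");
    writer.flush();
    assertTrue(Files.exists(folder.resolve("store.db")));
  }
}
```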
-import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; import java.util.UUID; -import java.nio.file.Path; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertNull; @@ -45,33 +38,12 @@ /** * This class tests OMVolumeCreateResponse. */ -public class TestOMVolumeDeleteResponse { - - @TempDir - private Path folder; - - private OMMetadataManager omMetadataManager; - private BatchOperation batchOperation; - - @BeforeEach - public void setup() throws Exception { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.toAbsolutePath().toString()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, null); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - } - - @AfterEach - public void tearDown() { - if (batchOperation != null) { - batchOperation.close(); - } - } +public class TestOMVolumeDeleteResponse extends TestOMVolumeResponse { @Test public void testAddToDBBatch() throws Exception { - + OMMetadataManager omMetadataManager = getOmMetadataManager(); + BatchOperation batchOperation = getBatchOperation(); String volumeName = UUID.randomUUID().toString(); String userName = "user1"; PersistedUserVolumeInfo volumeList = PersistedUserVolumeInfo.newBuilder() @@ -95,7 +67,7 @@ public void testAddToDBBatch() throws Exception { // As we are deleting updated volume list should be empty. PersistedUserVolumeInfo updatedVolumeList = PersistedUserVolumeInfo.newBuilder() - .setObjectID(1).setUpdateID(1).build(); + .setObjectID(1).setUpdateID(1).build(); OMVolumeDeleteResponse omVolumeDeleteResponse = new OMVolumeDeleteResponse(omResponse, volumeName, userName, updatedVolumeList); @@ -107,7 +79,7 @@ public void testAddToDBBatch() throws Exception { omMetadataManager.getStore().commitBatchOperation(batchOperation); assertNull(omMetadataManager.getVolumeTable().get( - omMetadataManager.getVolumeKey(volumeName))); + omMetadataManager.getVolumeKey(volumeName))); assertNull(omMetadataManager.getUserTable().get( omMetadataManager.getUserKey(userName))); @@ -115,7 +87,8 @@ public void testAddToDBBatch() throws Exception { @Test public void testAddToDBBatchNoOp() { - + OMMetadataManager omMetadataManager = getOmMetadataManager(); + BatchOperation batchOperation = getBatchOperation(); OMResponse omResponse = OMResponse.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.DeleteVolume) .setStatus(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND) @@ -127,5 +100,4 @@ public void testAddToDBBatchNoOp() { omResponse); assertDoesNotThrow(() -> omVolumeDeleteResponse.checkAndUpdateDB(omMetadataManager, batchOperation)); } - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeResponse.java new file mode 100644 index 000000000000..7edbaedf2dd5 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeResponse.java @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.volume; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.io.TempDir; + +import java.nio.file.Path; + +/** + * Base test class for OM volume response. + */ +public class TestOMVolumeResponse { + @TempDir + private Path folder; + + private OMMetadataManager omMetadataManager; + private BatchOperation batchOperation; + + @BeforeEach + public void setup() throws Exception { + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + folder.toAbsolutePath().toString()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, null); + batchOperation = omMetadataManager.getStore().initBatchOperation(); + } + + @AfterEach + public void tearDown() { + if (batchOperation != null) { + batchOperation.close(); + } + } + + protected OMMetadataManager getOmMetadataManager() { + return omMetadataManager; + } + protected BatchOperation getBatchOperation() { + return batchOperation; + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java index aa640067ca45..00da2029c1e6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java @@ -18,10 +18,8 @@ package org.apache.hadoop.ozone.om.response.volume; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -30,14 +28,9 @@ .OMResponse; import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos.PersistedUserVolumeInfo; import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; -import java.nio.file.Path; import java.util.UUID; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -45,34 +38,12 @@ /** * This class tests OMVolumeCreateResponse. 
*/ -public class TestOMVolumeSetOwnerResponse { - - @TempDir - private Path folder; - - private OMMetadataManager omMetadataManager; - private BatchOperation batchOperation; - - @BeforeEach - public void setup() throws Exception { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.toAbsolutePath().toString()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, null); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - } - - @AfterEach - public void tearDown() { - if (batchOperation != null) { - batchOperation.close(); - } - } - +public class TestOMVolumeSetOwnerResponse extends TestOMVolumeResponse { @Test public void testAddToDBBatch() throws Exception { - + OMMetadataManager omMetadataManager = getOmMetadataManager(); + BatchOperation batchOperation = getBatchOperation(); String volumeName = UUID.randomUUID().toString(); String oldOwner = "user1"; PersistedUserVolumeInfo volumeList = PersistedUserVolumeInfo.newBuilder() @@ -94,25 +65,24 @@ public void testAddToDBBatch() throws Exception { new OMVolumeCreateResponse(omResponse, omVolumeArgs, volumeList); - String newOwner = "user2"; PersistedUserVolumeInfo newOwnerVolumeList = PersistedUserVolumeInfo.newBuilder() - .setObjectID(1) - .setUpdateID(1) - .addVolumeNames(volumeName).build(); + .setObjectID(1) + .setUpdateID(1) + .addVolumeNames(volumeName).build(); PersistedUserVolumeInfo oldOwnerVolumeList = PersistedUserVolumeInfo.newBuilder() - .setObjectID(2) - .setUpdateID(2) - .build(); + .setObjectID(2) + .setUpdateID(2) + .build(); OmVolumeArgs newOwnerVolumeArgs = OmVolumeArgs.newBuilder() .setOwnerName(newOwner).setAdminName(newOwner) .setVolume(volumeName).setCreationTime(omVolumeArgs.getCreationTime()) .build(); OMVolumeSetOwnerResponse omVolumeSetOwnerResponse = - new OMVolumeSetOwnerResponse(omResponse, oldOwner, oldOwnerVolumeList, + new OMVolumeSetOwnerResponse(omResponse, oldOwner, oldOwnerVolumeList, newOwnerVolumeList, newOwnerVolumeArgs); omVolumeCreateResponse.addToDBBatch(omMetadataManager, batchOperation); @@ -139,7 +109,8 @@ public void testAddToDBBatch() throws Exception { @Test void testAddToDBBatchNoOp() throws Exception { - + OMMetadataManager omMetadataManager = getOmMetadataManager(); + BatchOperation batchOperation = getBatchOperation(); OMResponse omResponse = OMResponse.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.SetVolumeProperty) .setStatus(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND) @@ -155,6 +126,4 @@ void testAddToDBBatchNoOp() throws Exception { assertEquals(0, omMetadataManager.countRowsInTable( omMetadataManager.getVolumeTable())); } - - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java index fbc8e3c944d9..c33e9d174a92 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java @@ -18,10 +18,8 @@ package org.apache.hadoop.ozone.om.response.volume; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.ozone.om.OMMetadataManager; -import 
org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -29,14 +27,9 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMResponse; import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; -import java.nio.file.Path; import java.util.UUID; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -44,34 +37,12 @@ /** * This class tests OMVolumeCreateResponse. */ -public class TestOMVolumeSetQuotaResponse { - - @TempDir - private Path folder; - - private OMMetadataManager omMetadataManager; - private BatchOperation batchOperation; - - @BeforeEach - public void setup() throws Exception { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.toAbsolutePath().toString()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, null); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - } - - @AfterEach - public void tearDown() { - if (batchOperation != null) { - batchOperation.close(); - } - } - +public class TestOMVolumeSetQuotaResponse extends TestOMVolumeResponse { @Test public void testAddToDBBatch() throws Exception { - + OMMetadataManager omMetadataManager = getOmMetadataManager(); + BatchOperation batchOperation = getBatchOperation(); String volumeName = UUID.randomUUID().toString(); String userName = "user1"; @@ -107,7 +78,8 @@ public void testAddToDBBatch() throws Exception { @Test void testAddToDBBatchNoOp() throws Exception { - + OMMetadataManager omMetadataManager = getOmMetadataManager(); + BatchOperation batchOperation = getBatchOperation(); OMResponse omResponse = OMResponse.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) .setStatus(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND) @@ -123,6 +95,4 @@ void testAddToDBBatchNoOp() throws Exception { assertEquals(0, omMetadataManager.countRowsInTable( omMetadataManager.getVolumeTable())); } - - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java index 8dcb030d637a..04e8efa7b794 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java @@ -50,6 +50,7 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -161,4 +162,55 @@ public void testDeleteDirectoryCrossingSizeLimit() throws Exception { 500, 60000); assertThat(dirDeletingService.getRunCount().get()).isGreaterThanOrEqualTo(1); } + + @Test + public void 
testDeleteDirectoryFlatDirsHavingNoChilds() throws Exception { + OzoneConfiguration conf = createConfAndInitValues(); + OmTestManagers omTestManagers + = new OmTestManagers(conf); + KeyManager keyManager = omTestManagers.getKeyManager(); + writeClient = omTestManagers.getWriteClient(); + om = omTestManagers.getOzoneManager(); + + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + om.getMetadataManager(), BucketLayout.FILE_SYSTEM_OPTIMIZED); + String bucketKey = om.getMetadataManager().getBucketKey(volumeName, bucketName); + OmBucketInfo bucketInfo = om.getMetadataManager().getBucketTable().get(bucketKey); + + int dirCreatesCount = OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT * 2 + 100; + long parentId = 1; + OmDirectoryInfo baseDir = new OmDirectoryInfo.Builder().setName("dir_base") + .setCreationTime(Time.now()).setModificationTime(Time.now()) + .setObjectID(parentId).setParentObjectID(bucketInfo.getObjectID()) + .setUpdateID(0).build(); + OMRequestTestUtils.addDirKeyToDirTable(true, baseDir, volumeName, bucketName, + 1L, om.getMetadataManager()); + for (int i = 0; i < dirCreatesCount; ++i) { + OmDirectoryInfo dir1 = new OmDirectoryInfo.Builder().setName("dir" + i) + .setCreationTime(Time.now()).setModificationTime(Time.now()).setParentObjectID(parentId) + .setObjectID(i + 100).setUpdateID(i).build(); + OMRequestTestUtils.addDirKeyToDirTable(true, dir1, volumeName, bucketName, + 1L, om.getMetadataManager()); + } + + DirectoryDeletingService dirDeletingService = keyManager.getDirDeletingService(); + long[] delDirCnt = new long[2]; + delDirCnt[0] = dirDeletingService.getDeletedDirsCount(); + + OmKeyArgs delArgs = new OmKeyArgs.Builder() + .setVolumeName(volumeName).setBucketName(bucketName).setKeyName("dir_base") + .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)) + .setDataSize(0).setRecursive(true).build(); + writeClient.deleteKey(delArgs); + + // check if difference between each run should not cross the directory deletion limit + // and wait till all dir is removed + GenericTestUtils.waitFor(() -> { + delDirCnt[1] = dirDeletingService.getDeletedDirsCount(); + assertTrue(delDirCnt[1] - delDirCnt[0] <= OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT, + "base: " + delDirCnt[0] + ", new: " + delDirCnt[1]); + delDirCnt[0] = delDirCnt[1]; + return dirDeletingService.getDeletedDirsCount() >= dirCreatesCount; + }, 500, 300000); + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java index 8163592cfc6d..ff6506da0347 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java @@ -39,13 +39,17 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.om.KeyManager; +import org.apache.hadoop.ozone.om.KeyManagerImpl; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OmTestManagers; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.PendingKeysDeletion; import org.apache.hadoop.ozone.om.ScmBlockLocationTestingClient; import org.apache.hadoop.ozone.common.BlockGroup; import 
org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.KeyInfoWithVolumeContext; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; @@ -57,10 +61,13 @@ import org.apache.ratis.util.ExitUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Nested; import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.io.TempDir; +import org.mockito.ArgumentMatchers; +import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -81,12 +88,16 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.when; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -132,6 +143,7 @@ private void createConfig(File testDir) { 1, TimeUnit.SECONDS); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); + conf.setBoolean(OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED, true); conf.setQuietMode(false); } @@ -285,6 +297,115 @@ void checkDeletedTableCleanUpForSnapshot() throws Exception { assertEquals(0, rangeKVs.size()); } + /* + * Create key k1 + * Create snap1 + * Rename k1 to k2 + * Delete k2 + * Wait for KeyDeletingService to start processing deleted key k2 + * Create snap2 by making the KeyDeletingService thread wait till snap2 is flushed + * Resume KeyDeletingService thread. + * Read k1 from snap1. 
+ */ + @Test + public void testAOSKeyDeletingWithSnapshotCreateParallelExecution() + throws Exception { + Table snapshotInfoTable = + om.getMetadataManager().getSnapshotInfoTable(); + Table deletedTable = + om.getMetadataManager().getDeletedTable(); + Table renameTable = om.getMetadataManager().getSnapshotRenamedTable(); + + // Suspend KeyDeletingService + keyDeletingService.suspend(); + SnapshotDeletingService snapshotDeletingService = om.getKeyManager().getSnapshotDeletingService(); + snapshotDeletingService.suspend(); + GenericTestUtils.waitFor(() -> !keyDeletingService.isRunningOnAOS(), 1000, 10000); + final String volumeName = getTestName(); + final String bucketName = uniqueObjectName("bucket"); + OzoneManager ozoneManager = Mockito.spy(om); + OmSnapshotManager omSnapshotManager = Mockito.spy(om.getOmSnapshotManager()); + KeyManager km = Mockito.spy(new KeyManagerImpl(ozoneManager, ozoneManager.getScmClient(), conf, + om.getPerfMetrics())); + when(ozoneManager.getOmSnapshotManager()).thenAnswer(i -> { + return omSnapshotManager; + }); + KeyDeletingService service = new KeyDeletingService(ozoneManager, scmBlockTestingClient, km, 10000, + 100000, conf, false); + service.shutdown(); + final long initialSnapshotCount = metadataManager.countRowsInTable(snapshotInfoTable); + final long initialDeletedCount = metadataManager.countRowsInTable(deletedTable); + final long initialRenameCount = metadataManager.countRowsInTable(renameTable); + // Create Volume and Buckets + createVolumeAndBucket(volumeName, bucketName, false); + OmKeyArgs args = createAndCommitKey(volumeName, bucketName, + "key1", 3); + String snap1 = uniqueObjectName("snap"); + String snap2 = uniqueObjectName("snap"); + writeClient.createSnapshot(volumeName, bucketName, snap1); + KeyInfoWithVolumeContext keyInfo = writeClient.getKeyInfo(args, false); + AtomicLong objectId = new AtomicLong(keyInfo.getKeyInfo().getObjectID()); + renameKey(volumeName, bucketName, "key1", "key2"); + deleteKey(volumeName, bucketName, "key2"); + assertTableRowCount(deletedTable, initialDeletedCount + 1, metadataManager); + assertTableRowCount(renameTable, initialRenameCount + 1, metadataManager); + + String[] deletePathKey = {metadataManager.getOzoneDeletePathKey(objectId.get(), + metadataManager.getOzoneKey(volumeName, + bucketName, "key2"))}; + assertNotNull(deletedTable.get(deletePathKey[0])); + Mockito.doAnswer(i -> { + writeClient.createSnapshot(volumeName, bucketName, snap2); + GenericTestUtils.waitFor(() -> { + try { + SnapshotInfo snapshotInfo = writeClient.getSnapshotInfo(volumeName, bucketName, snap2); + return OmSnapshotManager.areSnapshotChangesFlushedToDB(metadataManager, snapshotInfo); + } catch (IOException e) { + throw new RuntimeException(e); + } + }, 1000, 100000); + GenericTestUtils.waitFor(() -> { + try { + return renameTable.get(metadataManager.getRenameKey(volumeName, bucketName, objectId.get())) == null; + } catch (IOException e) { + throw new RuntimeException(e); + } + }, 1000, 10000); + return i.callRealMethod(); + }).when(omSnapshotManager).getSnapshot(ArgumentMatchers.eq(volumeName), ArgumentMatchers.eq(bucketName), + ArgumentMatchers.eq(snap1)); + assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 1, metadataManager); + doAnswer(i -> { + PendingKeysDeletion pendingKeysDeletion = (PendingKeysDeletion) i.callRealMethod(); + for (BlockGroup group : pendingKeysDeletion.getKeyBlocksList()) { + Assertions.assertNotEquals(deletePathKey[0], group.getGroupID()); + } + return pendingKeysDeletion; + 
}).when(km).getPendingDeletionKeys(anyInt()); + service.runPeriodicalTaskNow(); + service.runPeriodicalTaskNow(); + assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 2, metadataManager); + // Create Key3 + OmKeyArgs args2 = createAndCommitKey(volumeName, bucketName, + "key3", 3); + keyInfo = writeClient.getKeyInfo(args2, false); + objectId.set(keyInfo.getKeyInfo().getObjectID()); + // Rename Key3 to key4 + renameKey(volumeName, bucketName, "key3", "key4"); + // Delete Key4 + deleteKey(volumeName, bucketName, "key4"); + deletePathKey[0] = metadataManager.getOzoneDeletePathKey(objectId.get(), metadataManager.getOzoneKey(volumeName, + bucketName, "key4")); + // Delete snapshot + writeClient.deleteSnapshot(volumeName, bucketName, snap2); + // Run KDS and ensure key4 doesn't get purged since snap2 has not been deleted. + service.runPeriodicalTaskNow(); + writeClient.deleteSnapshot(volumeName, bucketName, snap1); + snapshotDeletingService.resume(); + assertTableRowCount(snapshotInfoTable, initialSnapshotCount, metadataManager); + keyDeletingService.resume(); + } + /* * Create Snap1 * Create 10 keys @@ -396,68 +517,68 @@ void testSnapshotExclusiveSize() throws Exception { final long initialDeletedCount = metadataManager.countRowsInTable(deletedTable); final long initialRenamedCount = metadataManager.countRowsInTable(renamedTable); - final String volumeName = getTestName(); - final String bucketName = uniqueObjectName("bucket"); + final String testVolumeName = getTestName(); + final String testBucketName = uniqueObjectName("bucket"); final String keyName = uniqueObjectName("key"); // Create Volume and Buckets - createVolumeAndBucket(volumeName, bucketName, false); + createVolumeAndBucket(testVolumeName, testBucketName, false); // Create 3 keys for (int i = 1; i <= 3; i++) { - createAndCommitKey(volumeName, bucketName, keyName + i, 3); + createAndCommitKey(testVolumeName, testBucketName, keyName + i, 3); } assertTableRowCount(keyTable, initialKeyCount + 3, metadataManager); // Create Snapshot1 String snap1 = uniqueObjectName("snap"); - writeClient.createSnapshot(volumeName, bucketName, snap1); + writeClient.createSnapshot(testVolumeName, testBucketName, snap1); assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 1, metadataManager); assertTableRowCount(deletedTable, initialDeletedCount, metadataManager); // Create 2 keys for (int i = 4; i <= 5; i++) { - createAndCommitKey(volumeName, bucketName, keyName + i, 3); + createAndCommitKey(testVolumeName, testBucketName, keyName + i, 3); } // Delete a key, rename 2 keys. We will be using this to test // how we handle renamed key for exclusive size calculation. 
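[Editor's note] Several of the new tests in this patch block on background services with GenericTestUtils.waitFor(condition, checkIntervalMs, timeoutMs). The sketch below shows the same poll-until-true idiom in isolation; the WaitForSketch helper is illustrative and not the Hadoop test utility itself:

```java
// Illustrative sketch only: poll a condition at a fixed interval until it holds
// or a timeout elapses, as the tests do while waiting on deleting services.
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BooleanSupplier;

public final class WaitForSketch {

  static void waitFor(BooleanSupplier condition, long intervalMs, long timeoutMs)
      throws InterruptedException, TimeoutException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!condition.getAsBoolean()) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException("condition not met within " + timeoutMs + " ms");
      }
      Thread.sleep(intervalMs); // re-check after a short pause
    }
  }

  public static void main(String[] args) throws Exception {
    AtomicInteger deletedDirs = new AtomicInteger();
    // Simulate a background deleting service making incremental progress.
    Thread service = new Thread(() -> {
      for (int i = 0; i < 5; i++) {
        deletedDirs.incrementAndGet();
        try {
          Thread.sleep(50);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          return;
        }
      }
    });
    service.start();
    waitFor(() -> deletedDirs.get() >= 5, 100, 10_000);
    System.out.println("deleted dirs observed: " + deletedDirs.get());
  }
}
```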
- renameKey(volumeName, bucketName, keyName + 1, "renamedKey1"); - renameKey(volumeName, bucketName, keyName + 2, "renamedKey2"); - deleteKey(volumeName, bucketName, keyName + 3); + renameKey(testVolumeName, testBucketName, keyName + 1, "renamedKey1"); + renameKey(testVolumeName, testBucketName, keyName + 2, "renamedKey2"); + deleteKey(testVolumeName, testBucketName, keyName + 3); assertTableRowCount(deletedTable, initialDeletedCount + 1, metadataManager); assertTableRowCount(renamedTable, initialRenamedCount + 2, metadataManager); // Create Snapshot2 String snap2 = uniqueObjectName("snap"); - writeClient.createSnapshot(volumeName, bucketName, snap2); + writeClient.createSnapshot(testVolumeName, testBucketName, snap2); assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 2, metadataManager); assertTableRowCount(deletedTable, initialDeletedCount, metadataManager); // Create 2 keys for (int i = 6; i <= 7; i++) { - createAndCommitKey(volumeName, bucketName, keyName + i, 3); + createAndCommitKey(testVolumeName, testBucketName, keyName + i, 3); } - deleteKey(volumeName, bucketName, "renamedKey1"); - deleteKey(volumeName, bucketName, keyName + 4); + deleteKey(testVolumeName, testBucketName, "renamedKey1"); + deleteKey(testVolumeName, testBucketName, keyName + 4); // Do a second rename of already renamedKey2 - renameKey(volumeName, bucketName, "renamedKey2", "renamedKey22"); + renameKey(testVolumeName, testBucketName, "renamedKey2", "renamedKey22"); assertTableRowCount(deletedTable, initialDeletedCount + 2, metadataManager); assertTableRowCount(renamedTable, initialRenamedCount + 1, metadataManager); // Create Snapshot3 String snap3 = uniqueObjectName("snap"); - writeClient.createSnapshot(volumeName, bucketName, snap3); + writeClient.createSnapshot(testVolumeName, testBucketName, snap3); // Delete 4 keys - deleteKey(volumeName, bucketName, "renamedKey22"); + deleteKey(testVolumeName, testBucketName, "renamedKey22"); for (int i = 5; i <= 7; i++) { - deleteKey(volumeName, bucketName, keyName + i); + deleteKey(testVolumeName, testBucketName, keyName + i); } // Create Snapshot4 String snap4 = uniqueObjectName("snap"); - writeClient.createSnapshot(volumeName, bucketName, snap4); - createAndCommitKey(volumeName, bucketName, uniqueObjectName("key"), 3); + writeClient.createSnapshot(testVolumeName, testBucketName, snap4); + createAndCommitKey(testVolumeName, testBucketName, uniqueObjectName("key"), 3); long prevKdsRunCount = getRunCount(); keyDeletingService.resume(); @@ -468,6 +589,7 @@ void testSnapshotExclusiveSize() throws Exception { .put(snap3, 2000L) .put(snap4, 0L) .build(); + System.out.println(expectedSize); // Let KeyDeletingService to run for some iterations GenericTestUtils.waitFor( @@ -480,8 +602,10 @@ void testSnapshotExclusiveSize() throws Exception { while (iterator.hasNext()) { Table.KeyValue snapshotEntry = iterator.next(); String snapshotName = snapshotEntry.getValue().getName(); + Long expected = expectedSize.getOrDefault(snapshotName, 0L); assertNotNull(expected); + System.out.println(snapshotName); assertEquals(expected, snapshotEntry.getValue().getExclusiveSize()); // Since for the test we are using RATIS/THREE assertEquals(expected * 3, snapshotEntry.getValue().getExclusiveReplicatedSize()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingService.java index 3948f4fab805..e04891da83ab 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingService.java @@ -20,7 +20,8 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; +import org.apache.hadoop.hdds.utils.TransactionInfo; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.KeyManagerImpl; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshotManager; @@ -56,25 +57,26 @@ public class TestSnapshotDeletingService { private SnapshotChainManager chainManager; @Mock private OmMetadataManagerImpl omMetadataManager; - @Mock - private ScmBlockLocationProtocol scmClient; private final OzoneConfiguration conf = new OzoneConfiguration();; private final long sdsRunInterval = Duration.ofMillis(1000).toMillis(); private final long sdsServiceTimeout = Duration.ofSeconds(10).toMillis(); - private static Stream testCasesForIgnoreSnapshotGc() { - SnapshotInfo filteredSnapshot = SnapshotInfo.newBuilder().setSstFiltered(true).setName("snap1").build(); - SnapshotInfo unFilteredSnapshot = SnapshotInfo.newBuilder().setSstFiltered(false).setName("snap1").build(); + private static Stream testCasesForIgnoreSnapshotGc() throws IOException { + SnapshotInfo flushedSnapshot = SnapshotInfo.newBuilder().setSstFiltered(true) + .setLastTransactionInfo(TransactionInfo.valueOf(1, 1).toByteString()) + .setName("snap1").build(); + SnapshotInfo unFlushedSnapshot = SnapshotInfo.newBuilder().setSstFiltered(false).setName("snap1") + .setLastTransactionInfo(TransactionInfo.valueOf(0, 0).toByteString()).build(); return Stream.of( - Arguments.of(filteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), - Arguments.of(filteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true), - Arguments.of(unFilteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), - Arguments.of(unFilteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true), - Arguments.of(filteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), - Arguments.of(unFilteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), - Arguments.of(unFilteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true), - Arguments.of(filteredSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true)); + Arguments.of(flushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), + Arguments.of(flushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true), + Arguments.of(unFlushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), + Arguments.of(unFlushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true), + Arguments.of(flushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), + Arguments.of(unFlushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED, false), + Arguments.of(unFlushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true), + Arguments.of(flushedSnapshot, SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE, true)); } @ParameterizedTest @@ -87,9 +89,15 @@ public void testProcessSnapshotLogicInSDS(SnapshotInfo snapshotInfo, Mockito.when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); Mockito.when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); Mockito.when(ozoneManager.getConfiguration()).thenReturn(conf); + if (status == 
SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED) { + Table transactionInfoTable = Mockito.mock(Table.class); + Mockito.when(omMetadataManager.getTransactionInfoTable()).thenReturn(transactionInfoTable); + Mockito.when(transactionInfoTable.getSkipCache(Mockito.anyString())) + .thenReturn(TransactionInfo.valueOf(1, 1)); + } SnapshotDeletingService snapshotDeletingService = - new SnapshotDeletingService(sdsRunInterval, sdsServiceTimeout, ozoneManager, scmClient); + new SnapshotDeletingService(sdsRunInterval, sdsServiceTimeout, ozoneManager); snapshotInfo.setSnapshotStatus(status); assertEquals(expectedOutcome, snapshotDeletingService.shouldIgnoreSnapshot(snapshotInfo)); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java index c5ae809718e7..f49bfc33976e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.snapshot; import com.google.common.collect.ImmutableMap; +import org.apache.commons.compress.utils.Lists; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -38,6 +39,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -168,6 +170,7 @@ public void testAddSnapshot() throws Exception { } assertEquals(snapshotID3, chainManager.getLatestGlobalSnapshotId()); + assertEquals(snapshotID1, chainManager.getOldestGlobalSnapshotId()); assertEquals(snapshotID3, chainManager.getLatestPathSnapshotId( String.join("/", "vol1", "bucket1"))); @@ -285,6 +288,7 @@ public void testChainFromLoadFromTable(boolean increasingTIme) assertFalse(chainManager.isSnapshotChainCorrupted()); // check if snapshots loaded correctly from snapshotInfoTable assertEquals(snapshotID2, chainManager.getLatestGlobalSnapshotId()); + assertEquals(snapshotID1, chainManager.getOldestGlobalSnapshotId()); assertEquals(snapshotID2, chainManager.nextGlobalSnapshot(snapshotID1)); assertEquals(snapshotID1, chainManager.previousPathSnapshot(String .join("/", "vol1", "bucket1"), snapshotID2)); @@ -305,6 +309,34 @@ public void testChainFromLoadFromTable(boolean increasingTIme) () -> chainManager.nextGlobalSnapshot(snapshotID1)); } + @ParameterizedTest + @ValueSource(ints = {0, 1, 2, 5, 10}) + public void testSnapshotChainIterator(int numberOfSnapshots) throws IOException { + Table snapshotInfo = omMetadataManager.getSnapshotInfoTable(); + List snapshotInfoList = new ArrayList<>(); + + UUID prevSnapshotID = null; + long time = System.currentTimeMillis(); + for (int i = 0; i < numberOfSnapshots; i++) { + UUID snapshotID = UUID.randomUUID(); + SnapshotInfo snapInfo = createSnapshotInfo(snapshotID, prevSnapshotID, + prevSnapshotID, time++); + snapshotInfo.put(snapshotID.toString(), snapInfo); + prevSnapshotID = snapshotID; + snapshotInfoList.add(snapInfo); + } + chainManager = new SnapshotChainManager(omMetadataManager); + assertFalse(chainManager.isSnapshotChainCorrupted()); + List reverseChain = Lists.newArrayList(chainManager.iterator(true)); + Collections.reverse(reverseChain); + List forwardChain = 
Lists.newArrayList(chainManager.iterator(false)); + List expectedChain = snapshotInfoList.stream().map(SnapshotInfo::getSnapshotId).collect(Collectors.toList()); + assertEquals(expectedChain, reverseChain); + assertEquals(expectedChain, forwardChain); + assertEquals(forwardChain, reverseChain); + + } + private static Stream invalidSnapshotChain() { List nodes = IntStream.range(0, 5) .mapToObj(i -> UUID.randomUUID()) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java new file mode 100644 index 000000000000..e60e23de22a3 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java @@ -0,0 +1,255 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.hadoop.ozone.om.snapshot; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.AuditMessage; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotCreateRequest; +import org.apache.hadoop.ozone.om.request.snapshot.TestOMSnapshotCreateRequest; +import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotCreateResponse; +import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.io.TempDir; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static 
org.apache.hadoop.ozone.om.request.OMRequestTestUtils.createOmKeyInfo; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.framework; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Base class to test snapshot functionalities. + */ +public class TestSnapshotRequestAndResponse { + @TempDir + private File testDir; + + private OzoneManager ozoneManager; + private OMMetrics omMetrics; + private OmMetadataManagerImpl omMetadataManager; + private BatchOperation batchOperation; + private OmSnapshotManager omSnapshotManager; + + private String volumeName; + private String bucketName; + private boolean isAdmin; + + public BatchOperation getBatchOperation() { + return batchOperation; + } + + public String getBucketName() { + return bucketName; + } + + public boolean isAdmin() { + return isAdmin; + } + + public OmMetadataManagerImpl getOmMetadataManager() { + return omMetadataManager; + } + + public OMMetrics getOmMetrics() { + return omMetrics; + } + + public OmSnapshotManager getOmSnapshotManager() { + return omSnapshotManager; + } + + public OzoneManager getOzoneManager() { + return ozoneManager; + } + + public File getTestDir() { + return testDir; + } + + public String getVolumeName() { + return volumeName; + } + + protected TestSnapshotRequestAndResponse() { + this.isAdmin = false; + } + + protected TestSnapshotRequestAndResponse(boolean isAdmin) { + this.isAdmin = isAdmin; + } + + @BeforeEach + public void baseSetup() throws Exception { + ozoneManager = mock(OzoneManager.class); + omMetrics = OMMetrics.create(); + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + testDir.getAbsolutePath()); + ozoneConfiguration.set(OzoneConfigKeys.OZONE_METADATA_DIRS, + testDir.getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, + ozoneManager); + when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); + when(ozoneManager.getMetrics()).thenReturn(omMetrics); + when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + when(ozoneManager.isRatisEnabled()).thenReturn(true); + when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); + when(ozoneManager.isAdmin(any())).thenReturn(isAdmin); + when(ozoneManager.isOwner(any(), any())).thenReturn(false); + when(ozoneManager.getBucketOwner(any(), any(), + any(), any())).thenReturn("dummyBucketOwner"); + IAccessAuthorizer accessAuthorizer = mock(IAccessAuthorizer.class); + when(ozoneManager.getAccessAuthorizer()).thenReturn(accessAuthorizer); + when(accessAuthorizer.isNative()).thenReturn(false); + OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); + when(lvm.isAllowed(anyString())).thenReturn(true); + when(ozoneManager.getVersionManager()).thenReturn(lvm); + AuditLogger auditLogger = mock(AuditLogger.class); + when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); + doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); + batchOperation = omMetadataManager.getStore().initBatchOperation(); + + volumeName = UUID.randomUUID().toString(); + bucketName = UUID.randomUUID().toString(); + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + omSnapshotManager = new 
OmSnapshotManager(ozoneManager); + when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); + } + + @AfterEach + public void stop() { + omMetrics.unRegister(); + framework().clearInlineMocks(); + if (batchOperation != null) { + batchOperation.close(); + } + } + + protected Path createSnapshotCheckpoint(String volume, String bucket, String snapshotName) throws Exception { + OzoneManagerProtocolProtos.OMRequest omRequest = OMRequestTestUtils + .createSnapshotRequest(volume, bucket, snapshotName); + // Pre-Execute OMSnapshotCreateRequest. + OMSnapshotCreateRequest omSnapshotCreateRequest = + TestOMSnapshotCreateRequest.doPreExecute(omRequest, ozoneManager); + + // validateAndUpdateCache OMSnapshotCreateResponse. + OMSnapshotCreateResponse omClientResponse = (OMSnapshotCreateResponse) + omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); + // Add to batch and commit to DB. + try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { + omClientResponse.addToDBBatch(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); + } + + String key = SnapshotInfo.getTableKey(volume, bucket, snapshotName); + SnapshotInfo snapshotInfo = + omMetadataManager.getSnapshotInfoTable().get(key); + assertNotNull(snapshotInfo); + + RDBStore store = (RDBStore) omMetadataManager.getStore(); + String checkpointPrefix = store.getDbLocation().getName(); + Path snapshotDirPath = Paths.get(store.getSnapshotsParentDir(), + checkpointPrefix + snapshotInfo.getCheckpointDir()); + // Check the DB is still there + assertTrue(Files.exists(snapshotDirPath)); + return snapshotDirPath; + } + + protected List>> getDeletedKeys(String volume, String bucket, + int startRange, int endRange, + int numberOfKeys, + int minVersion) { + return IntStream.range(startRange, endRange).boxed() + .map(i -> Pair.of(omMetadataManager.getOzoneDeletePathKey(i, + omMetadataManager.getOzoneKey(volume, bucket, "key" + String.format("%010d", i))), + IntStream.range(0, numberOfKeys).boxed().map(cnt -> createOmKeyInfo(volume, bucket, "key" + i, + ReplicationConfig.getDefault(ozoneManager.getConfiguration()), + new OmKeyLocationInfoGroup(minVersion + cnt, new ArrayList<>(), false)) + .setCreationTime(0).setModificationTime(0).build()) + .collect(Collectors.toList()))) + .collect(Collectors.toList()); + } + + protected List> getRenameKeys(String volume, String bucket, + int startRange, int endRange, + String renameKeyPrefix) { + return IntStream.range(startRange, endRange).boxed() + .map(i -> { + try { + return Pair.of(omMetadataManager.getRenameKey(volume, bucket, i), + omMetadataManager.getOzoneKeyFSO(volume, bucket, renameKeyPrefix + i)); + } catch (IOException e) { + throw new RuntimeException(e); + } + }).collect(Collectors.toList()); + } + + protected List>> getDeletedDirKeys(String volume, String bucket, + int startRange, int endRange, int numberOfKeys) { + return IntStream.range(startRange, endRange).boxed() + .map(i -> { + try { + return Pair.of(omMetadataManager.getOzoneDeletePathKey(i, + omMetadataManager.getOzoneKeyFSO(volume, bucket, "1/key" + i)), + IntStream.range(0, numberOfKeys).boxed().map(cnt -> createOmKeyInfo(volume, bucket, "key" + i, + ReplicationConfig.getDefault(ozoneManager.getConfiguration())).build()) + .collect(Collectors.toList())); + } catch (IOException e) { + throw new RuntimeException(e); + } + }) + .collect(Collectors.toList()); + } + +} diff --git a/hadoop-ozone/ozonefs-common/pom.xml 
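For illustration, a minimal sketch of how a test could build on the new TestSnapshotRequestAndResponse base class; the subclass name, test body and assertion are hypothetical and reuse only members defined in the base class above (baseSetup, createSnapshotCheckpoint, getVolumeName, getBucketName):

import static org.junit.jupiter.api.Assertions.assertTrue;

import java.nio.file.Files;
import java.nio.file.Path;
import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse;
import org.junit.jupiter.api.Test;

// Hypothetical subclass; not part of this change.
public class TestSnapshotCheckpointExample extends TestSnapshotRequestAndResponse {

  @Test
  public void checkpointDirectoryExistsAfterSnapshotCreate() throws Exception {
    // baseSetup() has already mocked OzoneManager and created a volume/bucket.
    Path checkpoint = createSnapshotCheckpoint(getVolumeName(), getBucketName(), "snap1");
    // createSnapshotCheckpoint() already verifies the SnapshotInfo entry;
    // the checkpoint directory must also exist on disk until the snapshot is purged.
    assertTrue(Files.exists(checkpoint));
  }
}
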
b/hadoop-ozone/ozonefs-common/pom.xml index 6132f9bc125a..2b79beae35ea 100644 --- a/hadoop-ozone/ozonefs-common/pom.xml +++ b/hadoop-ozone/ozonefs-common/pom.xml @@ -19,12 +19,12 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-filesystem-common Apache Ozone FileSystem Common jar - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT UTF-8 true diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java index df8ece03486c..68a8ee7fc4bc 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java @@ -29,6 +29,8 @@ import java.util.List; import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FileAlreadyExistsException; @@ -68,21 +70,21 @@ import org.apache.hadoop.ozone.client.rpc.RpcClient; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; import org.apache.hadoop.security.token.Token; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.lang3.StringUtils; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; @@ -90,9 +92,6 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.DONE; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * Basic Implementation of the OzoneFileSystem calls. *

    @@ -193,18 +192,24 @@ public BasicOzoneClientAdapterImpl(String omHost, int omPort, OzoneClientFactory.getRpcClient(conf); } objectStore = ozoneClient.getObjectStore(); - this.volume = objectStore.getVolume(volumeStr); - this.bucket = volume.getBucket(bucketStr); - bucketReplicationConfig = this.bucket.getReplicationConfig(); - nextReplicationConfigRefreshTime = - clock.millis() + bucketRepConfigRefreshPeriodMS; + try { + this.volume = objectStore.getVolume(volumeStr); + this.bucket = volume.getBucket(bucketStr); + bucketReplicationConfig = this.bucket.getReplicationConfig(); + nextReplicationConfigRefreshTime = clock.millis() + bucketRepConfigRefreshPeriodMS; - // resolve the bucket layout in case of Link Bucket - BucketLayout resolvedBucketLayout = - OzoneClientUtils.resolveLinkBucketLayout(bucket, objectStore, - new HashSet<>()); + // resolve the bucket layout in case of Link Bucket + BucketLayout resolvedBucketLayout = + OzoneClientUtils.resolveLinkBucketLayout(bucket, objectStore, new HashSet<>()); - OzoneFSUtils.validateBucketLayout(bucket.getName(), resolvedBucketLayout); + OzoneFSUtils.validateBucketLayout(bucket.getName(), resolvedBucketLayout); + } catch (IOException | RuntimeException exception) { + // in case of exception, the adapter object will not be + // initialised making the client object unreachable, close the client + // to release resources in this case and rethrow. + ozoneClient.close(); + throw exception; + } this.configuredDnPort = conf.getInt( OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, @@ -589,8 +594,8 @@ private BlockLocation[] getBlockLocations(OzoneFileStatus fileStatus) { nameList.add(dn.getHostName() + ":" + port); }); - String[] hosts = hostList.toArray(new String[hostList.size()]); - String[] names = nameList.toArray(new String[nameList.size()]); + String[] hosts = hostList.toArray(new String[0]); + String[] names = nameList.toArray(new String[0]); BlockLocation blockLocation = new BlockLocation( names, hosts, offsetOfBlockInFile, omKeyLocationInfo.getLength()); diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java index da278f17fbf0..41e47d91aa9a 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java @@ -32,6 +32,7 @@ import com.google.common.base.Preconditions; import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FileAlreadyExistsException; @@ -65,17 +66,17 @@ import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneFsServerDefaults; +import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.OzoneSnapshot; +import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; import 
org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.rpc.RpcClient; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -85,30 +86,26 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; - -import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes - .BUCKET_ALREADY_EXISTS; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes - .VOLUME_ALREADY_EXISTS; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.DONE; @@ -221,7 +218,15 @@ public BasicRootedOzoneClientAdapterImpl(String omHost, int omPort, OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); // Fetches the bucket layout to be used by OFS. - initDefaultFsBucketLayout(conf); + try { + initDefaultFsBucketLayout(conf); + } catch (IOException | RuntimeException exception) { + // in case of exception, the adapter object will not be + // initialised making the client object unreachable, close the client + // to release resources in this case and rethrow. 
+ ozoneClient.close(); + throw exception; + } config = conf; } finally { @@ -714,7 +719,7 @@ private FileStatusAdapter getFileStatusForKeyOrSnapshot(OFSPath ofsPath, URI uri * * @param allUsers return trashRoots of all users if true, used by emptier * @param fs Pointer to the current OFS FileSystem - * @return + * @return {@code Collection} */ public Collection getTrashRoots(boolean allUsers, BasicRootedOzoneFileSystem fs) { @@ -1079,8 +1084,8 @@ private BlockLocation[] getBlockLocations(OzoneFileStatus fileStatus) { nameList.add(dn.getHostName() + ":" + port); }); - String[] hosts = hostList.toArray(new String[hostList.size()]); - String[] names = nameList.toArray(new String[nameList.size()]); + String[] hosts = hostList.toArray(new String[0]); + String[] names = nameList.toArray(new String[0]); BlockLocation blockLocation = new BlockLocation( names, hosts, offsetOfBlockInFile, omKeyLocationInfo.getLength()); diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java index f92f8d957044..6354ee0eebe9 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java @@ -33,7 +33,7 @@ * information can be converted to this class, and this class can be used to * create hadoop 2.x FileStatus. *

    - * FileStatus (Hadoop 3.x) --> FileStatusAdapter --> FileStatus (Hadoop 2.x) + * FileStatus (Hadoop 3.x) --> FileStatusAdapter --> FileStatus (Hadoop 2.x) */ public final class FileStatusAdapter { diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java index 4dc70bfa569d..f873b43ae983 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java @@ -193,7 +193,6 @@ public int read(long position, ByteBuffer buf) throws IOException { /** * @param buf the ByteBuffer to receive the results of the read operation. * @param position offset - * @return void * @throws IOException if there is some error performing the read * @throws EOFException if end of file reached before reading fully */ diff --git a/hadoop-ozone/ozonefs-hadoop2/pom.xml b/hadoop-ozone/ozonefs-hadoop2/pom.xml index fad83ea86c1a..8585a9dd5446 100644 --- a/hadoop-ozone/ozonefs-hadoop2/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop2/pom.xml @@ -19,12 +19,12 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-filesystem-hadoop2 Apache Ozone FS Hadoop 2.x compatibility jar - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT org.apache.hadoop.ozone.shaded diff --git a/hadoop-ozone/ozonefs-hadoop3-client/pom.xml b/hadoop-ozone/ozonefs-hadoop3-client/pom.xml index f27bd411db72..2f23a5d318e1 100644 --- a/hadoop-ozone/ozonefs-hadoop3-client/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop3-client/pom.xml @@ -19,7 +19,7 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT org.apache.hadoop.ozone.shaded diff --git a/hadoop-ozone/ozonefs-shaded/pom.xml b/hadoop-ozone/ozonefs-shaded/pom.xml index df6c724883c9..4146eaaa4d51 100644 --- a/hadoop-ozone/ozonefs-shaded/pom.xml +++ b/hadoop-ozone/ozonefs-shaded/pom.xml @@ -19,12 +19,12 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-filesystem-shaded Apache Ozone FileSystem Shaded jar - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT true diff --git a/hadoop-ozone/ozonefs/pom.xml b/hadoop-ozone/ozonefs/pom.xml index 176f21b98603..aa554c422e57 100644 --- a/hadoop-ozone/ozonefs/pom.xml +++ b/hadoop-ozone/ozonefs/pom.xml @@ -19,12 +19,12 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone-filesystem Apache Ozone FileSystem jar - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT UTF-8 true diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index d91d488c4345..be928e760549 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -16,10 +16,10 @@ org.apache.ozone ozone-main - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Project Apache Ozone pom diff --git a/hadoop-ozone/recon-codegen/pom.xml b/hadoop-ozone/recon-codegen/pom.xml index bb7756a9de37..b8345c7d343f 100644 --- a/hadoop-ozone/recon-codegen/pom.xml +++ b/hadoop-ozone/recon-codegen/pom.xml @@ -18,7 +18,7 @@ ozone org.apache.ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT 4.0.0 ozone-reconcodegen diff --git a/hadoop-ozone/recon/pom.xml b/hadoop-ozone/recon/pom.xml index a24252c1ed62..e0ce7065c1c9 100644 --- a/hadoop-ozone/recon/pom.xml +++ b/hadoop-ozone/recon/pom.xml @@ -18,7 +18,7 @@ org.apache.ozone ozone - 1.5.0-SNAPSHOT + 2.0.0-SNAPSHOT Apache Ozone Recon 4.0.0 diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ConfigurationProvider.java 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ConfigurationProvider.java index 6312365bf4b4..9f0a9796e283 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ConfigurationProvider.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ConfigurationProvider.java @@ -53,6 +53,8 @@ private static void addDeprecations() { @VisibleForTesting public static void setConfiguration(OzoneConfiguration conf) { + // Nullity check is used in case the configuration was already set + // in the MiniOzoneCluster if (configuration == null) { ConfigurationProvider.configuration = conf; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java index ed657931e034..5768166c9503 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java @@ -43,20 +43,20 @@ private ReconConstants() { public static final int DISK_USAGE_TOP_RECORDS_LIMIT = 30; public static final String DEFAULT_OPEN_KEY_INCLUDE_NON_FSO = "false"; public static final String DEFAULT_OPEN_KEY_INCLUDE_FSO = "false"; - public static final String DEFAULT_START_PREFIX = "/"; public static final String DEFAULT_FETCH_COUNT = "1000"; public static final String DEFAULT_KEY_SIZE = "0"; public static final String DEFAULT_BATCH_NUMBER = "1"; public static final String RECON_QUERY_BATCH_PARAM = "batchNum"; public static final String RECON_QUERY_PREVKEY = "prevKey"; + public static final String RECON_QUERY_START_PREFIX = "startPrefix"; public static final String RECON_OPEN_KEY_INCLUDE_NON_FSO = "includeNonFso"; public static final String RECON_OPEN_KEY_INCLUDE_FSO = "includeFso"; - public static final String RECON_OPEN_KEY_DEFAULT_SEARCH_LIMIT = "1000"; - public static final String RECON_OPEN_KEY_SEARCH_DEFAULT_PREV_KEY = ""; + public static final String RECON_OM_INSIGHTS_DEFAULT_START_PREFIX = "/"; + public static final String RECON_OM_INSIGHTS_DEFAULT_SEARCH_LIMIT = "1000"; + public static final String RECON_OM_INSIGHTS_DEFAULT_SEARCH_PREV_KEY = ""; public static final String RECON_QUERY_FILTER = "missingIn"; public static final String PREV_CONTAINER_ID_DEFAULT_VALUE = "0"; - public static final String PREV_DELETED_BLOCKS_TRANSACTION_ID_DEFAULT_VALUE = - "0"; + public static final String PREV_DELETED_BLOCKS_TRANSACTION_ID_DEFAULT_VALUE = "0"; // Only include containers that are missing in OM by default public static final String DEFAULT_FILTER_FOR_MISSING_CONTAINERS = "SCM"; public static final String RECON_QUERY_LIMIT = "limit"; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconResponseUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconResponseUtils.java index 41235ae54280..dc53f195f675 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconResponseUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconResponseUtils.java @@ -44,7 +44,7 @@ public static Response noMatchedKeysResponse(String startPrefix) { String jsonResponse = String.format( "{\"message\": \"No keys matched the search prefix: '%s'.\"}", startPrefix); - return Response.status(Response.Status.NOT_FOUND) + return Response.status(Response.Status.NO_CONTENT) .entity(jsonResponse) .type(MediaType.APPLICATION_JSON) .build(); diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index 5c9f6a5f4e12..f65e2f30cb8c 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -32,11 +32,14 @@ import java.text.ParseException; import java.text.SimpleDateFormat; import java.time.Instant; -import java.util.List; -import java.util.TimeZone; +import java.util.ArrayList; +import java.util.Collections; import java.util.Date; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; import java.util.Set; -import java.util.ArrayList; +import java.util.TimeZone; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -54,6 +57,8 @@ import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher; import org.apache.hadoop.hdds.utils.HddsServerUtil; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.apache.hadoop.io.IOUtils; @@ -352,7 +357,8 @@ private static void triggerRebuild(ReconNamespaceSummaryManager reconNamespaceSu * @param url url to call * @param isSpnego is SPNEGO enabled * @return HttpURLConnection instance of the HTTP call. - * @throws IOException, AuthenticationException While reading the response. + * @throws IOException While reading the response, + * @throws AuthenticationException */ public HttpURLConnection makeHttpCall(URLConnectionFactory connectionFactory, String url, boolean isSpnego) @@ -569,7 +575,6 @@ public static boolean isInitializationComplete(ReconOMMetadataManager omMetadata * @param dateFormat * @param timeZone * @return the epoch milliseconds representation of the date. - * @throws ParseException */ public static long convertToEpochMillis(String dateString, String dateFormat, TimeZone timeZone) { String localDateFormat = dateFormat; @@ -596,6 +601,109 @@ public static long convertToEpochMillis(String dateString, String dateFormat, Ti } } + public static boolean validateStartPrefix(String startPrefix) { + + // Ensure startPrefix starts with '/' for non-empty values + startPrefix = startPrefix.startsWith("/") ? startPrefix : "/" + startPrefix; + + // Split the path to ensure it's at least at the bucket level (volume/bucket). + String[] pathComponents = startPrefix.split("/"); + if (pathComponents.length < 3 || pathComponents[2].isEmpty()) { + return false; // Invalid if not at bucket level or deeper + } + + return true; + } + + /** + * Retrieves keys from the specified table based on pagination and prefix filtering. + * This method handles different scenarios based on the presence of {@code startPrefix} + * and {@code prevKey}, enabling efficient key retrieval from the table. + * + * The method handles the following cases: + * + * 1. {@code prevKey} provided, {@code startPrefix} empty: + * - Seeks to {@code prevKey}, skips it, and returns subsequent records up to the limit. + * + * 2. {@code prevKey} empty, {@code startPrefix} empty: + * - Iterates from the beginning of the table, retrieving all records up to the limit. + * + * 3. {@code startPrefix} provided, {@code prevKey} empty: + * - Seeks to the first key matching {@code startPrefix} and returns all matching keys up to the limit. + * + * 4. 
{@code startPrefix} provided, {@code prevKey} provided: + * - Seeks to {@code prevKey}, skips it, and returns subsequent keys that match {@code startPrefix}, + * up to the limit. + * + * This method also handles the following {@code limit} scenarios: + * - If {@code limit == 0} or {@code limit < -1}, no records are returned. + * - If {@code limit == -1}, all records are returned. + * - For positive {@code limit}, it retrieves records up to the specified {@code limit}. + * + * @param table The table to retrieve keys from. + * @param startPrefix The search prefix to match keys against. + * @param limit The maximum number of keys to retrieve. + * @param prevKey The key to start after for the next set of records. + * @return A map of keys and their corresponding {@code OmKeyInfo} or {@code RepeatedOmKeyInfo} objects. + * @throws IOException If there are problems accessing the table. + */ + public static Map extractKeysFromTable( + Table table, String startPrefix, int limit, String prevKey) + throws IOException { + + Map matchedKeys = new LinkedHashMap<>(); + + // Null check for the table to prevent NPE during omMetaManager initialization + if (table == null) { + log.error("Table object is null. omMetaManager might still be initializing."); + return Collections.emptyMap(); + } + + // If limit = 0, return an empty result set + if (limit == 0 || limit < -1) { + return matchedKeys; + } + + // If limit = -1, set it to Integer.MAX_VALUE to return all records + int actualLimit = (limit == -1) ? Integer.MAX_VALUE : limit; + + try (TableIterator> keyIter = table.iterator()) { + + // Scenario 1 & 4: prevKey is provided (whether startPrefix is empty or not) + if (!prevKey.isEmpty()) { + keyIter.seek(prevKey); + if (keyIter.hasNext()) { + keyIter.next(); // Skip the previous key record + } + } else if (!startPrefix.isEmpty()) { + // Scenario 3: startPrefix is provided but prevKey is empty, so seek to startPrefix + keyIter.seek(startPrefix); + } + + // Scenario 2: Both startPrefix and prevKey are empty (iterate from the start of the table) + // No seeking needed; just start iterating from the first record in the table + // This is implicit in the following loop, as the iterator will start from the beginning + + // Iterate through the keys while adhering to the limit (if the limit is not zero) + while (keyIter.hasNext() && matchedKeys.size() < actualLimit) { + Table.KeyValue entry = keyIter.next(); + String dbKey = entry.getKey(); + + // Scenario 3 & 4: If startPrefix is provided, ensure the key matches startPrefix + if (!startPrefix.isEmpty() && !dbKey.startsWith(startPrefix)) { + break; // If the key no longer matches the prefix, exit the loop + } + + // Add the valid key-value pair to the results + matchedKeys.put(dbKey, entry.getValue()); + } + } catch (IOException exception) { + log.error("Error retrieving keys from table for path: {}", startPrefix, exception); + throw exception; + } + return matchedKeys; + } + /** * Finds all subdirectories under a parent directory in an FSO bucket. It builds * a list of paths for these subdirectories. 
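For illustration, a minimal usage sketch of the two helpers added above (validateStartPrefix and extractKeysFromTable), paginating through pending-deletion keys the same way the insight endpoints now do. The wildcard value type is an assumption (the generic signatures are not reproduced in this rendering of the diff), and the prefix, page size and processing step are placeholders:

import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.ozone.recon.ReconUtils;
import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;

final class DeletedKeyScanSketch {
  private DeletedKeyScanSketch() { }

  // Page through every pending-deletion key under one bucket.
  static void scan(ReconOMMetadataManager omMetadataManager) throws IOException {
    String startPrefix = "/vol1/bucket1/";   // must be at bucket level or deeper
    if (!ReconUtils.validateStartPrefix(startPrefix)) {
      return;                                // the endpoints answer 400 in this case
    }
    String prevKey = "";                     // empty prevKey: seek straight to the prefix
    while (true) {
      Map<String, ?> page = ReconUtils.extractKeysFromTable(
          omMetadataManager.getDeletedTable(), startPrefix, 1000, prevKey);
      if (page.isEmpty()) {
        break;                               // no more keys matching the prefix
      }
      for (Map.Entry<String, ?> entry : page.entrySet()) {
        prevKey = entry.getKey();            // resume after the last returned key
        // ... aggregate sizes / build response entries here
      }
    }
  }
}
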
These sub-directories are then used diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/AccessHeatMapEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/AccessHeatMapEndpoint.java index b0a9681c5b86..472cdb62a663 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/AccessHeatMapEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/AccessHeatMapEndpoint.java @@ -65,13 +65,14 @@ public AccessHeatMapEndpoint(HeatMapServiceImpl heatMapService) { * with volume, buckets under that volume, * then directories, subdirectories and paths * under that bucket. - * E.g. -------->> + *

    +   * E.g. -------->>
        * vol1                           vol2
        * - bucket1                      - bucket2
        * - dir1/dir2/key1               - dir4/dir1/key1
        * - dir1/dir2/key2               - dir4/dir5/key2
        * - dir1/dir3/key1               - dir5/dir3/key1
    -   *
    +   * 
    * @return {@link Response} */ @GET diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java index cbdc198f8aaf..33fc4fd96de3 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java @@ -157,15 +157,15 @@ public ContainerEndpoint(OzoneStorageContainerManager reconSCM, } /** - * Return @{@link org.apache.hadoop.hdds.scm.container} + * Return {@code org.apache.hadoop.hdds.scm.container} * for the containers starting from the given "prev-key" query param for the * given "limit". The given "prev-key" is skipped from the results returned. * * @param prevKey the containerID after which results are returned. - * start containerID, >=0, + * start containerID, >=0, * start searching at the head if 0. * @param limit max no. of containers to get. - * count must be >= 0 + * count must be >= 0 * Usually the count will be replace with a very big * value instead of being unlimited in case the db is very big. * @return {@link Response} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java index 7f0efe97dd97..717a9d74f74e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java @@ -171,7 +171,6 @@ public Response getDatanodes() { .withVersion(nodeManager.getVersion(datanode)) .withSetupTime(nodeManager.getSetupTime(datanode)) .withRevision(nodeManager.getRevision(datanode)) - .withBuildDate(nodeManager.getBuildDate(datanode)) .withLayoutVersion( dnInfo.getLastKnownLayoutVersion().getMetadataLayoutVersion()) .withNetworkLocation(datanode.getNetworkLocation()) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index 3f95c04fc916..d28275e54758 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -66,19 +66,26 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.apache.commons.lang3.StringUtils.isNotBlank; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_DIR_TABLE; import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT; -import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_KEY_SIZE; -import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_LIMIT; -import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_PREVKEY; import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_OPEN_KEY_INCLUDE_FSO; import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_OPEN_KEY_INCLUDE_NON_FSO; import static 
org.apache.hadoop.ozone.recon.ReconConstants.RECON_OPEN_KEY_INCLUDE_FSO; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OPEN_KEY_INCLUDE_NON_FSO; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_START_PREFIX; +import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_KEY_SIZE; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_LIMIT; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_PREVKEY; +import static org.apache.hadoop.ozone.recon.ReconResponseUtils.createBadRequestResponse; +import static org.apache.hadoop.ozone.recon.ReconResponseUtils.createInternalServerErrorResponse; +import static org.apache.hadoop.ozone.recon.ReconResponseUtils.noMatchedKeysResponse; +import static org.apache.hadoop.ozone.recon.ReconUtils.extractKeysFromTable; +import static org.apache.hadoop.ozone.recon.ReconUtils.validateStartPrefix; import static org.apache.hadoop.ozone.recon.api.handlers.BucketHandler.getBucketHandler; import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.normalizePath; import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.parseRequestPath; @@ -211,7 +218,7 @@ public Response getOpenKeyInfo( keyIter = openKeyTable.iterator()) { boolean skipPrevKey = false; String seekKey = prevKey; - if (!skipPrevKeyDone && StringUtils.isNotBlank(prevKey)) { + if (!skipPrevKeyDone && isNotBlank(prevKey)) { skipPrevKey = true; Table.KeyValue seekKeyValue = keyIter.seek(seekKey); @@ -219,7 +226,7 @@ public Response getOpenKeyInfo( // if not, then return empty result // In case of an empty prevKeyPrefix, all the keys are returned if (seekKeyValue == null || - (StringUtils.isNotBlank(prevKey) && + (isNotBlank(prevKey) && !seekKeyValue.getKey().equals(prevKey))) { continue; } @@ -235,6 +242,7 @@ public Response getOpenKeyInfo( continue; } KeyEntityInfo keyEntityInfo = new KeyEntityInfo(); + keyEntityInfo.setIsKey(omKeyInfo.isFile()); keyEntityInfo.setKey(key); keyEntityInfo.setPath(omKeyInfo.getKeyName()); keyEntityInfo.setInStateSince(omKeyInfo.getCreationTime()); @@ -339,62 +347,6 @@ private Long getValueFromId(GlobalStats record) { return record != null ? 
record.getValue() : 0L; } - private void getPendingForDeletionKeyInfo( - int limit, - String prevKey, - KeyInsightInfoResponse deletedKeyAndDirInsightInfo) { - List repeatedOmKeyInfoList = - deletedKeyAndDirInsightInfo.getRepeatedOmKeyInfoList(); - Table deletedTable = - omMetadataManager.getDeletedTable(); - try ( - TableIterator> - keyIter = deletedTable.iterator()) { - boolean skipPrevKey = false; - String seekKey = prevKey; - String lastKey = ""; - if (StringUtils.isNotBlank(prevKey)) { - skipPrevKey = true; - Table.KeyValue seekKeyValue = - keyIter.seek(seekKey); - // check if RocksDB was able to seek correctly to the given key prefix - // if not, then return empty result - // In case of an empty prevKeyPrefix, all the keys are returned - if (seekKeyValue == null || - (StringUtils.isNotBlank(prevKey) && - !seekKeyValue.getKey().equals(prevKey))) { - return; - } - } - while (keyIter.hasNext()) { - Table.KeyValue kv = keyIter.next(); - String key = kv.getKey(); - lastKey = key; - RepeatedOmKeyInfo repeatedOmKeyInfo = kv.getValue(); - // skip the prev key if prev key is present - if (skipPrevKey && key.equals(prevKey)) { - continue; - } - updateReplicatedAndUnReplicatedTotal(deletedKeyAndDirInsightInfo, - repeatedOmKeyInfo); - repeatedOmKeyInfoList.add(repeatedOmKeyInfo); - if ((repeatedOmKeyInfoList.size()) == limit) { - break; - } - } - deletedKeyAndDirInsightInfo.setLastKey(lastKey); - } catch (IOException ex) { - throw new WebApplicationException(ex, - Response.Status.INTERNAL_SERVER_ERROR); - } catch (IllegalArgumentException e) { - throw new WebApplicationException(e, Response.Status.BAD_REQUEST); - } catch (Exception ex) { - throw new WebApplicationException(ex, - Response.Status.INTERNAL_SERVER_ERROR); - } - } - /** Retrieves the summary of deleted keys. * * This method calculates and returns a summary of deleted keys. @@ -428,6 +380,7 @@ public Response getDeletedKeySummary() { * limit - limits the number of key/files returned. * prevKey - E.g. /vol1/bucket1/key1, this will skip keys till it * seeks correctly to the given prevKey. + * startPrefix - E.g. /vol1/bucket1, this will return keys matching this prefix. 
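For illustration, a sketch of calling the extended deletePending endpoint with the new startPrefix filter; the host and port are placeholders for Recon's HTTP address, and the /api/v1/keys base path follows the listKeys examples shown below:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public final class DeletePendingQuerySketch {
  private DeletePendingQuerySketch() { }

  public static void main(String[] args) throws Exception {
    // First page of pending-deletion keys under /vol1/bucket1 (placeholder address).
    URI uri = URI.create("http://localhost:9888/api/v1/keys/deletePending"
        + "?limit=1000&startPrefix=/vol1/bucket1&prevKey=");
    HttpResponse<String> response = HttpClient.newHttpClient()
        .send(HttpRequest.newBuilder(uri).GET().build(), HttpResponse.BodyHandlers.ofString());
    // 200 with a KeyInsightInfoResponse body when keys matched,
    // 204 when nothing matched (see noMatchedKeysResponse above),
    // 400 when startPrefix is above the bucket level.
    System.out.println(response.statusCode() + "\n" + response.body());
  }
}
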
* Sample API Response: * { * "lastKey": "vol1/bucket1/key1", @@ -476,17 +429,90 @@ public Response getDeletedKeySummary() { @GET @Path("/deletePending") public Response getDeletedKeyInfo( - @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) - int limit, - @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) - String prevKey) { - KeyInsightInfoResponse - deletedKeyInsightInfo = new KeyInsightInfoResponse(); - getPendingForDeletionKeyInfo(limit, prevKey, - deletedKeyInsightInfo); + @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) int limit, + @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) String prevKey, + @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_START_PREFIX) String startPrefix) { + + // Initialize the response object to hold the key information + KeyInsightInfoResponse deletedKeyInsightInfo = new KeyInsightInfoResponse(); + + boolean keysFound = false; + + try { + // Validate startPrefix if it's provided + if (isNotBlank(startPrefix) && !validateStartPrefix(startPrefix)) { + return createBadRequestResponse("Invalid startPrefix: Path must be at the bucket level or deeper."); + } + + // Perform the search based on the limit, prevKey, and startPrefix + keysFound = getPendingForDeletionKeyInfo(limit, prevKey, startPrefix, deletedKeyInsightInfo); + + } catch (IllegalArgumentException e) { + LOG.error("Invalid startPrefix provided: {}", startPrefix, e); + return createBadRequestResponse("Invalid startPrefix: " + e.getMessage()); + } catch (IOException e) { + LOG.error("I/O error while searching deleted keys in OM DB", e); + return createInternalServerErrorResponse("Error searching deleted keys in OM DB: " + e.getMessage()); + } catch (Exception e) { + LOG.error("Unexpected error occurred while searching deleted keys", e); + return createInternalServerErrorResponse("Unexpected error: " + e.getMessage()); + } + + if (!keysFound) { + return noMatchedKeysResponse(""); + } + return Response.ok(deletedKeyInsightInfo).build(); } + /** + * Retrieves keys pending deletion based on startPrefix, filtering keys matching the prefix. + * + * @param limit The limit of records to return. + * @param prevKey Pagination key. + * @param startPrefix The search prefix. + * @param deletedKeyInsightInfo The response object to populate. 
+ */ + private boolean getPendingForDeletionKeyInfo( + int limit, String prevKey, String startPrefix, + KeyInsightInfoResponse deletedKeyInsightInfo) throws IOException { + + long replicatedTotal = 0; + long unreplicatedTotal = 0; + boolean keysFound = false; + String lastKey = null; + + // Search for deleted keys in DeletedTable + Table deletedTable = omMetadataManager.getDeletedTable(); + Map deletedKeys = + extractKeysFromTable(deletedTable, startPrefix, limit, prevKey); + + // Iterate over the retrieved keys and populate the response + for (Map.Entry entry : deletedKeys.entrySet()) { + keysFound = true; + RepeatedOmKeyInfo repeatedOmKeyInfo = entry.getValue(); + + // We know each RepeatedOmKeyInfo has just one OmKeyInfo object + OmKeyInfo keyInfo = repeatedOmKeyInfo.getOmKeyInfoList().get(0); + KeyEntityInfo keyEntityInfo = createKeyEntityInfoFromOmKeyInfo(entry.getKey(), keyInfo); + + // Add the key directly to the list without classification + deletedKeyInsightInfo.getRepeatedOmKeyInfoList().add(repeatedOmKeyInfo); + + replicatedTotal += keyInfo.getReplicatedSize(); + unreplicatedTotal += keyInfo.getDataSize(); + + lastKey = entry.getKey(); // Update lastKey + } + + // Set the aggregated totals in the response + deletedKeyInsightInfo.setReplicatedDataSize(replicatedTotal); + deletedKeyInsightInfo.setUnreplicatedDataSize(unreplicatedTotal); + deletedKeyInsightInfo.setLastKey(lastKey); + + return keysFound; + } + /** * Creates a keys summary for deleted keys and updates the provided * keysSummary map. Calculates the total number of deleted keys, replicated @@ -526,7 +552,7 @@ private void getPendingForDeletionDirInfo( boolean skipPrevKey = false; String seekKey = prevKey; String lastKey = ""; - if (StringUtils.isNotBlank(prevKey)) { + if (isNotBlank(prevKey)) { skipPrevKey = true; Table.KeyValue seekKeyValue = keyIter.seek(seekKey); @@ -534,7 +560,7 @@ private void getPendingForDeletionDirInfo( // if not, then return empty result // In case of an empty prevKeyPrefix, all the keys are returned if (seekKeyValue == null || - (StringUtils.isNotBlank(prevKey) && + (isNotBlank(prevKey) && !seekKeyValue.getKey().equals(prevKey))) { return; } @@ -549,6 +575,7 @@ private void getPendingForDeletionDirInfo( continue; } KeyEntityInfo keyEntityInfo = new KeyEntityInfo(); + keyEntityInfo.setIsKey(omKeyInfo.isFile()); keyEntityInfo.setKey(omKeyInfo.getFileName()); keyEntityInfo.setPath(createPath(omKeyInfo)); keyEntityInfo.setInStateSince(omKeyInfo.getCreationTime()); @@ -734,7 +761,7 @@ public Response getDeletedDirectorySummary() { * /volume1/fso-bucket/dir1/dir2/dir3/file1 * Input Request for OBS bucket: * - * `api/v1/keys/listKeys?startPrefix=/volume1/obs-bucket&limit=2&replicationType=RATIS` + * {@literal `api/v1/keys/listKeys?startPrefix=/volume1/obs-bucket&limit=2&replicationType=RATIS`} * Output Response: * * { @@ -832,7 +859,7 @@ public Response getDeletedDirectorySummary() { * } * Input Request for FSO bucket: * - * `api/v1/keys/listKeys?startPrefix=/volume1/fso-bucket&limit=2&replicationType=RATIS` + * {@literal `api/v1/keys/listKeys?startPrefix=/volume1/fso-bucket&limit=2&replicationType=RATIS`} * Output Response: * * { @@ -930,7 +957,6 @@ public Response getDeletedDirectorySummary() { * } * * ******************************************************** - * @throws IOException */ @GET @Path("/listKeys") @@ -960,7 +986,7 @@ public Response listKeys(@QueryParam("replicationType") String replicationType, limit, false, ""); Response response = getListKeysResponse(paramInfo); if 
((response.getStatus() != Response.Status.OK.getStatusCode()) && - (response.getStatus() != Response.Status.NOT_FOUND.getStatusCode())) { + (response.getStatus() != Response.Status.NO_CONTENT.getStatusCode())) { return response; } if (response.getEntity() instanceof ListKeysResponse) { @@ -1161,7 +1187,7 @@ private Map retrieveKeysFromTable( try ( TableIterator> keyIter = table.iterator()) { - if (!paramInfo.isSkipPrevKeyDone() && StringUtils.isNotBlank(seekKey)) { + if (!paramInfo.isSkipPrevKeyDone() && isNotBlank(seekKey)) { skipPrevKey = true; Table.KeyValue seekKeyValue = keyIter.seek(seekKey); @@ -1258,6 +1284,7 @@ private KeyEntityInfo createKeyEntityInfoFromOmKeyInfo(String dbKey, OmKeyInfo keyInfo) throws IOException { KeyEntityInfo keyEntityInfo = new KeyEntityInfo(); keyEntityInfo.setKey(dbKey); // Set the DB key + keyEntityInfo.setIsKey(keyInfo.isFile()); keyEntityInfo.setPath(ReconUtils.constructFullPath(keyInfo, reconNamespaceSummaryManager, omMetadataManager)); keyEntityInfo.setSize(keyInfo.getDataSize()); @@ -1277,19 +1304,6 @@ private void createSummaryForDeletedDirectories( dirSummary.put("totalDeletedDirectories", deletedDirCount); } - private void updateReplicatedAndUnReplicatedTotal( - KeyInsightInfoResponse deletedKeyAndDirInsightInfo, - RepeatedOmKeyInfo repeatedOmKeyInfo) { - repeatedOmKeyInfo.getOmKeyInfoList().forEach(omKeyInfo -> { - deletedKeyAndDirInsightInfo.setUnreplicatedDataSize( - deletedKeyAndDirInsightInfo.getUnreplicatedDataSize() + - omKeyInfo.getDataSize()); - deletedKeyAndDirInsightInfo.setReplicatedDataSize( - deletedKeyAndDirInsightInfo.getReplicatedDataSize() + - omKeyInfo.getReplicatedSize()); - }); - } - private String createPath(OmKeyInfo omKeyInfo) { return omKeyInfo.getVolumeName() + OM_KEY_PREFIX + omKeyInfo.getBucketName() + OM_KEY_PREFIX + omKeyInfo.getKeyName(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index 9cd6fa33d032..fcd73fbe72f2 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -20,12 +20,10 @@ import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo; import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; @@ -50,13 +48,16 @@ import java.util.ArrayList; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_START_PREFIX; -import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OPEN_KEY_DEFAULT_SEARCH_LIMIT; -import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OPEN_KEY_SEARCH_DEFAULT_PREV_KEY; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OM_INSIGHTS_DEFAULT_START_PREFIX; +import static 
org.apache.hadoop.ozone.recon.ReconConstants.RECON_OM_INSIGHTS_DEFAULT_SEARCH_LIMIT; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OM_INSIGHTS_DEFAULT_SEARCH_PREV_KEY; import static org.apache.hadoop.ozone.recon.ReconResponseUtils.noMatchedKeysResponse; import static org.apache.hadoop.ozone.recon.ReconResponseUtils.createBadRequestResponse; import static org.apache.hadoop.ozone.recon.ReconResponseUtils.createInternalServerErrorResponse; +import static org.apache.hadoop.ozone.recon.ReconUtils.validateStartPrefix; import static org.apache.hadoop.ozone.recon.ReconUtils.constructObjectPathWithPrefix; +import static org.apache.hadoop.ozone.recon.ReconUtils.extractKeysFromTable; +import static org.apache.hadoop.ozone.recon.ReconUtils.gatherSubPaths; import static org.apache.hadoop.ozone.recon.ReconUtils.validateNames; import static org.apache.hadoop.ozone.recon.api.handlers.BucketHandler.getBucketHandler; import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.normalizePath; @@ -64,6 +65,11 @@ /** * REST endpoint for search implementation in OM DB Insight. + * + * This class provides endpoints for searching keys in the Ozone Manager database. + * It supports searching for both open and deleted keys across File System Optimized (FSO) + * and Object Store (non-FSO) bucket layouts. The results include matching keys and their + * data sizes. */ @Path("/keys") @Produces(MediaType.APPLICATION_JSON) @@ -88,14 +94,14 @@ public OMDBInsightSearchEndpoint(OzoneStorageContainerManager reconSCM, /** - * Performs a search for open keys in the Ozone Manager (OM) database using a specified search prefix. + * Performs a search for open keys in the Ozone Manager OpenKey and OpenFile table using a specified search prefix. * This endpoint searches across both File System Optimized (FSO) and Object Store (non-FSO) layouts, * compiling a list of keys that match the given prefix along with their data sizes. - *

    + * * The search prefix must start from the bucket level ('/volumeName/bucketName/') or any specific directory * or key level (e.g., '/volA/bucketA/dir1' for everything under 'dir1' inside 'bucketA' of 'volA'). * The search operation matches the prefix against the start of keys' names within the OM DB. - *

    + * * Example Usage: * 1. A startPrefix of "/volA/bucketA/" retrieves every key under bucket 'bucketA' in volume 'volA'. * 2. Specifying "/volA/bucketA/dir1" focuses the search within 'dir1' inside 'bucketA' of 'volA'. @@ -110,25 +116,17 @@ public OMDBInsightSearchEndpoint(OzoneStorageContainerManager reconSCM, @GET @Path("/open/search") public Response searchOpenKeys( - @DefaultValue(DEFAULT_START_PREFIX) @QueryParam("startPrefix") + @DefaultValue(RECON_OM_INSIGHTS_DEFAULT_START_PREFIX) @QueryParam("startPrefix") String startPrefix, - @DefaultValue(RECON_OPEN_KEY_DEFAULT_SEARCH_LIMIT) @QueryParam("limit") + @DefaultValue(RECON_OM_INSIGHTS_DEFAULT_SEARCH_LIMIT) @QueryParam("limit") int limit, - @DefaultValue(RECON_OPEN_KEY_SEARCH_DEFAULT_PREV_KEY) @QueryParam("prevKey") String prevKey) throws IOException { + @DefaultValue(RECON_OM_INSIGHTS_DEFAULT_SEARCH_PREV_KEY) @QueryParam("prevKey") + String prevKey) throws IOException { try { - // Ensure startPrefix is not null or empty and starts with '/' - if (startPrefix == null || startPrefix.length() == 0) { - return createBadRequestResponse( - "Invalid startPrefix: Path must be at the bucket level or deeper."); - } - startPrefix = startPrefix.startsWith("/") ? startPrefix : "/" + startPrefix; - - // Split the path to ensure it's at least at the bucket level - String[] pathComponents = startPrefix.split("/"); - if (pathComponents.length < 3 || pathComponents[2].isEmpty()) { - return createBadRequestResponse( - "Invalid startPrefix: Path must be at the bucket level or deeper."); + // Validate the request parameters + if (!validateStartPrefix(startPrefix)) { + return createBadRequestResponse("Invalid startPrefix: Path must be at the bucket level or deeper."); } // Ensure the limit is non-negative @@ -145,7 +143,7 @@ public Response searchOpenKeys( Table openKeyTable = omMetadataManager.getOpenKeyTable(BucketLayout.LEGACY); Map obsKeys = - retrieveKeysFromTable(openKeyTable, startPrefix, limit, prevKey); + extractKeysFromTable(openKeyTable, startPrefix, limit, prevKey); for (Map.Entry entry : obsKeys.entrySet()) { keysFound = true; KeyEntityInfo keyEntityInfo = @@ -221,12 +219,13 @@ public Map searchOpenKeysInFSO(String startPrefix, subPaths.add(startPrefixObjectPath); // Recursively gather all subpaths - ReconUtils.gatherSubPaths(parentId, subPaths, Long.parseLong(names[0]), Long.parseLong(names[1]), + gatherSubPaths(parentId, subPaths, Long.parseLong(names[0]), Long.parseLong(names[1]), reconNamespaceSummaryManager); // Iterate over the subpaths and retrieve the open files for (String subPath : subPaths) { - matchedKeys.putAll(retrieveKeysFromTable(openFileTable, subPath, limit - matchedKeys.size(), prevKey)); + matchedKeys.putAll( + extractKeysFromTable(openFileTable, subPath, limit - matchedKeys.size(), prevKey)); if (matchedKeys.size() >= limit) { break; } @@ -235,7 +234,8 @@ public Map searchOpenKeysInFSO(String startPrefix, } // If the search level is at the volume, bucket or key level, directly search the openFileTable - matchedKeys.putAll(retrieveKeysFromTable(openFileTable, startPrefixObjectPath, limit, prevKey)); + matchedKeys.putAll( + extractKeysFromTable(openFileTable, startPrefixObjectPath, limit, prevKey)); return matchedKeys; } @@ -245,13 +245,15 @@ public Map searchOpenKeysInFSO(String startPrefix, * This method transforms a user-provided path (e.g., "volume/bucket/dir1") into * a database-friendly format ("/volumeID/bucketID/ParentId/") by replacing names * with their corresponding IDs. 
It simplifies database queries for FSO bucket operations.
- *
+ *
    +   * {@code
        * Examples:
        * - Input: "volume/bucket/key" -> Output: "/volumeID/bucketID/parentDirID/key"
        * - Input: "volume/bucket/dir1" -> Output: "/volumeID/bucketID/dir1ID/"
        * - Input: "volume/bucket/dir1/key1" -> Output: "/volumeID/bucketID/dir1ID/key1"
        * - Input: "volume/bucket/dir1/dir2" -> Output: "/volumeID/bucketID/dir2ID/"
    -   *
    +   * }
    +   * 
    * @param prevKeyPrefix The path to be converted. * @return The object path as "/volumeID/bucketID/ParentId/" or an empty string if an error occurs. * @throws IOException If database access fails. @@ -325,48 +327,6 @@ public String convertToObjectPath(String prevKeyPrefix) throws IOException { return prevKeyPrefix; } - - /** - * Common method to retrieve keys from a table based on a search prefix and a limit. - * - * @param table The table to retrieve keys from. - * @param startPrefix The search prefix to match keys against. - * @param limit The maximum number of keys to retrieve. - * @param prevKey The key to start after for the next set of records. - * @return A map of keys and their corresponding OmKeyInfo objects. - * @throws IOException If there are problems accessing the table. - */ - private Map retrieveKeysFromTable( - Table table, String startPrefix, int limit, String prevKey) - throws IOException { - Map matchedKeys = new LinkedHashMap<>(); - try (TableIterator> keyIter = table.iterator()) { - // If a previous key is provided, seek to the previous key and skip it. - if (!prevKey.isEmpty()) { - keyIter.seek(prevKey); - if (keyIter.hasNext()) { - // Skip the previous key - keyIter.next(); - } - } else { - // If no previous key is provided, start from the search prefix. - keyIter.seek(startPrefix); - } - while (keyIter.hasNext() && matchedKeys.size() < limit) { - Table.KeyValue entry = keyIter.next(); - String dbKey = entry.getKey(); - if (!dbKey.startsWith(startPrefix)) { - break; // Exit the loop if the key no longer matches the prefix - } - matchedKeys.put(dbKey, entry.getValue()); - } - } catch (IOException exception) { - LOG.error("Error retrieving keys from table for path: {}", startPrefix, exception); - throw exception; - } - return matchedKeys; - } - /** * Creates a KeyEntityInfo object from an OmKeyInfo object and the corresponding key. * @@ -378,6 +338,7 @@ private KeyEntityInfo createKeyEntityInfoFromOmKeyInfo(String dbKey, OmKeyInfo keyInfo) { KeyEntityInfo keyEntityInfo = new KeyEntityInfo(); keyEntityInfo.setKey(dbKey); // Set the DB key + keyEntityInfo.setIsKey(keyInfo.isFile()); keyEntityInfo.setPath(keyInfo.getKeyName()); // Assuming path is the same as key name keyEntityInfo.setInStateSince(keyInfo.getCreationTime()); keyEntityInfo.setSize(keyInfo.getDataSize()); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java index 266caaa2d8e2..a2db616ec2fc 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java @@ -118,7 +118,7 @@ public static String buildSubpath(String path, String nextLevel) { } /** - * Example: /vol1/buck1/a/b/c/d/e/file1.txt -> a/b/c/d/e/file1.txt. + * Example: {@literal /vol1/buck1/a/b/c/d/e/file1.txt -> a/b/c/d/e/file1.txt} . 
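The searchOpenKeys hunk earlier in this file swaps the inline startPrefix checks for a private validateStartPrefix helper whose body is not part of these hunks. Below is a minimal sketch of equivalent validation, mirroring the removed inline logic (normalize a missing leading '/', then require at least a volume and a non-empty bucket component); the class and method names are illustrative only, not the actual helper.

public final class StartPrefixValidationSketch {

  private StartPrefixValidationSketch() {
  }

  /** Returns true if the prefix is at the bucket level or deeper, e.g. "/volA/bucketA/" or "/volA/bucketA/dir1". */
  static boolean isValidStartPrefix(String startPrefix) {
    if (startPrefix == null || startPrefix.isEmpty()) {
      return false;
    }
    // Normalize a missing leading '/', as the removed inline code did.
    String normalized = startPrefix.startsWith("/") ? startPrefix : "/" + startPrefix;
    // "/vol/bucket/..." splits into ["", "vol", "bucket", ...]; require a non-empty bucket component.
    String[] parts = normalized.split("/");
    return parts.length >= 3 && !parts[2].isEmpty();
  }

  public static void main(String[] args) {
    System.out.println(isValidStartPrefix("/volA/bucketA/"));    // true
    System.out.println(isValidStartPrefix("volA/bucketA/dir1")); // true after normalization
    System.out.println(isValidStartPrefix("/volA"));             // false: no bucket component
  }
}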
* @param names parsed request * @return key name */ diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java index 06c20a963a2a..ec7ab6cf8eea 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java @@ -84,10 +84,6 @@ public final class DatanodeMetadata { @JsonInclude(JsonInclude.Include.NON_NULL) private String revision; - @XmlElement(name = "buildDate") - @JsonInclude(JsonInclude.Include.NON_NULL) - private String buildDate; - @XmlElement(name = "layoutVersion") @JsonInclude(JsonInclude.Include.NON_DEFAULT) private int layoutVersion; @@ -110,7 +106,6 @@ private DatanodeMetadata(Builder builder) { this.version = builder.version; this.setupTime = builder.setupTime; this.revision = builder.revision; - this.buildDate = builder.buildDate; this.layoutVersion = builder.layoutVersion; this.networkLocation = builder.networkLocation; } @@ -167,10 +162,6 @@ public String getRevision() { return revision; } - public String getBuildDate() { - return buildDate; - } - public int getLayoutVersion() { return layoutVersion; } @@ -206,7 +197,6 @@ public static final class Builder { private String version; private long setupTime; private String revision; - private String buildDate; private int layoutVersion; private String networkLocation; @@ -282,11 +272,6 @@ public Builder withRevision(String revision) { return this; } - public Builder withBuildDate(String buildDate) { - this.buildDate = buildDate; - return this; - } - public Builder withLayoutVersion(int layoutVersion) { this.layoutVersion = layoutVersion; return this; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfo.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfo.java index d7cd35991909..8a56cbbd33f0 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfo.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfo.java @@ -143,7 +143,7 @@ public boolean isKey() { return isKey; } - public void setKey(boolean key) { - isKey = key; + public void setIsKey(boolean isKey) { + this.isKey = isKey; } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java index f3b273451a2d..aa6c5a765d1c 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java @@ -58,6 +58,11 @@ private NSSummaryCodec() { // singleton } + @Override + public Class getTypeClass() { + return NSSummary.class; + } + @Override public byte[] toPersistedFormat(NSSummary object) throws IOException { Set childDirs = object.getChildDir(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java index 1fc114eabd75..14ae997073c4 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java +++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java @@ -109,7 +109,7 @@ List listBucketsUnderVolume( /** * Return the OzoneConfiguration instance used by Recon. - * @return + * @return OzoneConfiguration */ OzoneConfiguration getOzoneConfiguration(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ContainerReplicaHistoryList.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ContainerReplicaHistoryList.java index 5895d3e133c4..7afed9c1ce98 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ContainerReplicaHistoryList.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ContainerReplicaHistoryList.java @@ -34,11 +34,11 @@ * For Recon DB table definition. */ public class ContainerReplicaHistoryList { - private static final Codec CODEC - = new DelegatedCodec<>(Proto2Codec.get( - ContainerReplicaHistoryListProto.getDefaultInstance()), + private static final Codec CODEC = new DelegatedCodec<>( + Proto2Codec.get(ContainerReplicaHistoryListProto.getDefaultInstance()), ContainerReplicaHistoryList::fromProto, - ContainerReplicaHistoryList::toProto); + ContainerReplicaHistoryList::toProto, + ContainerReplicaHistoryList.class); public static Codec getCodec() { return CODEC; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java index 65a9530c5cac..2ebeafcccb90 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java @@ -205,17 +205,6 @@ public String getRevision(DatanodeDetails datanodeDetails) { EMPTY_DATANODE_DETAILS).getRevision(); } - /** - * Returns the build date of the given node. - * - * @param datanodeDetails DatanodeDetails - * @return buildDate - */ - public String getBuildDate(DatanodeDetails datanodeDetails) { - return inMemDatanodeDetails.getOrDefault(datanodeDetails.getUuid(), - EMPTY_DATANODE_DETAILS).getBuildDate(); - } - @Override public void onMessage(CommandForDatanode commandForDatanode, EventPublisher ignored) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconSCMDBDefinition.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconSCMDBDefinition.java index 1ea2f7b13126..4970d5da915f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconSCMDBDefinition.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconSCMDBDefinition.java @@ -32,27 +32,27 @@ /** * Recon SCM db file for ozone. 
*/ -public class ReconSCMDBDefinition extends SCMDBDefinition { +public final class ReconSCMDBDefinition extends SCMDBDefinition { private static final Codec UUID_CODEC = new DelegatedCodec<>( StringCodec.get(), UUID::fromString, UUID::toString, - DelegatedCodec.CopyType.SHALLOW); + UUID.class, DelegatedCodec.CopyType.SHALLOW); public static final String RECON_SCM_DB_NAME = "recon-scm.db"; - public static final DBColumnFamilyDefinition - NODES = - new DBColumnFamilyDefinition( - "nodes", - UUID.class, - UUID_CODEC, - DatanodeDetails.class, - DatanodeDetails.getCodec()); + public static final DBColumnFamilyDefinition NODES + = new DBColumnFamilyDefinition<>("nodes", UUID_CODEC, DatanodeDetails.getCodec()); private static final Map> COLUMN_FAMILIES = DBColumnFamilyDefinition.newUnmodifiableMap( - new SCMDBDefinition().getMap(), NODES); + SCMDBDefinition.get().getMap(), NODES); - public ReconSCMDBDefinition() { + private static final ReconSCMDBDefinition INSTANCE = new ReconSCMDBDefinition(); + + public static ReconSCMDBDefinition get() { + return INSTANCE; + } + + private ReconSCMDBDefinition() { super(COLUMN_FAMILIES); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java index c773187c4b1d..ea1a3440160f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java @@ -218,8 +218,7 @@ public ReconStorageContainerManagerFacade(OzoneConfiguration conf, this.scmStorageConfig = new ReconStorageConfig(conf, reconUtils); this.clusterMap = new NetworkTopologyImpl(conf); - this.dbStore = DBStoreBuilder - .createDBStore(ozoneConfiguration, new ReconSCMDBDefinition()); + this.dbStore = DBStoreBuilder.createDBStore(ozoneConfiguration, ReconSCMDBDefinition.get()); this.scmLayoutVersionManager = new HDDSLayoutVersionManager(scmStorageConfig.getLayoutVersion()); @@ -627,8 +626,7 @@ private void deleteOldSCMDB() throws IOException { private void initializeNewRdbStore(File dbFile) throws IOException { try { - DBStore newStore = createDBAndAddSCMTablesAndCodecs( - dbFile, new ReconSCMDBDefinition()); + final DBStore newStore = createDBAndAddSCMTablesAndCodecs(dbFile, ReconSCMDBDefinition.get()); Table nodeTable = ReconSCMDBDefinition.NODES.getTable(dbStore); Table newNodeTable = diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java index 59957e116244..44595a43b794 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java @@ -70,7 +70,7 @@ void batchStoreContainerKeyMapping(BatchOperation batch, Integer count) throws IOException; /** - * Store the containerID -> no. of keys count into the container DB store. + * Store the containerID -> no. of keys count into the container DB store. * * @param containerID the containerID. * @param count count of the keys within the given containerID. 
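ReconSCMDBDefinition above becomes a singleton (private constructor plus a static get()), and ReconStorageContainerManagerFacade is updated to consume it that way. The following is a short consolidated sketch of the resulting call pattern; the wrapper class and variable names are illustrative rather than taken from the patch.

import java.io.IOException;
import java.util.UUID;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.recon.scm.ReconSCMDBDefinition;

public final class ReconScmDbUsageSketch {

  private ReconScmDbUsageSketch() {
  }

  static Table<UUID, DatanodeDetails> openNodesTable(OzoneConfiguration conf) throws IOException {
    // The definition is no longer instantiated with 'new'; the shared instance comes from get().
    DBStore store = DBStoreBuilder.createDBStore(conf, ReconSCMDBDefinition.get());
    return ReconSCMDBDefinition.NODES.getTable(store);
  }
}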
@@ -80,7 +80,7 @@ void batchStoreContainerKeyMapping(BatchOperation batch, void storeContainerKeyCount(Long containerID, Long count) throws IOException; /** - * Store the containerID -> no. of keys count into a batch. + * Store the containerID -> no. of keys count into a batch. * * @param batch the batch operation we store into * @param containerID the containerID. @@ -91,7 +91,7 @@ void batchStoreContainerKeyCounts(BatchOperation batch, Long containerID, Long count) throws IOException; /** - * Store the containerID -> ContainerReplicaWithTimestamp mapping to the + * Store the containerID -> ContainerReplicaWithTimestamp mapping to the * container DB store. * * @param containerID the containerID. @@ -159,7 +159,7 @@ Map getContainerReplicaHistory( * Get the stored key prefixes for the given containerId. * * @param containerId the given containerId. - * @return Map of Key prefix -> count. + * @return Map of Key prefix -> count. */ Map getKeyPrefixesForContainer( long containerId) throws IOException; @@ -170,19 +170,19 @@ Map getKeyPrefixesForContainer( * * @param containerId the given containerId. * @param prevKeyPrefix the key prefix to seek to and start scanning. - * @return Map of Key prefix -> count. + * @return Map of Key prefix -> count. */ Map getKeyPrefixesForContainer( long containerId, String prevKeyPrefix) throws IOException; /** * Get a Map of containerID, containerMetadata of Containers only for the - * given limit. If the limit is -1 or any integer <0, then return all + * given limit. If the limit is -1 or any integer < 0, then return all * the containers without any limit. * * @param limit the no. of containers to fetch. * @param prevContainer containerID after which the results are returned. - * @return Map of containerID -> containerMetadata. + * @return Map of containerID -> containerMetadata. * @throws IOException */ Map getContainers(int limit, long prevContainer) @@ -256,7 +256,7 @@ void commitBatchOperation(RDBBatchOperation rdbBatchOperation) * * @param prevKeyPrefix the key prefix to seek to and start scanning. * @param keyVersion the key version to seek - * @return Map of Key prefix -> count. + * @return Map of Key prefix -> count. */ Map getContainerForKeyPrefixes( String prevKeyPrefix, long keyVersion) throws IOException; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java index 01a630a52350..500c01bfde2e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java @@ -18,9 +18,6 @@ package org.apache.hadoop.ozone.recon.spi.impl; -import static org.apache.commons.compress.utils.CharsetNames.UTF_8; - -import java.io.IOException; import java.nio.ByteBuffer; import org.apache.commons.lang3.ArrayUtils; @@ -31,6 +28,8 @@ import com.google.common.base.Preconditions; import com.google.common.primitives.Longs; +import static java.nio.charset.StandardCharsets.UTF_8; + /** * Codec to serialize/deserialize {@link ContainerKeyPrefix}. 
*/ @@ -51,8 +50,12 @@ private ContainerKeyPrefixCodec() { } @Override - public byte[] toPersistedFormat(ContainerKeyPrefix containerKeyPrefix) - throws IOException { + public Class getTypeClass() { + return ContainerKeyPrefix.class; + } + + @Override + public byte[] toPersistedFormat(ContainerKeyPrefix containerKeyPrefix) { Preconditions.checkNotNull(containerKeyPrefix, "Null object can't be converted to byte array."); byte[] containerIdBytes = Longs.toByteArray(containerKeyPrefix @@ -76,9 +79,7 @@ public byte[] toPersistedFormat(ContainerKeyPrefix containerKeyPrefix) } @Override - public ContainerKeyPrefix fromPersistedFormat(byte[] rawData) - throws IOException { - + public ContainerKeyPrefix fromPersistedFormat(byte[] rawData) { // First 8 bytes is the containerId. long containerIdFromDB = ByteBuffer.wrap(ArrayUtils.subarray( rawData, 0, Long.BYTES)).getLong(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/KeyPrefixContainerCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/KeyPrefixContainerCodec.java index 7baca152b285..70b1d65837c2 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/KeyPrefixContainerCodec.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/KeyPrefixContainerCodec.java @@ -24,10 +24,9 @@ import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.ozone.recon.api.types.KeyPrefixContainer; -import java.io.IOException; import java.nio.ByteBuffer; -import static org.apache.commons.compress.utils.CharsetNames.UTF_8; +import static java.nio.charset.StandardCharsets.UTF_8; /** * Codec to serialize/deserialize {@link KeyPrefixContainer}. @@ -49,8 +48,12 @@ private KeyPrefixContainerCodec() { private static final String KEY_DELIMITER = "_"; @Override - public byte[] toPersistedFormat(KeyPrefixContainer keyPrefixContainer) - throws IOException { + public Class getTypeClass() { + return KeyPrefixContainer.class; + } + + @Override + public byte[] toPersistedFormat(KeyPrefixContainer keyPrefixContainer) { Preconditions.checkNotNull(keyPrefixContainer, "Null object can't be converted to byte array."); byte[] keyPrefixBytes = keyPrefixContainer.getKeyPrefix().getBytes(UTF_8); @@ -75,9 +78,7 @@ public byte[] toPersistedFormat(KeyPrefixContainer keyPrefixContainer) } @Override - public KeyPrefixContainer fromPersistedFormat(byte[] rawData) - throws IOException { - + public KeyPrefixContainer fromPersistedFormat(byte[] rawData) { // When reading from byte[], we can always expect to have the key, version // and version parts in the byte array. byte[] keyBytes = ArrayUtils.subarray(rawData, diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java index 46b75e45fadb..42908a775a4a 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java @@ -191,7 +191,7 @@ public void batchStoreContainerKeyMapping(BatchOperation batch, } /** - * Store the containerID -> no. of keys count into the container DB store. + * Store the containerID -> no. of keys count into the container DB store. * * @param containerID the containerID. * @param count count of the keys within the given containerID. 
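One effect of the import change above (dropping the commons-compress CharsetNames.UTF_8 String constant in favour of java.nio.charset.StandardCharsets.UTF_8, a Charset) is that String.getBytes no longer involves a checked exception, which is consistent with toPersistedFormat and fromPersistedFormat dropping their throws IOException clauses. A small illustration of the two overloads follows; the class name is made up.

import static java.nio.charset.StandardCharsets.UTF_8;

import java.io.UnsupportedEncodingException;

public final class CharsetOverloadSketch {

  private CharsetOverloadSketch() {
  }

  // Charset overload: no checked exception, so the caller needs no 'throws' clause.
  static byte[] withCharsetConstant(String keyPrefix) {
    return keyPrefix.getBytes(UTF_8);
  }

  // String-name overload (what a String UTF_8 constant forces): checked exception must be declared or handled.
  static byte[] withCharsetName(String keyPrefix) throws UnsupportedEncodingException {
    return keyPrefix.getBytes("UTF-8");
  }
}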
@@ -204,7 +204,7 @@ public void storeContainerKeyCount(Long containerID, Long count) } /** - * Store the containerID -> no. of keys count into a batch. + * Store the containerID -> no. of keys count into a batch. * * @param batch the batch we store into * @param containerID the containerID. @@ -219,7 +219,7 @@ public void batchStoreContainerKeyCounts(BatchOperation batch, } /** - * Store the ContainerID -> ContainerReplicaHistory (container first and last + * Store the ContainerID -> ContainerReplicaHistory (container first and last * seen time) mapping to the container DB store. * * @param containerID the containerID. @@ -417,16 +417,16 @@ public Map getKeyPrefixesForContainer( } /** - * Iterate the DB to construct a Map of containerID -> containerMetadata + * Iterate the DB to construct a Map of containerID -> containerMetadata * only for the given limit from the given start key. The start containerID * is skipped from the result. * - * Return all the containers if limit < 0. + * Return all the containers if limit < 0. * * @param limit No of containers to get. * @param prevContainer containerID after which the * list of containers are scanned. - * @return Map of containerID -> containerMetadata. + * @return Map of containerID -> containerMetadata. * @throws IOException on failure. */ @Override diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java index 8cb3b4188ed4..cde24d7570b0 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java @@ -46,44 +46,34 @@ public ReconDBDefinition(String dbName) { CONTAINER_KEY = new DBColumnFamilyDefinition<>( "containerKeyTable", - ContainerKeyPrefix.class, ContainerKeyPrefixCodec.get(), - Integer.class, IntegerCodec.get()); public static final DBColumnFamilyDefinition KEY_CONTAINER = new DBColumnFamilyDefinition<>( "keyContainerTable", - KeyPrefixContainer.class, KeyPrefixContainerCodec.get(), - Integer.class, IntegerCodec.get()); public static final DBColumnFamilyDefinition CONTAINER_KEY_COUNT = new DBColumnFamilyDefinition<>( "containerKeyCountTable", - Long.class, LongCodec.get(), - Long.class, LongCodec.get()); public static final DBColumnFamilyDefinition REPLICA_HISTORY = new DBColumnFamilyDefinition( "replica_history", - Long.class, LongCodec.get(), - ContainerReplicaHistoryList.class, ContainerReplicaHistoryList.getCodec()); public static final DBColumnFamilyDefinition NAMESPACE_SUMMARY = new DBColumnFamilyDefinition( "namespaceSummaryTable", - Long.class, LongCodec.get(), - NSSummary.class, NSSummaryCodec.get()); // Container Replica History with bcsId tracking. 
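The ReconDBDefinition hunks above drop the explicit key and value Class arguments from each DBColumnFamilyDefinition because the codecs now carry that information via getTypeClass() (added to NSSummaryCodec, ContainerKeyPrefixCodec and KeyPrefixContainerCodec elsewhere in this patch). Here is a simplified sketch of the idea using stand-in types rather than the real Codec and DBColumnFamilyDefinition classes.

public final class TypeClassSketch {

  /** Trimmed-down codec: the type class travels with the codec itself. */
  interface MiniCodec<T> {
    Class<T> getTypeClass();

    byte[] toPersistedFormat(T object);

    T fromPersistedFormat(byte[] rawData);
  }

  /** Trimmed-down column family definition: no separate Class arguments are needed. */
  static final class MiniColumnFamily<K, V> {
    private final String name;
    private final MiniCodec<K> keyCodec;
    private final MiniCodec<V> valueCodec;

    MiniColumnFamily(String name, MiniCodec<K> keyCodec, MiniCodec<V> valueCodec) {
      this.name = name;
      this.keyCodec = keyCodec;
      this.valueCodec = valueCodec;
    }

    String getName() {
      return name;
    }

    Class<K> getKeyType() {
      // Derived from the codec, which is why the explicit Class parameters could be removed.
      return keyCodec.getTypeClass();
    }

    Class<V> getValueType() {
      return valueCodec.getTypeClass();
    }
  }

  private TypeClassSketch() {
  }
}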
@@ -91,9 +81,7 @@ public ReconDBDefinition(String dbName) { REPLICA_HISTORY_V2 = new DBColumnFamilyDefinition( "replica_history_v2", - Long.class, LongCodec.get(), - ContainerReplicaHistoryList.class, ContainerReplicaHistoryList.getCodec()); private static final Map> diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java index fd5d88640805..bf34c9f89301 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java @@ -57,7 +57,7 @@ /** * Class to iterate over the OM DB and populate the Recon container DB with - * the container -> Key reverse mapping. + * the container -> Key reverse mapping. */ public class ContainerKeyMapperTask implements ReconOmTask { @@ -81,8 +81,8 @@ public ContainerKeyMapperTask(ReconContainerMetadataManager } /** - * Read Key -> ContainerId data from OM snapshot DB and write reverse map - * (container, key) -> count to Recon Container DB. + * Read Key -> ContainerId data from OM snapshot DB and write reverse map + * (container, key) -> count to Recon Container DB. */ @Override public Pair reprocess(OMMetadataManager omMetadataManager) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java index 41e6bf962a7e..d1f98c49bdcf 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java @@ -49,14 +49,12 @@ public class OMDBUpdatesHandler extends ManagedWriteBatch.Handler { private OMMetadataManager omMetadataManager; private List omdbUpdateEvents = new ArrayList<>(); private Map> omdbLatestUpdateEvents = new HashMap<>(); - private OMDBDefinition omdbDefinition; - private OmUpdateEventValidator omUpdateEventValidator; + private final OMDBDefinition omdbDefinition = OMDBDefinition.get(); + private final OmUpdateEventValidator omUpdateEventValidator = new OmUpdateEventValidator(omdbDefinition); public OMDBUpdatesHandler(OMMetadataManager metadataManager) { omMetadataManager = metadataManager; tablesNames = metadataManager.getStore().getTableNames(); - omdbDefinition = new OMDBDefinition(); - omUpdateEventValidator = new OmUpdateEventValidator(omdbDefinition); } @Override diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmUpdateEventValidator.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmUpdateEventValidator.java index 3c7ce844e9cb..b5a690f5eb49 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmUpdateEventValidator.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmUpdateEventValidator.java @@ -23,7 +23,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; /** * OmUpdateEventValidator is a utility class for validating OMDBUpdateEvents @@ -48,7 +47,6 @@ public OmUpdateEventValidator(OMDBDefinition omdbDefinition) { * @param keyType the key type of the event. * @param action the action performed on the event. * @return true if the event is valid, false otherwise. 
- * @throws IOException if an I/O error occurs during the validation. */ public boolean isValidEvent(String tableName, Object actualValueType, diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconOmTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconOmTask.java index e904334bb318..2092d6a326c9 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconOmTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconOmTask.java @@ -35,14 +35,14 @@ public interface ReconOmTask { /** * Process a set of OM events on tables that the task is listening on. * @param events Set of events to be processed by the task. - * @return Pair of task name -> task success. + * @return Pair of task name -> task success. */ Pair process(OMUpdateEventBatch events); /** * Process a on tables that the task is listening on. * @param omMetadataManager OM Metadata manager instance. - * @return Pair of task name -> task success. + * @return Pair of task name -> task success. */ Pair reprocess(OMMetadataManager omMetadataManager); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java index d66a7279ccee..1a514ceb90bc 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java @@ -53,7 +53,7 @@ void reInitializeTasks(ReconOMMetadataManager omMetadataManager) /** * Get set of registered tasks. - * @return Map of Task name -> Task. + * @return Map of Task name -> Task. */ Map getRegisteredTasks(); diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json index 8cfb23ad685b..c6202027676e 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json @@ -1480,7 +1480,7 @@ "path": "/dummyVolume/dummyBucket", "size": 200000, "sizeWithReplica": -1, - "subPathCount": 5, + "subPathCount": 8, "subPaths": [ { "path": "/dummyVolume/dummyBucket/dir1", @@ -1923,7 +1923,7 @@ "actualReplicaCount": 2, "replicaDeltaCount": 1, "reason": null, - "keys": 1, + "keys": 4, "pipelineID": "a10ffab6-8ed5-414a-aaf5-79890ff3e8a1", "replicas": [ { @@ -1997,7 +1997,7 @@ "actualReplicaCount": 2, "replicaDeltaCount": 2, "reason": null, - "keys": 1, + "keys": 3, "pipelineID": "a10ffab6-8ed5-414a-aaf5-79890ff3e8a1", "replicas": [ { @@ -2071,7 +2071,7 @@ "actualReplicaCount": 2, "replicaDeltaCount": 2, "reason": null, - "keys": 1, + "keys": 2, "pipelineID": "a10ffab6-8ed5-414a-aaf5-79890ff3e8a1", "replicas": [ { @@ -2108,7 +2108,7 @@ "actualReplicaCount": 2, "replicaDeltaCount": 2, "reason": null, - "keys": 1, + "keys": 5, "pipelineID": "a10ffab6-8ed5-414a-aaf5-79890ff3e8a1", "replicas": [ { @@ -2145,7 +2145,7 @@ "actualReplicaCount": 2, "replicaDeltaCount": 2, "reason": null, - "keys": 1, + "keys": 3, "pipelineID": "a10ffab6-8ed5-414a-aaf5-79890ff3e8a1", "replicas": [ { @@ -2182,7 +2182,7 @@ "actualReplicaCount": 2, "replicaDeltaCount": 2, "reason": null, - "keys": 1, + "keys": 6, "pipelineID": "a10ffab6-8ed5-414a-aaf5-79890ff3e8a2", "replicas": [ { @@ -2219,7 +2219,7 @@ "actualReplicaCount": 2, "replicaDeltaCount": 
2, "reason": null, - "keys": 1, + "keys": 2, "pipelineID": "a10ffab6-8ed5-414a-aaf5-79890ff3e8a3", "replicas": [ { diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json index 141cdadcbe83..c2c046f11203 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json @@ -73,7 +73,7 @@ "msw": "1.3.3", "npm-run-all": "^4.1.5", "prettier": "^2.8.4", - "vite": "4.5.3", + "vite": "4.5.5", "vite-tsconfig-paths": "^3.6.0", "vitest": "^1.6.0" }, diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml index 3c472d5f7903..dfdbc7cedcee 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml @@ -87,7 +87,7 @@ devDependencies: version: 5.62.0(eslint@7.32.0)(typescript@4.9.5) '@vitejs/plugin-react-swc': specifier: ^3.5.0 - version: 3.7.0(vite@4.5.3) + version: 3.7.0(vite@4.5.5) eslint: specifier: ^7.28.0 version: 7.32.0 @@ -113,11 +113,11 @@ devDependencies: specifier: ^2.8.4 version: 2.8.8 vite: - specifier: 4.5.3 - version: 4.5.3(less@3.13.1) + specifier: 4.5.5 + version: 4.5.5(less@3.13.1) vite-tsconfig-paths: specifier: ^3.6.0 - version: 3.6.0(vite@4.5.3) + version: 3.6.0(vite@4.5.5) vitest: specifier: ^1.6.0 version: 1.6.0(jsdom@24.1.3)(less@3.13.1) @@ -1464,13 +1464,13 @@ packages: eslint-visitor-keys: 3.4.3 dev: true - /@vitejs/plugin-react-swc@3.7.0(vite@4.5.3): + /@vitejs/plugin-react-swc@3.7.0(vite@4.5.5): resolution: {integrity: sha512-yrknSb3Dci6svCd/qhHqhFPDSw0QtjumcqdKMoNNzmOl5lMXTTiqzjWtG4Qask2HdvvzaNgSunbQGet8/GrKdA==} peerDependencies: vite: ^4 || ^5 dependencies: '@swc/core': 1.7.21 - vite: 4.5.3(less@3.13.1) + vite: 4.5.5(less@3.13.1) transitivePeerDependencies: - '@swc/helpers' dev: true @@ -1896,8 +1896,8 @@ packages: readable-stream: 3.6.2 dev: true - /body-parser@1.20.2: - resolution: {integrity: sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==} + /body-parser@1.20.3: + resolution: {integrity: sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==} engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} dependencies: bytes: 3.1.2 @@ -1908,7 +1908,7 @@ packages: http-errors: 2.0.0 iconv-lite: 0.4.24 on-finished: 2.4.1 - qs: 6.11.0 + qs: 6.13.0 raw-body: 2.5.2 type-is: 1.6.18 unpipe: 1.0.0 @@ -2601,6 +2601,11 @@ packages: engines: {node: '>= 0.8'} dev: true + /encodeurl@2.0.0: + resolution: {integrity: sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==} + engines: {node: '>= 0.8'} + dev: true + /end-of-stream@1.4.4: resolution: {integrity: sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==} dependencies: @@ -3025,36 +3030,36 @@ packages: - supports-color dev: true - /express@4.19.2: - resolution: {integrity: sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==} + /express@4.21.0: + resolution: {integrity: sha512-VqcNGcj/Id5ZT1LZ/cfihi3ttTn+NJmkli2eZADigjq29qTlWi/hAQ43t/VLPq8+UX06FCEx3ByOYet6ZFblng==} engines: {node: '>= 0.10.0'} dependencies: accepts: 1.3.8 array-flatten: 1.1.1 - 
body-parser: 1.20.2 + body-parser: 1.20.3 content-disposition: 0.5.4 content-type: 1.0.5 cookie: 0.6.0 cookie-signature: 1.0.6 debug: 2.6.9 depd: 2.0.0 - encodeurl: 1.0.2 + encodeurl: 2.0.0 escape-html: 1.0.3 etag: 1.8.1 - finalhandler: 1.2.0 + finalhandler: 1.3.1 fresh: 0.5.2 http-errors: 2.0.0 - merge-descriptors: 1.0.1 + merge-descriptors: 1.0.3 methods: 1.1.2 on-finished: 2.4.1 parseurl: 1.3.3 - path-to-regexp: 0.1.7 + path-to-regexp: 0.1.10 proxy-addr: 2.0.7 - qs: 6.11.0 + qs: 6.13.0 range-parser: 1.2.1 safe-buffer: 5.2.1 - send: 0.18.0 - serve-static: 1.15.0 + send: 0.19.0 + serve-static: 1.16.2 setprototypeof: 1.2.0 statuses: 2.0.1 type-is: 1.6.18 @@ -3145,12 +3150,12 @@ packages: to-regex-range: 5.0.1 dev: true - /finalhandler@1.2.0: - resolution: {integrity: sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==} + /finalhandler@1.3.1: + resolution: {integrity: sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==} engines: {node: '>= 0.8'} dependencies: debug: 2.6.9 - encodeurl: 1.0.2 + encodeurl: 2.0.0 escape-html: 1.0.3 on-finished: 2.4.1 parseurl: 1.3.3 @@ -4079,13 +4084,13 @@ packages: engines: {node: '>=8'} hasBin: true dependencies: - body-parser: 1.20.2 + body-parser: 1.20.3 chalk: 2.4.2 compression: 1.7.4 connect-pause: 0.1.1 cors: 2.8.5 errorhandler: 1.5.1 - express: 4.19.2 + express: 4.21.0 express-urlrewrite: 1.4.0 json-parse-helpfulerror: 1.0.3 lodash: 4.17.21 @@ -4316,8 +4321,8 @@ packages: engines: {node: '>= 0.10.0'} dev: true - /merge-descriptors@1.0.1: - resolution: {integrity: sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==} + /merge-descriptors@1.0.3: + resolution: {integrity: sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==} dev: true /merge-stream@2.0.0: @@ -4851,8 +4856,8 @@ packages: minipass: 7.1.2 dev: true - /path-to-regexp@0.1.7: - resolution: {integrity: sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==} + /path-to-regexp@0.1.10: + resolution: {integrity: sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w==} dev: true /path-to-regexp@1.8.0: @@ -5047,8 +5052,8 @@ packages: engines: {node: '>=6'} dev: true - /qs@6.11.0: - resolution: {integrity: sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==} + /qs@6.13.0: + resolution: {integrity: sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==} engines: {node: '>=0.6'} dependencies: side-channel: 1.0.6 @@ -5856,8 +5861,8 @@ packages: glob: 7.2.3 dev: true - /rollup@3.29.4: - resolution: {integrity: sha512-oWzmBZwvYrU0iJHtDmhsm662rC15FRXmcjCk1xD771dFDx5jJ02ufAQQTn0etB2emNk4J9EZg/yWKpsn9BWGRw==} + /rollup@3.29.5: + resolution: {integrity: sha512-GVsDdsbJzzy4S/v3dqWPJ7EfvZJfCHiDqe80IyrF59LYuP+e6U1LJoUqeuqRbwAWoMNoXivMNeNAOf5E22VA1w==} engines: {node: '>=14.18.0', npm: '>=8.0.0'} hasBin: true optionalDependencies: @@ -5991,8 +5996,8 @@ packages: hasBin: true dev: true - /send@0.18.0: - resolution: {integrity: sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==} + /send@0.19.0: + resolution: {integrity: sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==} engines: {node: '>= 0.8.0'} dependencies: debug: 2.6.9 @@ -6012,14 +6017,14 @@ packages: - supports-color dev: true - 
/serve-static@1.15.0: - resolution: {integrity: sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==} + /serve-static@1.16.2: + resolution: {integrity: sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==} engines: {node: '>= 0.8.0'} dependencies: - encodeurl: 1.0.2 + encodeurl: 2.0.0 escape-html: 1.0.3 parseurl: 1.3.3 - send: 0.18.0 + send: 0.19.0 transitivePeerDependencies: - supports-color dev: true @@ -6812,7 +6817,7 @@ packages: - terser dev: true - /vite-tsconfig-paths@3.6.0(vite@4.5.3): + /vite-tsconfig-paths@3.6.0(vite@4.5.5): resolution: {integrity: sha512-UfsPYonxLqPD633X8cWcPFVuYzx/CMNHAjZTasYwX69sXpa4gNmQkR0XCjj82h7zhLGdTWagMjC1qfb9S+zv0A==} peerDependencies: vite: '>2.0.0-0' @@ -6821,13 +6826,13 @@ packages: globrex: 0.1.2 recrawl-sync: 2.2.3 tsconfig-paths: 4.2.0 - vite: 4.5.3(less@3.13.1) + vite: 4.5.5(less@3.13.1) transitivePeerDependencies: - supports-color dev: true - /vite@4.5.3(less@3.13.1): - resolution: {integrity: sha512-kQL23kMeX92v3ph7IauVkXkikdDRsYMGTVl5KY2E9OY4ONLvkHf04MDTbnfo6NKxZiDLWzVpP5oTa8hQD8U3dg==} + /vite@4.5.5(less@3.13.1): + resolution: {integrity: sha512-ifW3Lb2sMdX+WU91s3R0FyQlAyLxOzCSCP37ujw0+r5POeHPwe6udWVIElKQq8gk3t7b8rkmvqC6IHBpCff4GQ==} engines: {node: ^14.18.0 || >=16.0.0} hasBin: true peerDependencies: @@ -6857,7 +6862,7 @@ packages: esbuild: 0.18.20 less: 3.13.1 postcss: 8.4.41 - rollup: 3.29.4 + rollup: 3.29.5 optionalDependencies: fsevents: 2.3.3 dev: true diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.less index 1895cabc184e..44f53fa9d47d 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.less +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.less @@ -164,4 +164,9 @@ body { .pointer { cursor: pointer; +} + +.data-container { + padding: 24px; + height: 80vh; } \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/autoReloadPanel/autoReloadPanel.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/autoReloadPanel/autoReloadPanel.tsx index 0230d4dd61dc..6b2bab246b72 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/autoReloadPanel/autoReloadPanel.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/autoReloadPanel/autoReloadPanel.tsx @@ -76,10 +76,11 @@ class AutoReloadPanel extends React.Component { ); const lastUpdatedDeltaFullText = lastUpdatedOMDBDelta === 0 || lastUpdatedOMDBDelta === undefined || lastUpdatedOMDBFull === 0 || lastUpdatedOMDBFull === undefined ? '' : + //omSyncLoad should be clickable at all times. 
If the response from the dbsync is false it will show DB update is already running else show triggered sync ( <>   | DB Synced at {lastUpdatedDeltaFullToolTip} -  )} + + ); + }); + breadCrumbs[breadCrumbs.length - 1] = generateSubMenu(currPath[currPath.length - 1]); + return breadCrumbs; + } + + return ( + } + className='breadcrumb-nav'> + {generateBreadCrumbs()} + + ) +} + +export default DUBreadcrumbNav; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx new file mode 100644 index 000000000000..f2c740f7dbcb --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx @@ -0,0 +1,389 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React, { useRef, useState } from 'react'; +import moment from 'moment'; +import { AxiosError } from 'axios'; +import { Table } from 'antd'; + +import { AxiosGetHelper, cancelRequests } from '@/utils/axiosRequestHelper'; +import { byteToSize, showDataFetchError } from '@/utils/common'; + +import { Acl } from '@/v2/types/acl.types'; + + +// ------------- Types -------------- // +type CountStats = { + numBucket: number; + numDir: number; + numKey: number; + numVolume: number; +}; + +type LocationInfo = { + blockID: { + containerBlockID: { + containerID: number; + localID: number; + }; + blockCommitSequenceId: number; + containerID: number; + localID: number; + }; + length: number; + offset: number; + token: null; + createVersion: number; + pipeline: null; + partNumber: number; + containerID: number; + localID: number; + blockCommitSequenceId: number; +}; + +type ObjectInfo = { + bucketName: string; + bucketLayout: string; + encInfo: null; + fileName: string; + keyName: string; + name: string; + owner: string; + volume: string; + volumeName: string; + sourceVolume: string | null; + sourceBucket: string | null; + usedBytes: number | null; + usedNamespace: number; + storageType: string; + creationTime: number; + dataSize: number; + modificationTime: number; + quotaInBytes: number; + quotaInNamespace: number; +} + +type ReplicationConfig = { + replicationFactor: string; + requiredNodes: number; + replicationType: string; +} + +type ObjectInfoResponse = ObjectInfo & { + acls: Acl[]; + versioningEnabled: boolean; + metadata: Record; + file: boolean; + keyLocationVersions: { + version: number; + locationList: LocationInfo[]; + multipartKey: boolean; + blocksLatestVersionOnly: LocationInfo[]; + locationLists: LocationInfo[][]; + locationListCount: number; + }[]; + versioning: boolean; + encryptionInfo: null; + 
replicationConfig: ReplicationConfig; +}; + +type SummaryResponse = { + countStats: CountStats; + objectInfo: ObjectInfoResponse; + path: string; + status: string; + type: string; +} + +type MetadataProps = { + path: string; +}; + +type MetadataState = { + keys: string[]; + values: (string | number | boolean | null)[]; +}; + + +// ------------- Component -------------- // +const DUMetadata: React.FC = ({ + path = '/' +}) => { + const [loading, setLoading] = useState(false); + const [state, setState] = useState({ + keys: [], + values: [] + }); + const cancelSummarySignal = useRef(); + const keyMetadataSummarySignal = useRef(); + const cancelQuotaSignal = useRef(); + + const getObjectInfoMapping = React.useCallback((summaryResponse) => { + + const keys: string[] = []; + const values: (string | number | boolean | null)[] = []; + /** + * We are creating a specific set of keys under Object Info response + * which do not require us to modify anything + */ + const selectedInfoKeys = [ + 'bucketName', 'bucketLayout', 'encInfo', 'fileName', 'keyName', + 'name', 'owner', 'sourceBucket', 'sourceVolume', 'storageType', + 'usedNamespace', 'volumeName', 'volume' + ] as const; + const objectInfo: ObjectInfo = summaryResponse.objectInfo ?? {}; + + selectedInfoKeys.forEach((key) => { + if (objectInfo[key as keyof ObjectInfo] !== undefined && objectInfo[key as keyof ObjectInfo] !== -1) { + // We will use regex to convert the Object key from camel case to space separated title + // The following regex will match abcDef and produce Abc Def + let keyName = key.replace(/([a-z0-9])([A-Z])/g, '$1 $2'); + keyName = keyName.charAt(0).toUpperCase() + keyName.slice(1); + keys.push(keyName); + values.push(objectInfo[key as keyof ObjectInfo]); + } + }); + + if (objectInfo?.creationTime !== undefined && objectInfo?.creationTime !== -1) { + keys.push('Creation Time'); + values.push(moment(objectInfo.creationTime).format('ll LTS')); + } + + if (objectInfo?.usedBytes !== undefined && objectInfo?.usedBytes !== -1 && objectInfo!.usedBytes !== null) { + keys.push('Used Bytes'); + values.push(byteToSize(objectInfo.usedBytes, 3)); + } + + if (objectInfo?.dataSize !== undefined && objectInfo?.dataSize !== -1) { + keys.push('Data Size'); + values.push(byteToSize(objectInfo.dataSize, 3)); + } + + if (objectInfo?.modificationTime !== undefined && objectInfo?.modificationTime !== -1) { + keys.push('Modification Time'); + values.push(moment(objectInfo.modificationTime).format('ll LTS')); + } + + if (objectInfo?.quotaInBytes !== undefined && objectInfo?.quotaInBytes !== -1) { + keys.push('Quota In Bytes'); + values.push(byteToSize(objectInfo.quotaInBytes, 3)); + } + + if (objectInfo?.quotaInNamespace !== undefined && objectInfo?.quotaInNamespace !== -1) { + keys.push('Quota In Namespace'); + values.push(byteToSize(objectInfo.quotaInNamespace, 3)); + } + + if (summaryResponse.objectInfo?.replicationConfig?.replicationFactor !== undefined) { + keys.push('Replication Factor'); + values.push(summaryResponse.objectInfo.replicationConfig.replicationFactor); + } + + if (summaryResponse.objectInfo?.replicationConfig?.replicationType !== undefined) { + keys.push('Replication Type'); + values.push(summaryResponse.objectInfo.replicationConfig.replicationType); + } + + if (summaryResponse.objectInfo?.replicationConfig?.requiredNodes !== undefined + && summaryResponse.objectInfo?.replicationConfig?.requiredNodes !== -1) { + keys.push('Replication Required Nodes'); + values.push(summaryResponse.objectInfo.replicationConfig.requiredNodes); + } + + 
return { keys, values } + }, [path]); + + function loadMetadataSummary(path: string) { + cancelRequests([ + cancelSummarySignal.current!, + keyMetadataSummarySignal.current! + ]); + const keys: string[] = []; + const values: (string | number | boolean | null)[] = []; + + const { request, controller } = AxiosGetHelper( + `/api/v1/namespace/summary?path=${path}`, + cancelSummarySignal.current + ); + cancelSummarySignal.current = controller; + + request.then(response => { + const summaryResponse: SummaryResponse = response.data; + keys.push('Entity Type'); + values.push(summaryResponse.type); + + if (summaryResponse.status === 'INITIALIZING') { + showDataFetchError(`The metadata is currently initializing. Please wait a moment and try again later`); + return; + } + + if (summaryResponse.status === 'PATH_NOT_FOUND') { + showDataFetchError(`Invalid Path: ${path}`); + return; + } + + // If the entity is a Key then fetch the Key metadata only + if (summaryResponse.type === 'KEY') { + const { request: metadataRequest, controller: metadataNewController } = AxiosGetHelper( + `/api/v1/namespace/du?path=${path}&replica=true`, + keyMetadataSummarySignal.current + ); + keyMetadataSummarySignal.current = metadataNewController; + metadataRequest.then(response => { + keys.push('File Size'); + values.push(byteToSize(response.data.size, 3)); + keys.push('File Size With Replication'); + values.push(byteToSize(response.data.sizeWithReplica, 3)); + keys.push("Creation Time"); + values.push(moment(summaryResponse.objectInfo.creationTime).format('ll LTS')); + keys.push("Modification Time"); + values.push(moment(summaryResponse.objectInfo.modificationTime).format('ll LTS')); + + setState({ + keys: keys, + values: values + }); + }).catch(error => { + showDataFetchError(error.toString()); + }); + return; + } + + /** + * Will iterate over the keys of the countStats to avoid multiple if blocks + * and check from the map for the respective key name / title to insert + */ + const countStats: CountStats = summaryResponse.countStats ?? {}; + const keyToNameMap: Record = { + numVolume: 'Volumes', + numBucket: 'Buckets', + numDir: 'Total Directories', + numKey: 'Total Keys' + } + Object.keys(countStats).forEach((key: string) => { + if (countStats[key as keyof CountStats] !== undefined + && countStats[key as keyof CountStats] !== -1) { + keys.push(keyToNameMap[key]); + values.push(countStats[key as keyof CountStats]); + } + }) + + const { + keys: objectInfoKeys, + values: objectInfoValues + } = getObjectInfoMapping(summaryResponse); + + keys.push(...objectInfoKeys); + values.push(...objectInfoValues); + + setState({ + keys: keys, + values: values + }); + }).catch(error => { + showDataFetchError((error as AxiosError).toString()); + }); + } + + function loadQuotaSummary(path: string) { + cancelRequests([ + cancelQuotaSignal.current! 
+ ]); + + const { request, controller } = AxiosGetHelper( + `/api/v1/namespace/quota?path=${path}`, + cancelQuotaSignal.current + ); + cancelQuotaSignal.current = controller; + + request.then(response => { + const quotaResponse = response.data; + + if (quotaResponse.status === 'INITIALIZING') { + return; + } + if (quotaResponse.status === 'TYPE_NOT_APPLICABLE') { + return; + } + if (quotaResponse.status === 'PATH_NOT_FOUND') { + showDataFetchError(`Invalid Path: ${path}`); + return; + } + + const keys: string[] = []; + const values: (string | number | boolean | null)[] = []; + // Append quota information + // In case the object's quota isn't set + if (quotaResponse.allowed !== undefined && quotaResponse.allowed !== -1) { + keys.push('Quota Allowed'); + values.push(byteToSize(quotaResponse.allowed, 3)); + } + + if (quotaResponse.used !== undefined && quotaResponse.used !== -1) { + keys.push('Quota Used'); + values.push(byteToSize(quotaResponse.used, 3)); + } + setState((prevState) => ({ + keys: [...prevState.keys, ...keys], + values: [...prevState.values, ...values] + })); + }).catch(error => { + showDataFetchError(error.toString()); + }); + } + + React.useEffect(() => { + setLoading(true); + loadMetadataSummary(path); + loadQuotaSummary(path); + setLoading(false); + + return (() => { + cancelRequests([ + cancelSummarySignal.current!, + keyMetadataSummarySignal.current!, + cancelQuotaSignal.current! + ]); + }) + }, [path]); + + const content = []; + for (const [i, v] of state.keys.entries()) { + content.push({ + key: v, + value: state.values[i] + }); + } + + return ( + + + +
    + ); +} + +export default DUMetadata; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duPieChart/duPieChart.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duPieChart/duPieChart.tsx new file mode 100644 index 000000000000..2601905a142e --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duPieChart/duPieChart.tsx @@ -0,0 +1,211 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React from 'react'; + +import EChart from '@/v2/components/eChart/eChart'; +import { byteToSize } from '@/utils/common'; +import { DUSubpath } from '@/v2/types/diskUsage.types'; + +//-------Types--------// +type PieChartProps = { + path: string; + limit: number; + size: number; + subPaths: DUSubpath[]; + subPathCount: number; + sizeWithReplica: number; + loading: boolean; +} + +//-------Constants---------// +const OTHER_PATH_NAME = 'Other Objects'; +const MIN_BLOCK_SIZE = 0.05; + + +//----------Component---------// +const DUPieChart: React.FC = ({ + path, + limit, + size, + subPaths, + subPathCount, + sizeWithReplica, + loading +}) => { + + const [subpathSize, setSubpathSize] = React.useState(0); + + function getSubpathSize(subpaths: DUSubpath[]): number { + const subpathSize = subpaths + .map((subpath) => subpath.size) + .reduce((acc, curr) => acc + curr, 0); + // If there is no subpaths, then the size will be total size of path + return (subPaths.length === 0) ? size : subpathSize; + } + + function updatePieData() { + /** + * We need to calculate the size of "Other objects" in two cases: + * + * 1) If we have more subpaths listed, than the limit. + * 2) If the limit is set to the maximum limit (30) and we have any number of subpaths. + * In this case we won't necessarily have "Other objects", but we check if the + * other objects's size is more than zero (we will have other objects if there are more than 30 subpaths, + * but we can't check on that, as the response will always have + * 30 subpaths, but from the total size and the subpaths size we can calculate it). + */ + let subpaths: DUSubpath[] = subPaths; + + let pathLabels: string[] = []; + let percentage: string[] = []; + let sizeStr: string[]; + let valuesWithMinBlockSize: number[] = []; + + if (subPathCount > limit) { + // If the subpath count is greater than the provided limit + // Slice the subpath to the limit + subpaths = subpaths.slice(0, limit); + // Add the size of the subpath + const limitedSize = getSubpathSize(subpaths); + const remainingSize = size - limitedSize; + subpaths.push({ + path: OTHER_PATH_NAME, + size: remainingSize, + sizeWithReplica: (sizeWithReplica === -1) + ? 
-1 + : sizeWithReplica - remainingSize, + isKey: false + }) + } + + if (subPathCount === 0 || subpaths.length === 0) { + // No more subpaths available + pathLabels = [path.split('/').pop() ?? '']; + valuesWithMinBlockSize = [0.1]; + percentage = ['100.00']; + sizeStr = [byteToSize(size, 1)]; + } else { + pathLabels = subpaths.map(subpath => { + const subpathName = subpath.path.split('/').pop() ?? ''; + // Diferentiate keys by removing trailing slash + return (subpath.isKey || subpathName === OTHER_PATH_NAME) + ? subpathName + : subpathName + '/'; + }); + + let values: number[] = [0]; + if (size > 0) { + values = subpaths.map( + subpath => (subpath.size / size) + ); + } + const valueClone = structuredClone(values); + valuesWithMinBlockSize = valueClone?.map( + (val: number) => (val > 0) + ? val + MIN_BLOCK_SIZE + : val + ); + + percentage = values.map(value => (value * 100).toFixed(2)); + sizeStr = subpaths.map((subpath) => byteToSize(subpath.size, 1)); + } + + return valuesWithMinBlockSize.map((key, idx) => { + return { + value: key, + name: pathLabels[idx], + size: sizeStr[idx], + percentage: percentage[idx] + } + }); + } + + React.useEffect(() => { + setSubpathSize(getSubpathSize(subPaths)); + }, [subPaths, limit]); + + const pieData = React.useMemo(() => updatePieData(), [path, subPaths, limit]); + + const eChartsOptions = { + title: { + text: `${byteToSize(subpathSize, 1)} / ${byteToSize(size, 1)}`, + left: 'center', + top: '95%' + }, + tooltip: { + trigger: 'item', + formatter: ({ dataIndex, name, color }) => { + const nameEl = `${name}
    `; + const dataEl = `Total Data Size: ${pieData[dataIndex]['size']}
    ` + const percentageEl = `Percentage: ${pieData[dataIndex]['percentage']} %` + return `${nameEl}${dataEl}${percentageEl}` + } + }, + legend: { + top: '10%', + orient: 'vertical', + left: '0%', + width: '80%' + }, + grid: { + + }, + series: [ + { + type: 'pie', + radius: '70%', + data: pieData.map((value) => { + return { + value: value.value, + name: value.name + } + }), + emphasis: { + itemStyle: { + shadowBlur: 10, + shadowOffsetX: 0, + shadowColor: 'rgba(0, 0, 0, 0.5)' + } + } + } + ] + }; + + const handleLegendChange = ({selected}: {selected: Record}) => { + const filteredPath = subPaths.filter((value) => { + // In case of any leading '/' remove them and add a / at end + // to make it similar to legend + const splitPath = value.path?.split('/'); + const pathName = splitPath[splitPath.length - 1] ?? '' + ((value.isKey) ? '' : '/'); + return selected[pathName]; + }) + const newSize = getSubpathSize(filteredPath); + setSubpathSize(newSize); + } + + return ( + + ); +} + +export default DUPieChart; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/eChart/eChart.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/eChart/eChart.tsx index 79fa07603386..9d483efd6b00 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/eChart/eChart.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/eChart/eChart.tsx @@ -28,6 +28,10 @@ export interface EChartProps { loading?: boolean; theme?: 'light'; onClick?: () => any | void; + eventHandler?: { + name: string, + handler: (arg0: any) => void + }; } const EChart = ({ @@ -36,7 +40,8 @@ const EChart = ({ settings, loading, theme, - onClick + onClick, + eventHandler }: EChartProps): JSX.Element => { const chartRef = useRef(null); useEffect(() => { @@ -47,6 +52,10 @@ const EChart = ({ if (onClick) { chart.on('click', onClick); } + + if (eventHandler) { + chart.on(eventHandler.name, eventHandler.handler); + } } // Add chart resize listener @@ -71,6 +80,10 @@ const EChart = ({ if (onClick) { chart!.on('click', onClick); } + + if (eventHandler) { + chart!.on(eventHandler.name, eventHandler.handler); + } } }, [option, settings, theme]); // Whenever theme changes we need to add option and setting due to it being deleted in cleanup function diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/search/search.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/search/search.tsx index 8cac2a9c0477..d320fd659a69 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/search/search.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/search/search.tsx @@ -20,6 +20,7 @@ import React from 'react'; import { Input, Select } from 'antd'; import { Option } from '@/v2/components/select/singleSelect'; +import { DownOutlined } from '@ant-design/icons'; // ------------- Types -------------- // type SearchProps = { @@ -51,6 +52,7 @@ const Search: React.FC = ({ const selectFilter = searchColumn ? (