diff --git a/.github/workflows/bazel.yml b/.github/workflows/bazel.yml index 5aa4f460c7c..510457ca46e 100644 --- a/.github/workflows/bazel.yml +++ b/.github/workflows/bazel.yml @@ -8,9 +8,6 @@ on: - develop - bazel -permissions: - actions: write - jobs: build: name: "bazel-compile (${{ matrix.os }})" @@ -23,14 +20,4 @@ jobs: - name: Build run: bazel build --config=remote //... - name: Run Tests - run: bazel test --config=remote //... - - name: Retry if failed - # if it failed , retry 2 times at most - if: failure() && fromJSON(github.run_attempt) < 3 - continue-on-error: true - env: - GH_REPO: ${{ github.repository }} - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - echo "Attempting to retry workflow..." - gh workflow run rerun-workflow.yml -F run_id=${{ github.run_id }} \ No newline at end of file + run: bazel test --config=remote //... \ No newline at end of file diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml index 33b0a01ad4f..8179f362879 100644 --- a/.github/workflows/integration-test.yml +++ b/.github/workflows/integration-test.yml @@ -35,4 +35,11 @@ jobs: - name: Run integration tests with Maven run: mvn clean verify -Pit-test -Pskip-unit-tests - + - name: Publish Test Report + uses: mikepenz/action-junit-report@v3 + if: always() + with: + report_paths: 'test/target/failsafe-reports/TEST-*.xml' + annotate_only: true + include_passed: true + detailed_summary: true diff --git a/.github/workflows/maven.yaml b/.github/workflows/maven.yaml index f17c20b1ab8..d0c0ba7d9f1 100644 --- a/.github/workflows/maven.yaml +++ b/.github/workflows/maven.yaml @@ -5,9 +5,6 @@ on: push: branches: [master, develop, bazel] -permissions: - actions: write - jobs: java_build: name: "maven-compile (${{ matrix.os }}, JDK-${{ matrix.jdk }})" @@ -44,15 +41,4 @@ jobs: with: name: jvm-crash-logs path: /Users/runner/work/rocketmq/rocketmq/broker/hs_err_pid*.log - retention-days: 1 - - - name: Retry if failed - # if it failed , retry 2 times at most - if: failure() && fromJSON(github.run_attempt) < 3 - continue-on-error: true - env: - GH_REPO: ${{ github.repository }} - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - echo "Attempting to retry workflow..." 
- gh workflow run rerun-workflow.yml -F run_id=${{ github.run_id }} \ No newline at end of file + retention-days: 1 \ No newline at end of file diff --git a/.github/workflows/pr-ci.yml b/.github/workflows/pr-ci.yml index ef2db755d00..99d7309fd0c 100644 --- a/.github/workflows/pr-ci.yml +++ b/.github/workflows/pr-ci.yml @@ -21,7 +21,7 @@ jobs: - name: Build distribution tar run: | mvn -Prelease-all -DskipTests -Dspotbugs.skip=true clean install -U - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 name: Upload distribution tar with: name: rocketmq @@ -30,7 +30,7 @@ jobs: run: | mkdir -p ./pr echo ${{ github.event.number }} > ./pr/NR - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v4 with: name: pr path: pr/ diff --git a/.github/workflows/pr-e2e-test.yml b/.github/workflows/pr-e2e-test.yml index f9bb3bde75a..5b4264266ef 100644 --- a/.github/workflows/pr-e2e-test.yml +++ b/.github/workflows/pr-e2e-test.yml @@ -25,18 +25,18 @@ jobs: java-version: ["8"] steps: - name: 'Download artifact' - uses: actions/github-script@v3.1.0 + uses: actions/github-script@v6 with: script: | - var artifacts = await github.actions.listWorkflowRunArtifacts({ + let artifacts = await github.rest.actions.listWorkflowRunArtifacts({ owner: context.repo.owner, repo: context.repo.repo, run_id: ${{github.event.workflow_run.id }}, }); - var matchArtifactRmq = artifacts.data.artifacts.filter((artifact) => { + let matchArtifactRmq = artifacts.data.artifacts.filter((artifact) => { return artifact.name == "rocketmq" })[0]; - var download = await github.actions.downloadArtifact({ + let download = await github.rest.actions.downloadArtifact({ owner: context.repo.owner, repo: context.repo.repo, artifact_id: matchArtifactRmq.id, @@ -68,7 +68,7 @@ jobs: mkdir versionlist touch versionlist/"${version}-`echo ${{ matrix.base-image }} | sed -e "s/:/-/g"`" sh ./build-image-local.sh ${version} ${{ matrix.base-image }} ${{ matrix.java-version }} ${DOCKER_REPO} - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 name: Upload distribution tar with: name: versionlist @@ -85,7 +85,7 @@ jobs: outputs: version-json: ${{ steps.show_versions.outputs.version-json }} steps: - - uses: actions/download-artifact@v3 + - uses: actions/download-artifact@v4 name: Download versionlist with: name: versionlist @@ -96,6 +96,7 @@ jobs: a=(`ls versionlist`) printf '%s\n' "${a[@]}" | jq -R . | jq -s . echo version-json=`printf '%s\n' "${a[@]}" | jq -R . 
| jq -s .` >> $GITHUB_OUTPUT + deploy: if: ${{ success() }} name: Deploy RocketMQ @@ -158,7 +159,7 @@ jobs: annotate_only: true include_passed: true detailed_summary: true - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 if: always() name: Upload test log with: @@ -199,7 +200,7 @@ jobs: annotate_only: true include_passed: true detailed_summary: true - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 if: always() name: Upload test log with: @@ -235,7 +236,7 @@ jobs: annotate_only: true include_passed: true detailed_summary: true - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 if: always() name: Upload test log with: @@ -258,5 +259,4 @@ jobs: action: "clean" ask-config: "${{ secrets.ASK_CONFIG_VIRGINA }}" test-version: "${{ matrix.version }}" - job-id: ${{ strategy.job-index }} - + job-id: ${{ strategy.job-index }} \ No newline at end of file diff --git a/.github/workflows/push-ci.yml b/.github/workflows/push-ci.yml index 2fe62dbeb06..9e13794b318 100644 --- a/.github/workflows/push-ci.yml +++ b/.github/workflows/push-ci.yml @@ -31,7 +31,7 @@ jobs: - name: Build distribution tar run: | mvn -Prelease-all -DskipTests -Dspotbugs.skip=true clean install -U - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 name: Upload distribution tar with: name: rocketmq @@ -53,7 +53,7 @@ jobs: repository: apache/rocketmq-docker.git ref: master path: rocketmq-docker - - uses: actions/download-artifact@v3 + - uses: actions/download-artifact@v4 name: Download distribution tar with: name: rocketmq @@ -72,13 +72,12 @@ jobs: mkdir versionlist touch versionlist/"${version}-`echo ${{ matrix.base-image }} | sed -e "s/:/-/g"`" sh ./build-image-local.sh ${version} ${{ matrix.base-image }} ${{ matrix.java-version }} ${DOCKER_REPO} - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 name: Upload distribution tar with: name: versionlist path: rocketmq-docker/image-build-ci/versionlist/* - list-version: if: > github.repository == 'apache/rocketmq' && @@ -90,7 +89,7 @@ jobs: outputs: version-json: ${{ steps.show_versions.outputs.version-json }} steps: - - uses: actions/download-artifact@v3 + - uses: actions/download-artifact@v4 name: Download versionlist with: name: versionlist @@ -101,9 +100,10 @@ jobs: a=(`ls versionlist`) printf '%s\n' "${a[@]}" | jq -R . | jq -s . echo version-json=`printf '%s\n' "${a[@]}" | jq -R . 
| jq -s .` >> $GITHUB_OUTPUT - deploy: + + deploy-e2e: if: ${{ success() }} - name: Deploy RocketMQ + name: Deploy RocketMQ For E2E needs: [list-version,docker] runs-on: ubuntu-latest timeout-minutes: 60 @@ -134,10 +134,45 @@ jobs: image: repository: ${{env.DOCKER_REPO}} tag: ${{ matrix.version }} + + deploy-benchmark: + if: ${{ success() }} + name: Deploy RocketMQ For Benchmarking + needs: [list-version,docker] + runs-on: ubuntu-latest + timeout-minutes: 60 + strategy: + matrix: + version: ${{ fromJSON(needs.list-version.outputs.version-json) }} + steps: + - uses: apache/rocketmq-test-tool@7d84d276ad7755b1dc5cf9657a7a9bff6ae6d288 + name: Deploy rocketmq + with: + action: "deploy" + ask-config: "${{ secrets.ASK_CONFIG_VIRGINA }}" + test-version: "${{ matrix.version }}" + chart-git: "https://ghproxy.com/https://github.com/apache/rocketmq-docker.git" + chart-branch: "master" + chart-path: "./rocketmq-k8s-helm" + job-id: "001-${{ strategy.job-index }}" + helm-values: | + nameserver: + image: + repository: ${{env.DOCKER_REPO}} + tag: ${{ matrix.version }} + broker: + image: + repository: ${{env.DOCKER_REPO}} + tag: ${{ matrix.version }} + proxy: + image: + repository: ${{env.DOCKER_REPO}} + tag: ${{ matrix.version }} + test-e2e-grpc-java: if: ${{ success() }} name: Test E2E grpc java - needs: [list-version, deploy] + needs: [list-version, deploy-e2e] runs-on: ubuntu-latest timeout-minutes: 60 strategy: @@ -163,7 +198,7 @@ jobs: annotate_only: true include_passed: true detailed_summary: true - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 if: always() name: Upload test log with: @@ -173,7 +208,7 @@ jobs: test-e2e-golang: if: ${{ success() }} name: Test E2E golang - needs: [list-version, deploy] + needs: [list-version, deploy-e2e] runs-on: ubuntu-latest timeout-minutes: 60 strategy: @@ -204,7 +239,7 @@ jobs: annotate_only: true include_passed: true detailed_summary: true - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 if: always() name: Upload test log with: @@ -214,7 +249,7 @@ jobs: test-e2e-remoting-java: if: ${{ success() }} name: Test E2E remoting java - needs: [ list-version, deploy ] + needs: [ list-version, deploy-e2e ] runs-on: ubuntu-latest timeout-minutes: 60 strategy: @@ -240,17 +275,45 @@ jobs: annotate_only: true include_passed: true detailed_summary: true - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 if: always() name: Upload test log with: name: test-e2e-remoting-java-log.txt path: testlog.txt - clean: + benchmark-test: + if: ${{ success() }} + runs-on: ubuntu-latest + name: Performance benchmark test + needs: [ list-version, deploy-benchmark ] + timeout-minutes: 60 + steps: + - uses: apache/rocketmq-test-tool/benchmark-runner@ce372e5f3906ca1891e4918b05be14608eae608e + name: Performance benchmark + with: + action: "performance-benchmark" + ask-config: "${{ secrets.ASK_CONFIG_VIRGINA }}" + job-id: "001-${{ strategy.job-index }}" + # The time to run the test, 15 minutes + test-time: "900" + # Some thresholds set in advance + min-send-tps-threshold: "12000" + max-rt-ms-threshold: "500" + avg-rt-ms-threshold: "10" + max-2c-rt-ms-threshold: "150" + avg-2c-rt-ms-threshold: "10" + - name: Upload test report + if: always() + uses: actions/upload-artifact@v4 + with: + name: benchmark-report + path: benchmark/ + + clean-e2e: if: always() - name: Clean - needs: [list-version, test-e2e-grpc-java, test-e2e-golang, test-e2e-remoting-java] + name: Clean E2E + needs: [ list-version, test-e2e-grpc-java, 
test-e2e-golang, test-e2e-remoting-java ] runs-on: ubuntu-latest timeout-minutes: 60 strategy: @@ -265,3 +328,20 @@ jobs: test-version: "${{ matrix.version }}" job-id: ${{ strategy.job-index }} + clean-benchmark: + if: always() + name: Clean Benchmarking + needs: [ list-version, benchmark-test ] + runs-on: ubuntu-latest + timeout-minutes: 60 + strategy: + matrix: + version: ${{ fromJSON(needs.list-version.outputs.version-json) }} + steps: + - uses: apache/rocketmq-test-tool@7d84d276ad7755b1dc5cf9657a7a9bff6ae6d288 + name: clean + with: + action: "clean" + ask-config: "${{ secrets.ASK_CONFIG_VIRGINA }}" + test-version: "${{ matrix.version }}" + job-id: "001-${{ strategy.job-index }}" \ No newline at end of file diff --git a/.github/workflows/rerun-workflow.yml b/.github/workflows/rerun-workflow.yml index bf83fc51b63..6c319505d2c 100644 --- a/.github/workflows/rerun-workflow.yml +++ b/.github/workflows/rerun-workflow.yml @@ -1,21 +1,22 @@ name: Rerun workflow on: - workflow_dispatch: - inputs: - run_id: - required: true + workflow_run: + workflows: ["Build and Run Tests by Maven" , "Build and Run Tests by Bazel"] + types: + - completed permissions: actions: write jobs: rerun: + if: github.event.workflow_run.conclusion == 'failure' && fromJSON(github.event.workflow_run.run_attempt) < 3 runs-on: ubuntu-latest steps: - - name: rerun ${{ inputs.run_id }} + - name: rerun ${{ github.event.workflow_run.id }} env: - GH_REPO: ${{ github.repository }} + GH_REPO: ${{ github.repository }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | - gh run watch ${{ inputs.run_id }} > /dev/null 2>&1 - gh run rerun ${{ inputs.run_id }} --failed \ No newline at end of file + gh run watch ${{ github.event.workflow_run.id }} > /dev/null 2>&1 + gh run rerun ${{ github.event.workflow_run.id }} --failed \ No newline at end of file diff --git a/.github/workflows/snapshot-automation.yml b/.github/workflows/snapshot-automation.yml index 99855d3aa0d..9fb16cb13ca 100644 --- a/.github/workflows/snapshot-automation.yml +++ b/.github/workflows/snapshot-automation.yml @@ -69,7 +69,7 @@ jobs: MAVEN_SETTINGS: ${{ github.workspace }}/.github/asf-deploy-settings.xml run: | mvn -Prelease-all -DskipTests -Dspotbugs.skip=true clean install -U - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 name: Upload distribution tar with: name: rocketmq @@ -91,7 +91,7 @@ jobs: repository: apache/rocketmq-docker.git ref: master path: rocketmq-docker - - uses: actions/download-artifact@v3 + - uses: actions/download-artifact@v4 name: Download distribution tar with: name: rocketmq @@ -110,7 +110,7 @@ jobs: mkdir versionlist touch versionlist/"${version}-`echo ${{ matrix.base-image }} | sed -e "s/:/-/g"`" sh ./build-image-local.sh ${version} ${{ matrix.base-image }} ${{ matrix.java-version }} ${DOCKER_REPO} - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 name: Upload distribution tar with: name: versionlist @@ -125,7 +125,7 @@ jobs: outputs: version-json: ${{ steps.show_versions.outputs.version-json }} steps: - - uses: actions/download-artifact@v3 + - uses: actions/download-artifact@v4 name: Download versionlist with: name: versionlist @@ -200,7 +200,7 @@ jobs: annotate_only: true include_passed: true detailed_summary: true - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 if: always() name: Upload test log with: diff --git a/acl/pom.xml b/acl/pom.xml index c9d5085dcc1..812dbd9fd13 100644 --- a/acl/pom.xml +++ b/acl/pom.xml @@ -13,7 +13,7 @@ org.apache.rocketmq rocketmq-all - 
5.3.1-SNAPSHOT + 5.3.2-SNAPSHOT rocketmq-acl rocketmq-acl ${project.version} diff --git a/auth/pom.xml b/auth/pom.xml index 71b07c33750..f7a5417860c 100644 --- a/auth/pom.xml +++ b/auth/pom.xml @@ -13,7 +13,7 @@ org.apache.rocketmq rocketmq-all - 5.3.1-SNAPSHOT + 5.3.2-SNAPSHOT rocketmq-auth rocketmq-auth ${project.version} diff --git a/auth/src/main/java/org/apache/rocketmq/auth/authorization/builder/DefaultAuthorizationContextBuilder.java b/auth/src/main/java/org/apache/rocketmq/auth/authorization/builder/DefaultAuthorizationContextBuilder.java index 02d5df236f5..e69abdaf805 100644 --- a/auth/src/main/java/org/apache/rocketmq/auth/authorization/builder/DefaultAuthorizationContextBuilder.java +++ b/auth/src/main/java/org/apache/rocketmq/auth/authorization/builder/DefaultAuthorizationContextBuilder.java @@ -171,7 +171,7 @@ public List build(ChannelHandlerContext context, Re subject = User.of(fields.get(SessionCredentials.ACCESS_KEY)); } String remoteAddr = RemotingHelper.parseChannelRemoteAddr(context.channel()); - String sourceIp = StringUtils.substringBefore(remoteAddr, CommonConstants.COLON); + String sourceIp = StringUtils.substringBeforeLast(remoteAddr, CommonConstants.COLON); Resource topic; Resource group; @@ -394,7 +394,7 @@ private List newContext(Metadata metadata, QueryRou subject = User.of(metadata.get(GrpcConstants.AUTHORIZATION_AK)); } Resource resource = Resource.ofTopic(topic.getName()); - String sourceIp = StringUtils.substringBefore(metadata.get(GrpcConstants.REMOTE_ADDRESS), CommonConstants.COLON); + String sourceIp = StringUtils.substringBeforeLast(metadata.get(GrpcConstants.REMOTE_ADDRESS), CommonConstants.COLON); DefaultAuthorizationContext context = DefaultAuthorizationContext.of(subject, resource, Arrays.asList(Action.PUB, Action.SUB), sourceIp); return Collections.singletonList(context); } @@ -437,7 +437,7 @@ private static List newPubContext(Metadata metadata subject = User.of(metadata.get(GrpcConstants.AUTHORIZATION_AK)); } Resource resource = Resource.ofTopic(topic.getName()); - String sourceIp = StringUtils.substringBefore(metadata.get(GrpcConstants.REMOTE_ADDRESS), CommonConstants.COLON); + String sourceIp = StringUtils.substringBeforeLast(metadata.get(GrpcConstants.REMOTE_ADDRESS), CommonConstants.COLON); DefaultAuthorizationContext context = DefaultAuthorizationContext.of(subject, resource, Action.PUB, sourceIp); return Collections.singletonList(context); } @@ -483,7 +483,7 @@ private static List newSubContexts(Metadata metadat if (metadata.containsKey(GrpcConstants.AUTHORIZATION_AK)) { subject = User.of(metadata.get(GrpcConstants.AUTHORIZATION_AK)); } - String sourceIp = StringUtils.substringBefore(metadata.get(GrpcConstants.REMOTE_ADDRESS), CommonConstants.COLON); + String sourceIp = StringUtils.substringBeforeLast(metadata.get(GrpcConstants.REMOTE_ADDRESS), CommonConstants.COLON); result.add(DefaultAuthorizationContext.of(subject, resource, Action.SUB, sourceIp)); return result; } diff --git a/auth/src/main/java/org/apache/rocketmq/auth/authorization/factory/AuthorizationFactory.java b/auth/src/main/java/org/apache/rocketmq/auth/authorization/factory/AuthorizationFactory.java index f87a5304cb7..29748a9ed44 100644 --- a/auth/src/main/java/org/apache/rocketmq/auth/authorization/factory/AuthorizationFactory.java +++ b/auth/src/main/java/org/apache/rocketmq/auth/authorization/factory/AuthorizationFactory.java @@ -105,7 +105,7 @@ public static AuthorizationEvaluator getEvaluator(AuthConfig config, Supplier public static AuthorizationStrategy 
getStrategy(AuthConfig config, Supplier metadataService) { try { Class clazz = StatelessAuthorizationStrategy.class; - if (StringUtils.isNotBlank(config.getAuthenticationStrategy())) { + if (StringUtils.isNotBlank(config.getAuthorizationStrategy())) { clazz = (Class) Class.forName(config.getAuthorizationStrategy()); } return clazz.getDeclaredConstructor(AuthConfig.class, Supplier.class).newInstance(config, metadataService); diff --git a/broker/pom.xml b/broker/pom.xml index 7f74059a969..f74c12989a1 100644 --- a/broker/pom.xml +++ b/broker/pom.xml @@ -13,7 +13,7 @@ org.apache.rocketmq rocketmq-all - 5.3.1-SNAPSHOT + 5.3.2-SNAPSHOT 4.0.0 diff --git a/broker/src/main/java/org/apache/rocketmq/broker/BrokerController.java b/broker/src/main/java/org/apache/rocketmq/broker/BrokerController.java index 22ac7fedf1c..aaf06caddf8 100644 --- a/broker/src/main/java/org/apache/rocketmq/broker/BrokerController.java +++ b/broker/src/main/java/org/apache/rocketmq/broker/BrokerController.java @@ -18,7 +18,6 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Lists; -import java.io.IOException; import java.net.InetSocketAddress; import java.util.AbstractMap; import java.util.ArrayList; @@ -789,6 +788,9 @@ public boolean initializeMessageStore() { defaultMessageStore = new RocksDBMessageStore(this.messageStoreConfig, this.brokerStatsManager, this.messageArrivingListener, this.brokerConfig, topicConfigManager.getTopicConfigTable()); } else { defaultMessageStore = new DefaultMessageStore(this.messageStoreConfig, this.brokerStatsManager, this.messageArrivingListener, this.brokerConfig, topicConfigManager.getTopicConfigTable()); + if (messageStoreConfig.isRocksdbCQDoubleWriteEnable()) { + defaultMessageStore.enableRocksdbCQWrite(); + } } if (messageStoreConfig.isEnableDLegerCommitLog()) { @@ -812,7 +814,7 @@ public boolean initializeMessageStore() { this.timerMessageStore.registerEscapeBridgeHook(msg -> escapeBridge.putMessage(msg)); this.messageStore.setTimerMessageStore(this.timerMessageStore); } - } catch (IOException e) { + } catch (Exception e) { result = false; LOG.error("BrokerController#initialize: unexpected error occurs", e); } diff --git a/broker/src/main/java/org/apache/rocketmq/broker/controller/ReplicasManager.java b/broker/src/main/java/org/apache/rocketmq/broker/controller/ReplicasManager.java index c294f860ba3..f22f22a12bd 100644 --- a/broker/src/main/java/org/apache/rocketmq/broker/controller/ReplicasManager.java +++ b/broker/src/main/java/org/apache/rocketmq/broker/controller/ReplicasManager.java @@ -525,7 +525,7 @@ private boolean applyBrokerId() { return true; } catch (Exception e) { - LOGGER.error("fail to apply broker id: {}", e, tempBrokerMetadata.getBrokerId()); + LOGGER.error("fail to apply broker id: {}", tempBrokerMetadata.getBrokerId(), e); return false; } } @@ -686,7 +686,7 @@ private void schedulingSyncBrokerMetadata() { } /** - * Scheduling sync controller medata. + * Scheduling sync controller metadata. */ private boolean schedulingSyncControllerMetadata() { // Get controller metadata first. 
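[Editor's note on the ReplicasManager logging change above: SLF4J binds `{}` placeholders from the leading arguments and only treats the final argument as a Throwable (printing its stack trace) when it is last, which is why the patch swaps the exception and the broker id. A minimal sketch, not part of this patch; the class name is illustrative, and plain org.slf4j is used here in place of RocketMQ's shaded org.apache.rocketmq.logging.org.slf4j package, which exposes the same calls.]

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggingOrderExample {
        private static final Logger LOGGER = LoggerFactory.getLogger(LoggingOrderExample.class);

        static void demo(long brokerId, Exception e) {
            // Before the fix: the exception fills the {} placeholder, the broker id is
            // silently dropped, and no stack trace is printed.
            LOGGER.error("fail to apply broker id: {}", e, brokerId);

            // After the fix: the broker id fills the placeholder, and the trailing
            // Throwable is logged with its full stack trace.
            LOGGER.error("fail to apply broker id: {}", brokerId, e);
        }
    }
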
diff --git a/broker/src/main/java/org/apache/rocketmq/broker/failover/EscapeBridge.java b/broker/src/main/java/org/apache/rocketmq/broker/failover/EscapeBridge.java index 762d917d640..dd37f42b2c5 100644 --- a/broker/src/main/java/org/apache/rocketmq/broker/failover/EscapeBridge.java +++ b/broker/src/main/java/org/apache/rocketmq/broker/failover/EscapeBridge.java @@ -137,7 +137,7 @@ public SendResult putMessageToRemoteBroker(MessageExtBrokerInner messageExt, Str brokerNameToSend = mqSelected.getBrokerName(); if (this.brokerController.getBrokerConfig().getBrokerName().equals(brokerNameToSend)) { LOG.warn("putMessageToRemoteBroker failed, remote broker not found. Topic: {}, MsgId: {}, Broker: {}", - messageExt.getTopic(), messageExt.getMsgId(), brokerNameToSend); + messageExt.getTopic(), messageExt.getMsgId(), brokerNameToSend); return null; } } else { @@ -147,7 +147,7 @@ public SendResult putMessageToRemoteBroker(MessageExtBrokerInner messageExt, Str final String brokerAddrToSend = this.brokerController.getTopicRouteInfoManager().findBrokerAddressInPublish(brokerNameToSend); if (null == brokerAddrToSend) { LOG.warn("putMessageToRemoteBroker failed, remote broker address not found. Topic: {}, MsgId: {}, Broker: {}", - messageExt.getTopic(), messageExt.getMsgId(), brokerNameToSend); + messageExt.getTopic(), messageExt.getMsgId(), brokerNameToSend); return null; } @@ -197,7 +197,7 @@ public CompletableFuture asyncPutMessage(MessageExtBrokerInner producerGroup, SEND_TIMEOUT); return future.exceptionally(throwable -> null) - .thenApplyAsync(sendResult -> transformSendResult2PutResult(sendResult), this.defaultAsyncSenderExecutor) + .thenApplyAsync(this::transformSendResult2PutResult, this.defaultAsyncSenderExecutor) .exceptionally(throwable -> transformSendResult2PutResult(null)); } catch (Exception e) { @@ -211,7 +211,6 @@ public CompletableFuture asyncPutMessage(MessageExtBrokerInner } } - private String getProducerGroup(MessageExtBrokerInner messageExt) { if (null == messageExt) { return this.innerProducerGroupName; @@ -223,12 +222,29 @@ private String getProducerGroup(MessageExtBrokerInner messageExt) { return producerGroup; } - public PutMessageResult putMessageToSpecificQueue(MessageExtBrokerInner messageExt) { BrokerController masterBroker = this.brokerController.peekMasterBroker(); if (masterBroker != null) { return masterBroker.getMessageStore().putMessage(messageExt); - } else if (this.brokerController.getBrokerConfig().isEnableSlaveActingMaster() + } + try { + return asyncRemotePutMessageToSpecificQueue(messageExt).get(SEND_TIMEOUT, TimeUnit.MILLISECONDS); + } catch (Exception e) { + LOG.error("Put message to specific queue error", e); + return new PutMessageResult(PutMessageStatus.UNKNOWN_ERROR, null, true); + } + } + + public CompletableFuture asyncPutMessageToSpecificQueue(MessageExtBrokerInner messageExt) { + BrokerController masterBroker = this.brokerController.peekMasterBroker(); + if (masterBroker != null) { + return masterBroker.getMessageStore().asyncPutMessage(messageExt); + } + return asyncRemotePutMessageToSpecificQueue(messageExt); + } + + public CompletableFuture asyncRemotePutMessageToSpecificQueue(MessageExtBrokerInner messageExt) { + if (this.brokerController.getBrokerConfig().isEnableSlaveActingMaster() && this.brokerController.getBrokerConfig().isEnableRemoteEscape()) { try { messageExt.setWaitStoreMsgOK(false); @@ -237,7 +253,7 @@ public PutMessageResult putMessageToSpecificQueue(MessageExtBrokerInner messageE List mqs = topicPublishInfo.getMessageQueueList(); if 
(null == mqs || mqs.isEmpty()) { - return new PutMessageResult(PutMessageStatus.PUT_TO_REMOTE_BROKER_FAIL, null, true); + return CompletableFuture.completedFuture(new PutMessageResult(PutMessageStatus.PUT_TO_REMOTE_BROKER_FAIL, null, true)); } String id = messageExt.getTopic() + messageExt.getStoreHost(); @@ -248,19 +264,17 @@ public PutMessageResult putMessageToSpecificQueue(MessageExtBrokerInner messageE String brokerNameToSend = mq.getBrokerName(); String brokerAddrToSend = this.brokerController.getTopicRouteInfoManager().findBrokerAddressInPublish(brokerNameToSend); - final SendResult sendResult = this.brokerController.getBrokerOuterAPI().sendMessageToSpecificBroker( + return this.brokerController.getBrokerOuterAPI().sendMessageToSpecificBrokerAsync( brokerAddrToSend, brokerNameToSend, - messageExt, this.getProducerGroup(messageExt), SEND_TIMEOUT); - - return transformSendResult2PutResult(sendResult); + messageExt, this.getProducerGroup(messageExt), SEND_TIMEOUT).thenCompose(sendResult -> CompletableFuture.completedFuture(transformSendResult2PutResult(sendResult))); } catch (Exception e) { LOG.error("sendMessageInFailover to remote failed", e); - return new PutMessageResult(PutMessageStatus.PUT_TO_REMOTE_BROKER_FAIL, null, true); + return CompletableFuture.completedFuture(new PutMessageResult(PutMessageStatus.PUT_TO_REMOTE_BROKER_FAIL, null, true)); } } else { LOG.warn("Put message to specific queue failed, enableSlaveActingMaster={}, enableRemoteEscape={}.", this.brokerController.getBrokerConfig().isEnableSlaveActingMaster(), this.brokerController.getBrokerConfig().isEnableRemoteEscape()); - return new PutMessageResult(PutMessageStatus.SERVICE_NOT_AVAILABLE, null); + return CompletableFuture.completedFuture(new PutMessageResult(PutMessageStatus.SERVICE_NOT_AVAILABLE, null)); } } @@ -282,12 +296,14 @@ private PutMessageResult transformSendResult2PutResult(SendResult sendResult) { } } - public Triple getMessage(String topic, long offset, int queueId, String brokerName, boolean deCompressBody) { + public Triple getMessage(String topic, long offset, int queueId, String brokerName, + boolean deCompressBody) { return getMessageAsync(topic, offset, queueId, brokerName, deCompressBody).join(); } // Triple, check info and retry if and only if MessageExt is null - public CompletableFuture> getMessageAsync(String topic, long offset, int queueId, String brokerName, boolean deCompressBody) { + public CompletableFuture> getMessageAsync(String topic, long offset, + int queueId, String brokerName, boolean deCompressBody) { MessageStore messageStore = brokerController.getMessageStoreByBrokerName(brokerName); if (messageStore != null) { return messageStore.getMessageAsync(innerConsumerGroupName, topic, queueId, offset, 1, null) @@ -300,9 +316,9 @@ public CompletableFuture> getMessageAsync(St if (list == null || list.isEmpty()) { // OFFSET_FOUND_NULL returned by TieredMessageStore indicates exception occurred boolean needRetry = GetMessageStatus.OFFSET_FOUND_NULL.equals(result.getStatus()) - && messageStore instanceof TieredMessageStore; + && messageStore instanceof TieredMessageStore; LOG.warn("Can not get msg , topic {}, offset {}, queueId {}, needRetry {}, result is {}", - topic, offset, queueId, needRetry, result); + topic, offset, queueId, needRetry, result); return Triple.of(null, "Can not get msg", needRetry); } return Triple.of(list.get(0), "", false); @@ -340,12 +356,14 @@ protected List decodeMsgList(GetMessageResult getMessageResult, bool return foundList; } - protected Triple 
getMessageFromRemote(String topic, long offset, int queueId, String brokerName) { + protected Triple getMessageFromRemote(String topic, long offset, int queueId, + String brokerName) { return getMessageFromRemoteAsync(topic, offset, queueId, brokerName).join(); } // Triple, check info and retry if and only if MessageExt is null - protected CompletableFuture> getMessageFromRemoteAsync(String topic, long offset, int queueId, String brokerName) { + protected CompletableFuture> getMessageFromRemoteAsync(String topic, + long offset, int queueId, String brokerName) { try { String brokerAddr = this.brokerController.getTopicRouteInfoManager().findBrokerAddressInSubscribe(brokerName, MixAll.MASTER_ID, false); if (null == brokerAddr) { @@ -359,11 +377,11 @@ protected CompletableFuture> getMessageFromR } return this.brokerController.getBrokerOuterAPI().pullMessageFromSpecificBrokerAsync(brokerName, - brokerAddr, this.innerConsumerGroupName, topic, queueId, offset, 1, DEFAULT_PULL_TIMEOUT_MILLIS) + brokerAddr, this.innerConsumerGroupName, topic, queueId, offset, 1, DEFAULT_PULL_TIMEOUT_MILLIS) .thenApply(pullResult -> { if (pullResult.getLeft() != null - && PullStatus.FOUND.equals(pullResult.getLeft().getPullStatus()) - && CollectionUtils.isNotEmpty(pullResult.getLeft().getMsgFoundList())) { + && PullStatus.FOUND.equals(pullResult.getLeft().getPullStatus()) + && CollectionUtils.isNotEmpty(pullResult.getLeft().getMsgFoundList())) { return Triple.of(pullResult.getLeft().getMsgFoundList().get(0), "", false); } return Triple.of(null, pullResult.getMiddle(), pullResult.getRight()); diff --git a/broker/src/main/java/org/apache/rocketmq/broker/offset/ConsumerOffsetManager.java b/broker/src/main/java/org/apache/rocketmq/broker/offset/ConsumerOffsetManager.java index 21f20dde325..403324137cc 100644 --- a/broker/src/main/java/org/apache/rocketmq/broker/offset/ConsumerOffsetManager.java +++ b/broker/src/main/java/org/apache/rocketmq/broker/offset/ConsumerOffsetManager.java @@ -31,6 +31,7 @@ import org.apache.rocketmq.broker.BrokerController; import org.apache.rocketmq.broker.BrokerPathConfigHelper; import org.apache.rocketmq.common.ConfigManager; +import org.apache.rocketmq.common.MixAll; import org.apache.rocketmq.common.UtilAll; import org.apache.rocketmq.common.constant.LoggerName; import org.apache.rocketmq.logging.org.slf4j.Logger; @@ -373,6 +374,25 @@ public void setDataVersion(DataVersion dataVersion) { this.dataVersion = dataVersion; } + public boolean loadDataVersion() { + String fileName = null; + try { + fileName = this.configFilePath(); + String jsonString = MixAll.file2String(fileName); + if (jsonString != null) { + ConsumerOffsetManager obj = RemotingSerializable.fromJson(jsonString, ConsumerOffsetManager.class); + if (obj != null) { + this.dataVersion = obj.dataVersion; + } + LOG.info("load consumer offset dataVersion success,{},{} ", fileName, jsonString); + } + return true; + } catch (Exception e) { + LOG.error("load consumer offset dataVersion failed " + fileName, e); + return false; + } + } + public void removeOffset(final String group) { Iterator>> it = this.offsetTable.entrySet().iterator(); while (it.hasNext()) { diff --git a/broker/src/main/java/org/apache/rocketmq/broker/offset/LmqConsumerOffsetManager.java b/broker/src/main/java/org/apache/rocketmq/broker/offset/LmqConsumerOffsetManager.java index ce70b1a820f..53e9e2e0634 100644 --- a/broker/src/main/java/org/apache/rocketmq/broker/offset/LmqConsumerOffsetManager.java +++ 
b/broker/src/main/java/org/apache/rocketmq/broker/offset/LmqConsumerOffsetManager.java @@ -17,6 +17,7 @@ package org.apache.rocketmq.broker.offset; import java.util.HashMap; +import java.util.Iterator; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -110,4 +111,25 @@ public ConcurrentHashMap getLmqOffsetTable() { public void setLmqOffsetTable(ConcurrentHashMap lmqOffsetTable) { this.lmqOffsetTable = lmqOffsetTable; } + + @Override + public void removeOffset(String group) { + if (!MixAll.isLmq(group)) { + super.removeOffset(group); + return; + } + Iterator> it = this.lmqOffsetTable.entrySet().iterator(); + while (it.hasNext()) { + Map.Entry next = it.next(); + String topicAtGroup = next.getKey(); + if (topicAtGroup.contains(group)) { + String[] arrays = topicAtGroup.split(TOPIC_GROUP_SEPARATOR); + if (arrays.length == 2 && group.equals(arrays[1])) { + it.remove(); + removeConsumerOffset(topicAtGroup); + LOG.warn("clean lmq group offset {}", topicAtGroup); + } + } + } + } } diff --git a/broker/src/main/java/org/apache/rocketmq/broker/offset/RocksDBConsumerOffsetManager.java b/broker/src/main/java/org/apache/rocketmq/broker/offset/RocksDBConsumerOffsetManager.java index de293fc4992..1e7cda71eed 100644 --- a/broker/src/main/java/org/apache/rocketmq/broker/offset/RocksDBConsumerOffsetManager.java +++ b/broker/src/main/java/org/apache/rocketmq/broker/offset/RocksDBConsumerOffsetManager.java @@ -16,26 +16,31 @@ */ package org.apache.rocketmq.broker.offset; +import com.alibaba.fastjson.JSON; +import com.alibaba.fastjson.serializer.SerializerFeature; import java.io.File; import java.util.Iterator; import java.util.Map.Entry; import java.util.concurrent.ConcurrentMap; - import org.apache.rocketmq.broker.BrokerController; import org.apache.rocketmq.broker.RocksDBConfigManager; +import org.apache.rocketmq.common.UtilAll; +import org.apache.rocketmq.common.constant.LoggerName; import org.apache.rocketmq.common.utils.DataConverter; +import org.apache.rocketmq.logging.org.slf4j.Logger; +import org.apache.rocketmq.logging.org.slf4j.LoggerFactory; +import org.apache.rocketmq.remoting.protocol.DataVersion; import org.rocksdb.WriteBatch; -import com.alibaba.fastjson.JSON; -import com.alibaba.fastjson.serializer.SerializerFeature; - public class RocksDBConsumerOffsetManager extends ConsumerOffsetManager { + protected static final Logger log = LoggerFactory.getLogger(LoggerName.BROKER_LOGGER_NAME); + protected RocksDBConfigManager rocksDBConfigManager; public RocksDBConsumerOffsetManager(BrokerController brokerController) { super(brokerController); - this.rocksDBConfigManager = new RocksDBConfigManager(configFilePath(), brokerController.getMessageStoreConfig().getMemTableFlushIntervalMs()); + this.rocksDBConfigManager = new RocksDBConfigManager(rocksdbConfigFilePath(), brokerController.getMessageStoreConfig().getMemTableFlushIntervalMs()); } @Override @@ -43,9 +48,47 @@ public boolean load() { if (!rocksDBConfigManager.init()) { return false; } - return this.rocksDBConfigManager.loadData(this::decodeOffset); + if (!loadDataVersion() || !loadConsumerOffset()) { + return false; + } + + return true; + } + + public boolean loadConsumerOffset() { + return this.rocksDBConfigManager.loadData(this::decodeOffset) && merge(); + } + + private boolean merge() { + if (!brokerController.getMessageStoreConfig().isTransferOffsetJsonToRocksdb()) { + log.info("the switch transferOffsetJsonToRocksdb is off, no merge offset operation is needed."); + return true; + } + if 
(!UtilAll.isPathExists(this.configFilePath()) && !UtilAll.isPathExists(this.configFilePath() + ".bak")) { + log.info("consumerOffset json file does not exist, so skip merge"); + return true; + } + if (!super.loadDataVersion()) { + log.error("load json consumerOffset dataVersion error, startup will exit"); + return false; + } + + final DataVersion dataVersion = super.getDataVersion(); + final DataVersion kvDataVersion = this.getDataVersion(); + if (dataVersion.getCounter().get() > kvDataVersion.getCounter().get()) { + if (!super.load()) { + log.error("load json consumerOffset info failed, startup will exit"); + return false; + } + this.persist(); + this.getDataVersion().assignNewOne(dataVersion); + updateDataVersion(); + log.info("update offset from json, dataVersion:{}, offsetTable: {} ", this.getDataVersion(), JSON.toJSONString(this.getOffsetTable())); + } + return true; } + @Override public boolean stop() { return this.rocksDBConfigManager.stop(); @@ -69,8 +112,7 @@ protected void decodeOffset(final byte[] key, final byte[] body) { LOG.info("load exist local offset, {}, {}", topicAtGroup, wrapper.getOffsetTable()); } - @Override - public String configFilePath() { + public String rocksdbConfigFilePath() { return this.brokerController.getMessageStoreConfig().getStorePathRootDir() + File.separator + "config" + File.separator + "consumerOffsets" + File.separator; } @@ -103,4 +145,23 @@ private void putWriteBatch(final WriteBatch writeBatch, final String topicGroupN byte[] valueBytes = JSON.toJSONBytes(wrapper, SerializerFeature.BrowserCompatible); writeBatch.put(keyBytes, valueBytes); } + + @Override + public boolean loadDataVersion() { + return this.rocksDBConfigManager.loadDataVersion(); + } + + @Override + public DataVersion getDataVersion() { + return rocksDBConfigManager.getKvDataVersion(); + } + + public void updateDataVersion() { + try { + rocksDBConfigManager.updateKvDataVersion(); + } catch (Exception e) { + log.error("update consumer offset dataVersion error", e); + throw new RuntimeException(e); + } + } } diff --git a/broker/src/main/java/org/apache/rocketmq/broker/processor/AckMessageProcessor.java b/broker/src/main/java/org/apache/rocketmq/broker/processor/AckMessageProcessor.java index 6f7b7e8a24e..dc1b1b53a32 100644 --- a/broker/src/main/java/org/apache/rocketmq/broker/processor/AckMessageProcessor.java +++ b/broker/src/main/java/org/apache/rocketmq/broker/processor/AckMessageProcessor.java @@ -98,7 +98,7 @@ public boolean isPopReviveServiceRunning() { @Override public RemotingCommand processRequest(final ChannelHandlerContext ctx, - RemotingCommand request) throws RemotingCommandException { + RemotingCommand request) throws RemotingCommandException { return this.processRequest(ctx.channel(), request, true); } @@ -108,7 +108,7 @@ public boolean rejectRequest() { } private RemotingCommand processRequest(final Channel channel, RemotingCommand request, - boolean brokerAllowSuspend) throws RemotingCommandException { + boolean brokerAllowSuspend) throws RemotingCommandException { AckMessageRequestHeader requestHeader; BatchAckMessageRequestBody reqBody = null; final RemotingCommand response = RemotingCommand.createResponseCommand(ResponseCode.SUCCESS, null); @@ -126,7 +126,7 @@ private RemotingCommand processRequest(final Channel channel, RemotingCommand re if (requestHeader.getQueueId() >= topicConfig.getReadQueueNums() || requestHeader.getQueueId() < 0) { String errorInfo = String.format("queueId[%d] is illegal, topic:[%s] topicConfig.readQueueNums:[%d] consumer:[%s]", - 
requestHeader.getQueueId(), requestHeader.getTopic(), topicConfig.getReadQueueNums(), channel.remoteAddress()); + requestHeader.getQueueId(), requestHeader.getTopic(), topicConfig.getReadQueueNums(), channel.remoteAddress()); POP_LOGGER.warn(errorInfo); response.setCode(ResponseCode.MESSAGE_ILLEGAL); response.setRemark(errorInfo); @@ -137,7 +137,7 @@ private RemotingCommand processRequest(final Channel channel, RemotingCommand re long maxOffset = this.brokerController.getMessageStore().getMaxOffsetInQueue(requestHeader.getTopic(), requestHeader.getQueueId()); if (requestHeader.getOffset() < minOffset || requestHeader.getOffset() > maxOffset) { String errorInfo = String.format("offset is illegal, key:%s@%d, commit:%d, store:%d~%d", - requestHeader.getTopic(), requestHeader.getQueueId(), requestHeader.getOffset(), minOffset, maxOffset); + requestHeader.getTopic(), requestHeader.getQueueId(), requestHeader.getOffset(), minOffset, maxOffset); POP_LOGGER.warn(errorInfo); response.setCode(ResponseCode.NO_MESSAGE); response.setRemark(errorInfo); @@ -165,7 +165,8 @@ private RemotingCommand processRequest(final Channel channel, RemotingCommand re return response; } - private void appendAck(final AckMessageRequestHeader requestHeader, final BatchAck batchAck, final RemotingCommand response, final Channel channel, String brokerName) { + private void appendAck(final AckMessageRequestHeader requestHeader, final BatchAck batchAck, + final RemotingCommand response, final Channel channel, String brokerName) { String[] extraInfo; String consumeGroup, topic; int qId, rqId; @@ -268,18 +269,36 @@ private void appendAck(final AckMessageRequestHeader requestHeader, final BatchA msgInner.setDeliverTimeMs(popTime + invisibleTime); msgInner.getProperties().put(MessageConst.PROPERTY_UNIQ_CLIENT_MESSAGE_ID_KEYIDX, PopMessageProcessor.genAckUniqueId(ackMsg)); msgInner.setPropertiesString(MessageDecoder.messageProperties2String(msgInner.getProperties())); - PutMessageResult putMessageResult = this.brokerController.getEscapeBridge().putMessageToSpecificQueue(msgInner); + if (brokerController.getBrokerConfig().isAppendAckAsync()) { + int finalAckCount = ackCount; + this.brokerController.getEscapeBridge().asyncPutMessageToSpecificQueue(msgInner).thenAccept(putMessageResult -> { + handlePutMessageResult(putMessageResult, ackMsg, topic, consumeGroup, popTime, qId, finalAckCount); + }).exceptionally(throwable -> { + handlePutMessageResult(new PutMessageResult(PutMessageStatus.UNKNOWN_ERROR, null, false), + ackMsg, topic, consumeGroup, popTime, qId, finalAckCount); + POP_LOGGER.error("put ack msg error ", throwable); + return null; + }); + } else { + PutMessageResult putMessageResult = this.brokerController.getEscapeBridge().putMessageToSpecificQueue(msgInner); + handlePutMessageResult(putMessageResult, ackMsg, topic, consumeGroup, popTime, qId, ackCount); + } + } + + private void handlePutMessageResult(PutMessageResult putMessageResult, AckMsg ackMsg, String topic, + String consumeGroup, long popTime, int qId, int ackCount) { if (putMessageResult.getPutMessageStatus() != PutMessageStatus.PUT_OK - && putMessageResult.getPutMessageStatus() != PutMessageStatus.FLUSH_DISK_TIMEOUT - && putMessageResult.getPutMessageStatus() != PutMessageStatus.FLUSH_SLAVE_TIMEOUT - && putMessageResult.getPutMessageStatus() != PutMessageStatus.SLAVE_NOT_AVAILABLE) { + && putMessageResult.getPutMessageStatus() != PutMessageStatus.FLUSH_DISK_TIMEOUT + && putMessageResult.getPutMessageStatus() != PutMessageStatus.FLUSH_SLAVE_TIMEOUT + && 
putMessageResult.getPutMessageStatus() != PutMessageStatus.SLAVE_NOT_AVAILABLE) { POP_LOGGER.error("put ack msg error:" + putMessageResult); } PopMetricsManager.incPopReviveAckPutCount(ackMsg, putMessageResult.getPutMessageStatus()); brokerController.getPopInflightMessageCounter().decrementInFlightMessageNum(topic, consumeGroup, popTime, qId, ackCount); } - protected void ackOrderly(String topic, String consumeGroup, int qId, long ackOffset, long popTime, long invisibleTime, Channel channel, RemotingCommand response) { + protected void ackOrderly(String topic, String consumeGroup, int qId, long ackOffset, long popTime, + long invisibleTime, Channel channel, RemotingCommand response) { String lockKey = topic + PopAckConstants.SPLIT + consumeGroup + PopAckConstants.SPLIT + qId; long oldOffset = this.brokerController.getConsumerOffsetManager().queryOffset(consumeGroup, topic, qId); if (ackOffset < oldOffset) { diff --git a/broker/src/main/java/org/apache/rocketmq/broker/processor/AdminBrokerProcessor.java b/broker/src/main/java/org/apache/rocketmq/broker/processor/AdminBrokerProcessor.java index 28bd2549145..80f3f44facb 100644 --- a/broker/src/main/java/org/apache/rocketmq/broker/processor/AdminBrokerProcessor.java +++ b/broker/src/main/java/org/apache/rocketmq/broker/processor/AdminBrokerProcessor.java @@ -18,9 +18,11 @@ import com.alibaba.fastjson.JSON; import com.alibaba.fastjson.JSONObject; +import com.google.common.collect.ImmutableMap; import com.google.common.collect.Sets; import io.netty.channel.Channel; import io.netty.channel.ChannelHandlerContext; +import io.opentelemetry.api.common.Attributes; import java.io.UnsupportedEncodingException; import java.net.UnknownHostException; import java.nio.charset.StandardCharsets; @@ -38,7 +40,6 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import io.opentelemetry.api.common.Attributes; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import org.apache.rocketmq.acl.AccessValidator; @@ -69,6 +70,7 @@ import org.apache.rocketmq.common.LockCallback; import org.apache.rocketmq.common.MQVersion; import org.apache.rocketmq.common.MixAll; +import org.apache.rocketmq.common.Pair; import org.apache.rocketmq.common.PlainAccessConfig; import org.apache.rocketmq.common.TopicConfig; import org.apache.rocketmq.common.UnlockCallback; @@ -137,6 +139,7 @@ import org.apache.rocketmq.remoting.protocol.body.TopicList; import org.apache.rocketmq.remoting.protocol.body.UnlockBatchRequestBody; import org.apache.rocketmq.remoting.protocol.body.UserInfo; +import org.apache.rocketmq.remoting.protocol.header.CheckRocksdbCqWriteProgressRequestHeader; import org.apache.rocketmq.remoting.protocol.header.CloneGroupOffsetRequestHeader; import org.apache.rocketmq.remoting.protocol.header.ConsumeMessageDirectlyResultRequestHeader; import org.apache.rocketmq.remoting.protocol.header.CreateAccessConfigRequestHeader; @@ -209,16 +212,19 @@ import org.apache.rocketmq.store.MessageStore; import org.apache.rocketmq.store.PutMessageResult; import org.apache.rocketmq.store.PutMessageStatus; +import org.apache.rocketmq.store.RocksDBMessageStore; import org.apache.rocketmq.store.SelectMappedBufferResult; import org.apache.rocketmq.store.config.BrokerRole; +import org.apache.rocketmq.store.plugin.AbstractPluginMessageStore; import org.apache.rocketmq.store.queue.ConsumeQueueInterface; import org.apache.rocketmq.store.queue.CqUnit; import 
org.apache.rocketmq.store.queue.ReferredIterator; import org.apache.rocketmq.store.timer.TimerCheckpoint; import org.apache.rocketmq.store.timer.TimerMessageStore; import org.apache.rocketmq.store.util.LibC; -import static org.apache.rocketmq.broker.metrics.BrokerMetricsConstant.LABEL_IS_SYSTEM; + import static org.apache.rocketmq.broker.metrics.BrokerMetricsConstant.LABEL_INVOCATION_STATUS; +import static org.apache.rocketmq.broker.metrics.BrokerMetricsConstant.LABEL_IS_SYSTEM; import static org.apache.rocketmq.remoting.protocol.RemotingCommand.buildErrorResponse; public class AdminBrokerProcessor implements NettyRequestProcessor { @@ -339,6 +345,8 @@ public RemotingCommand processRequest(ChannelHandlerContext ctx, return fetchAllConsumeStatsInBroker(ctx, request); case RequestCode.QUERY_CONSUME_QUEUE: return queryConsumeQueue(ctx, request); + case RequestCode.CHECK_ROCKSDB_CQ_WRITE_PROGRESS: + return this.checkRocksdbCqWriteProgress(ctx, request); case RequestCode.UPDATE_AND_GET_GROUP_FORBIDDEN: return this.updateAndGetGroupForbidden(ctx, request); case RequestCode.GET_SUBSCRIPTIONGROUP_CONFIG: @@ -458,6 +466,76 @@ private RemotingCommand updateAndGetGroupForbidden(ChannelHandlerContext ctx, Re return response; } + private RemotingCommand checkRocksdbCqWriteProgress(ChannelHandlerContext ctx, RemotingCommand request) throws RemotingCommandException { + CheckRocksdbCqWriteProgressRequestHeader requestHeader = request.decodeCommandCustomHeader(CheckRocksdbCqWriteProgressRequestHeader.class); + String requestTopic = requestHeader.getTopic(); + final RemotingCommand response = RemotingCommand.createResponseCommand(null); + response.setCode(ResponseCode.SUCCESS); + MessageStore messageStore = brokerController.getMessageStore(); + DefaultMessageStore defaultMessageStore; + if (messageStore instanceof AbstractPluginMessageStore) { + defaultMessageStore = (DefaultMessageStore) ((AbstractPluginMessageStore) messageStore).getNext(); + } else { + defaultMessageStore = (DefaultMessageStore) messageStore; + } + RocksDBMessageStore rocksDBMessageStore = defaultMessageStore.getRocksDBMessageStore(); + if (!defaultMessageStore.getMessageStoreConfig().isRocksdbCQDoubleWriteEnable()) { + response.setBody(JSON.toJSONBytes(ImmutableMap.of("diffResult", "rocksdbCQWriteEnable is false, checkRocksdbCqWriteProgressCommand is invalid"))); + return response; + } + + ConcurrentMap> cqTable = defaultMessageStore.getConsumeQueueTable(); + StringBuilder diffResult = new StringBuilder(); + try { + if (StringUtils.isNotBlank(requestTopic)) { + processConsumeQueuesForTopic(cqTable.get(requestTopic), requestTopic, rocksDBMessageStore, diffResult,false); + response.setBody(JSON.toJSONBytes(ImmutableMap.of("diffResult", diffResult.toString()))); + return response; + } + for (Map.Entry> topicEntry : cqTable.entrySet()) { + String topic = topicEntry.getKey(); + processConsumeQueuesForTopic(topicEntry.getValue(), topic, rocksDBMessageStore, diffResult,true); + } + diffResult.append("check all topic successful, size:").append(cqTable.size()); + response.setBody(JSON.toJSONBytes(ImmutableMap.of("diffResult", diffResult.toString()))); + + } catch (Exception e) { + LOGGER.error("CheckRocksdbCqWriteProgressCommand error", e); + response.setBody(JSON.toJSONBytes(ImmutableMap.of("diffResult", e.getMessage()))); + } + return response; + } + + private void processConsumeQueuesForTopic(ConcurrentMap queueMap, String topic, RocksDBMessageStore rocksDBMessageStore, StringBuilder diffResult, boolean checkAll) { + for (Map.Entry queueEntry 
: queueMap.entrySet()) { + Integer queueId = queueEntry.getKey(); + ConsumeQueueInterface jsonCq = queueEntry.getValue(); + ConsumeQueueInterface kvCq = rocksDBMessageStore.getConsumeQueue(topic, queueId); + if (!checkAll) { + String format = String.format("\n[topic: %s, queue: %s] \n kvEarliest : %s | kvLatest : %s \n fileEarliest: %s | fileEarliest: %s ", + topic, queueId, kvCq.getEarliestUnit(), kvCq.getLatestUnit(), jsonCq.getEarliestUnit(), jsonCq.getLatestUnit()); + diffResult.append(format).append("\n"); + } + long maxFileOffsetInQueue = jsonCq.getMaxOffsetInQueue(); + long minOffsetInQueue = kvCq.getMinOffsetInQueue(); + for (long i = minOffsetInQueue; i < maxFileOffsetInQueue; i++) { + Pair fileCqUnit = jsonCq.getCqUnitAndStoreTime(i); + Pair kvCqUnit = kvCq.getCqUnitAndStoreTime(i); + if (fileCqUnit == null || kvCqUnit == null) { + diffResult.append(String.format("[topic: %s, queue: %s, offset: %s] \n kv : %s \n file : %s \n", + topic, queueId, i, kvCqUnit != null ? kvCqUnit.getObject1() : "null", fileCqUnit != null ? fileCqUnit.getObject1() : "null")); + return; + } + if (!checkCqUnitEqual(kvCqUnit.getObject1(), fileCqUnit.getObject1())) { + String diffInfo = String.format("[topic:%s, queue: %s offset: %s] \n file : %s \n kv : %s \n", + topic, queueId, i, kvCqUnit.getObject1(), fileCqUnit.getObject1()); + LOGGER.error(diffInfo); + diffResult.append(diffInfo).append(System.lineSeparator()); + return; + } + } + } + } @Override public boolean rejectRequest() { return false; @@ -3305,4 +3383,20 @@ private boolean validateBlackListConfigExist(Properties properties) { } return false; } + + private boolean checkCqUnitEqual(CqUnit cqUnit1, CqUnit cqUnit2) { + if (cqUnit1.getQueueOffset() != cqUnit2.getQueueOffset()) { + return false; + } + if (cqUnit1.getSize() != cqUnit2.getSize()) { + return false; + } + if (cqUnit1.getPos() != cqUnit2.getPos()) { + return false; + } + if (cqUnit1.getBatchNum() != cqUnit2.getBatchNum()) { + return false; + } + return cqUnit1.getTagsCode() == cqUnit2.getTagsCode(); + } } diff --git a/broker/src/main/java/org/apache/rocketmq/broker/processor/ChangeInvisibleTimeProcessor.java b/broker/src/main/java/org/apache/rocketmq/broker/processor/ChangeInvisibleTimeProcessor.java index bdfffff096a..af3b8ae6f05 100644 --- a/broker/src/main/java/org/apache/rocketmq/broker/processor/ChangeInvisibleTimeProcessor.java +++ b/broker/src/main/java/org/apache/rocketmq/broker/processor/ChangeInvisibleTimeProcessor.java @@ -19,6 +19,8 @@ import com.alibaba.fastjson.JSON; import io.netty.channel.Channel; import io.netty.channel.ChannelHandlerContext; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; import org.apache.rocketmq.broker.BrokerController; import org.apache.rocketmq.broker.metrics.PopMetricsManager; import org.apache.rocketmq.common.PopAckConstants; @@ -33,13 +35,13 @@ import org.apache.rocketmq.logging.org.slf4j.LoggerFactory; import org.apache.rocketmq.remoting.common.RemotingHelper; import org.apache.rocketmq.remoting.exception.RemotingCommandException; +import org.apache.rocketmq.remoting.netty.NettyRemotingAbstract; import org.apache.rocketmq.remoting.netty.NettyRequestProcessor; import org.apache.rocketmq.remoting.protocol.RemotingCommand; import org.apache.rocketmq.remoting.protocol.ResponseCode; import org.apache.rocketmq.remoting.protocol.header.ChangeInvisibleTimeRequestHeader; import org.apache.rocketmq.remoting.protocol.header.ChangeInvisibleTimeResponseHeader; import 
org.apache.rocketmq.remoting.protocol.header.ExtraInfoUtil; -import org.apache.rocketmq.store.PutMessageResult; import org.apache.rocketmq.store.PutMessageStatus; import org.apache.rocketmq.store.pop.AckMsg; import org.apache.rocketmq.store.pop.PopCheckPoint; @@ -67,6 +69,35 @@ public boolean rejectRequest() { private RemotingCommand processRequest(final Channel channel, RemotingCommand request, boolean brokerAllowSuspend) throws RemotingCommandException { + + CompletableFuture responseFuture = processRequestAsync(channel, request, brokerAllowSuspend); + + if (brokerController.getBrokerConfig().isAppendCkAsync() && brokerController.getBrokerConfig().isAppendAckAsync()) { + responseFuture.thenAccept(response -> doResponse(channel, request, response)).exceptionally(throwable -> { + RemotingCommand response = RemotingCommand.createResponseCommand(ChangeInvisibleTimeResponseHeader.class); + response.setCode(ResponseCode.SYSTEM_ERROR); + response.setOpaque(request.getOpaque()); + doResponse(channel, request, response); + POP_LOGGER.error("append checkpoint or ack origin failed", throwable); + return null; + }); + } else { + RemotingCommand response; + try { + response = responseFuture.get(3000, TimeUnit.MILLISECONDS); + } catch (Exception e) { + response = RemotingCommand.createResponseCommand(ChangeInvisibleTimeResponseHeader.class); + response.setCode(ResponseCode.SYSTEM_ERROR); + response.setOpaque(request.getOpaque()); + POP_LOGGER.error("append checkpoint or ack origin failed", e); + } + return response; + } + return null; + } + + public CompletableFuture processRequestAsync(final Channel channel, RemotingCommand request, + boolean brokerAllowSuspend) throws RemotingCommandException { final ChangeInvisibleTimeRequestHeader requestHeader = (ChangeInvisibleTimeRequestHeader) request.decodeCommandCustomHeader(ChangeInvisibleTimeRequestHeader.class); RemotingCommand response = RemotingCommand.createResponseCommand(ChangeInvisibleTimeResponseHeader.class); response.setCode(ResponseCode.SUCCESS); @@ -77,7 +108,7 @@ private RemotingCommand processRequest(final Channel channel, RemotingCommand re POP_LOGGER.error("The topic {} not exist, consumer: {} ", requestHeader.getTopic(), RemotingHelper.parseChannelRemoteAddr(channel)); response.setCode(ResponseCode.TOPIC_NOT_EXIST); response.setRemark(String.format("topic[%s] not exist, apply first please! 
%s", requestHeader.getTopic(), FAQUrl.suggestTodo(FAQUrl.APPLY_TOPIC_URL))); - return response; + return CompletableFuture.completedFuture(response); } if (requestHeader.getQueueId() >= topicConfig.getReadQueueNums() || requestHeader.getQueueId() < 0) { @@ -86,46 +117,35 @@ private RemotingCommand processRequest(final Channel channel, RemotingCommand re POP_LOGGER.warn(errorInfo); response.setCode(ResponseCode.MESSAGE_ILLEGAL); response.setRemark(errorInfo); - return response; + return CompletableFuture.completedFuture(response); } long minOffset = this.brokerController.getMessageStore().getMinOffsetInQueue(requestHeader.getTopic(), requestHeader.getQueueId()); long maxOffset = this.brokerController.getMessageStore().getMaxOffsetInQueue(requestHeader.getTopic(), requestHeader.getQueueId()); if (requestHeader.getOffset() < minOffset || requestHeader.getOffset() > maxOffset) { response.setCode(ResponseCode.NO_MESSAGE); - return response; + return CompletableFuture.completedFuture(response); } String[] extraInfo = ExtraInfoUtil.split(requestHeader.getExtraInfo()); if (ExtraInfoUtil.isOrder(extraInfo)) { - return processChangeInvisibleTimeForOrder(requestHeader, extraInfo, response, responseHeader); + return CompletableFuture.completedFuture(processChangeInvisibleTimeForOrder(requestHeader, extraInfo, response, responseHeader)); } // add new ck long now = System.currentTimeMillis(); - PutMessageResult ckResult = appendCheckPoint(requestHeader, ExtraInfoUtil.getReviveQid(extraInfo), requestHeader.getQueueId(), requestHeader.getOffset(), now, ExtraInfoUtil.getBrokerName(extraInfo)); - - if (ckResult.getPutMessageStatus() != PutMessageStatus.PUT_OK - && ckResult.getPutMessageStatus() != PutMessageStatus.FLUSH_DISK_TIMEOUT - && ckResult.getPutMessageStatus() != PutMessageStatus.FLUSH_SLAVE_TIMEOUT - && ckResult.getPutMessageStatus() != PutMessageStatus.SLAVE_NOT_AVAILABLE) { - POP_LOGGER.error("change Invisible, put new ck error: {}", ckResult); - response.setCode(ResponseCode.SYSTEM_ERROR); - return response; - } - - // ack old msg. - try { - ackOrigin(requestHeader, extraInfo); - } catch (Throwable e) { - POP_LOGGER.error("change Invisible, put ack msg error: {}, {}", requestHeader.getExtraInfo(), e.getMessage()); - // cancel new ck? 
- } - responseHeader.setInvisibleTime(requestHeader.getInvisibleTime()); - responseHeader.setPopTime(now); - responseHeader.setReviveQid(ExtraInfoUtil.getReviveQid(extraInfo)); - return response; + CompletableFuture futureResult = appendCheckPointThenAckOrigin(requestHeader, ExtraInfoUtil.getReviveQid(extraInfo), requestHeader.getQueueId(), requestHeader.getOffset(), now, extraInfo); + return futureResult.thenCompose(result -> { + if (result) { + responseHeader.setInvisibleTime(requestHeader.getInvisibleTime()); + responseHeader.setPopTime(now); + responseHeader.setReviveQid(ExtraInfoUtil.getReviveQid(extraInfo)); + } else { + response.setCode(ResponseCode.SYSTEM_ERROR); + } + return CompletableFuture.completedFuture(response); + }); } protected RemotingCommand processChangeInvisibleTimeForOrder(ChangeInvisibleTimeRequestHeader requestHeader, @@ -158,7 +178,8 @@ protected RemotingCommand processChangeInvisibleTimeForOrder(ChangeInvisibleTime return response; } - private void ackOrigin(final ChangeInvisibleTimeRequestHeader requestHeader, String[] extraInfo) { + private CompletableFuture ackOrigin(final ChangeInvisibleTimeRequestHeader requestHeader, + String[] extraInfo) { MessageExtBrokerInner msgInner = new MessageExtBrokerInner(); AckMsg ackMsg = new AckMsg(); @@ -176,7 +197,7 @@ private void ackOrigin(final ChangeInvisibleTimeRequestHeader requestHeader, Str this.brokerController.getBrokerStatsManager().incGroupAckNums(requestHeader.getConsumerGroup(), requestHeader.getTopic(), 1); if (brokerController.getPopMessageProcessor().getPopBufferMergeService().addAk(rqId, ackMsg)) { - return; + return CompletableFuture.completedFuture(true); } msgInner.setTopic(reviveTopic); @@ -189,18 +210,25 @@ private void ackOrigin(final ChangeInvisibleTimeRequestHeader requestHeader, Str msgInner.setDeliverTimeMs(ExtraInfoUtil.getPopTime(extraInfo) + ExtraInfoUtil.getInvisibleTime(extraInfo)); msgInner.getProperties().put(MessageConst.PROPERTY_UNIQ_CLIENT_MESSAGE_ID_KEYIDX, PopMessageProcessor.genAckUniqueId(ackMsg)); msgInner.setPropertiesString(MessageDecoder.messageProperties2String(msgInner.getProperties())); - PutMessageResult putMessageResult = this.brokerController.getEscapeBridge().putMessageToSpecificQueue(msgInner); - if (putMessageResult.getPutMessageStatus() != PutMessageStatus.PUT_OK - && putMessageResult.getPutMessageStatus() != PutMessageStatus.FLUSH_DISK_TIMEOUT - && putMessageResult.getPutMessageStatus() != PutMessageStatus.FLUSH_SLAVE_TIMEOUT - && putMessageResult.getPutMessageStatus() != PutMessageStatus.SLAVE_NOT_AVAILABLE) { - POP_LOGGER.error("change Invisible, put ack msg fail: {}, {}", ackMsg, putMessageResult); - } - PopMetricsManager.incPopReviveAckPutCount(ackMsg, putMessageResult.getPutMessageStatus()); + return this.brokerController.getEscapeBridge().asyncPutMessageToSpecificQueue(msgInner).thenCompose(putMessageResult -> { + if (putMessageResult.getPutMessageStatus() != PutMessageStatus.PUT_OK + && putMessageResult.getPutMessageStatus() != PutMessageStatus.FLUSH_DISK_TIMEOUT + && putMessageResult.getPutMessageStatus() != PutMessageStatus.FLUSH_SLAVE_TIMEOUT + && putMessageResult.getPutMessageStatus() != PutMessageStatus.SLAVE_NOT_AVAILABLE) { + POP_LOGGER.error("change Invisible, put ack msg fail: {}, {}", ackMsg, putMessageResult); + } + PopMetricsManager.incPopReviveAckPutCount(ackMsg, putMessageResult.getPutMessageStatus()); + return CompletableFuture.completedFuture(true); + }).exceptionally(e -> { + POP_LOGGER.error("change Invisible, put ack msg error: {}, {}", 
requestHeader.getExtraInfo(), e.getMessage()); + return false; + }); } - private PutMessageResult appendCheckPoint(final ChangeInvisibleTimeRequestHeader requestHeader, int reviveQid, - int queueId, long offset, long popTime, String brokerName) { + private CompletableFuture appendCheckPointThenAckOrigin( + final ChangeInvisibleTimeRequestHeader requestHeader, + int reviveQid, + int queueId, long offset, long popTime, String[] extraInfo) { // add check point msg to revive log MessageExtBrokerInner msgInner = new MessageExtBrokerInner(); msgInner.setTopic(reviveTopic); @@ -214,7 +242,7 @@ private PutMessageResult appendCheckPoint(final ChangeInvisibleTimeRequestHeader ck.setTopic(requestHeader.getTopic()); ck.setQueueId(queueId); ck.addDiff(0); - ck.setBrokerName(brokerName); + ck.setBrokerName(ExtraInfoUtil.getBrokerName(extraInfo)); msgInner.setBody(JSON.toJSONString(ck).getBytes(DataConverter.CHARSET_UTF8)); msgInner.setQueueId(reviveQid); @@ -225,21 +253,36 @@ private PutMessageResult appendCheckPoint(final ChangeInvisibleTimeRequestHeader msgInner.setDeliverTimeMs(ck.getReviveTime() - PopAckConstants.ackTimeInterval); msgInner.getProperties().put(MessageConst.PROPERTY_UNIQ_CLIENT_MESSAGE_ID_KEYIDX, PopMessageProcessor.genCkUniqueId(ck)); msgInner.setPropertiesString(MessageDecoder.messageProperties2String(msgInner.getProperties())); - PutMessageResult putMessageResult = this.brokerController.getEscapeBridge().putMessageToSpecificQueue(msgInner); - - if (brokerController.getBrokerConfig().isEnablePopLog()) { - POP_LOGGER.info("change Invisible , appendCheckPoint, topic {}, queueId {},reviveId {}, cid {}, startOffset {}, rt {}, result {}", requestHeader.getTopic(), queueId, reviveQid, requestHeader.getConsumerGroup(), offset, - ck.getReviveTime(), putMessageResult); - } + return this.brokerController.getEscapeBridge().asyncPutMessageToSpecificQueue(msgInner).thenCompose(putMessageResult -> { + if (brokerController.getBrokerConfig().isEnablePopLog()) { + POP_LOGGER.info("change Invisible, appendCheckPoint, topic {}, queueId {},reviveId {}, cid {}, startOffset {}, rt {}, result {}", requestHeader.getTopic(), queueId, reviveQid, requestHeader.getConsumerGroup(), offset, + ck.getReviveTime(), putMessageResult); + } - if (putMessageResult != null) { - PopMetricsManager.incPopReviveCkPutCount(ck, putMessageResult.getPutMessageStatus()); - if (putMessageResult.isOk()) { - this.brokerController.getBrokerStatsManager().incBrokerCkNums(1); - this.brokerController.getBrokerStatsManager().incGroupCkNums(requestHeader.getConsumerGroup(), requestHeader.getTopic(), 1); + if (putMessageResult != null) { + PopMetricsManager.incPopReviveCkPutCount(ck, putMessageResult.getPutMessageStatus()); + if (putMessageResult.isOk()) { + this.brokerController.getBrokerStatsManager().incBrokerCkNums(1); + this.brokerController.getBrokerStatsManager().incGroupCkNums(requestHeader.getConsumerGroup(), requestHeader.getTopic(), 1); + } } - } + if (putMessageResult.getPutMessageStatus() != PutMessageStatus.PUT_OK + && putMessageResult.getPutMessageStatus() != PutMessageStatus.FLUSH_DISK_TIMEOUT + && putMessageResult.getPutMessageStatus() != PutMessageStatus.FLUSH_SLAVE_TIMEOUT + && putMessageResult.getPutMessageStatus() != PutMessageStatus.SLAVE_NOT_AVAILABLE) { + POP_LOGGER.error("change invisible, put new ck error: {}", putMessageResult); + return CompletableFuture.completedFuture(false); + } else { + return ackOrigin(requestHeader, extraInfo); + } + }).exceptionally(throwable -> { + POP_LOGGER.error("change invisible, 
put new ck error", throwable); + return null; + }); + } - return putMessageResult; + protected void doResponse(Channel channel, RemotingCommand request, + final RemotingCommand response) { + NettyRemotingAbstract.writeResponse(channel, request, response); } } diff --git a/broker/src/main/java/org/apache/rocketmq/broker/processor/PeekMessageProcessor.java b/broker/src/main/java/org/apache/rocketmq/broker/processor/PeekMessageProcessor.java index 55552003d80..2c0a1cd54a2 100644 --- a/broker/src/main/java/org/apache/rocketmq/broker/processor/PeekMessageProcessor.java +++ b/broker/src/main/java/org/apache/rocketmq/broker/processor/PeekMessageProcessor.java @@ -258,8 +258,8 @@ private long peekMsgFromQueue(boolean isRetry, GetMessageResult getMessageResult BrokerMetricsManager.throughputOutTotal.add(getMessageResult.getBufferTotalSize(), attributes); } - for (SelectMappedBufferResult mapedBuffer : getMessageTmpResult.getMessageMapedList()) { - getMessageResult.addMessage(mapedBuffer); + for (SelectMappedBufferResult mappedBuffer : getMessageTmpResult.getMessageMapedList()) { + getMessageResult.addMessage(mappedBuffer); } } return restNum; diff --git a/broker/src/main/java/org/apache/rocketmq/broker/processor/PopBufferMergeService.java b/broker/src/main/java/org/apache/rocketmq/broker/processor/PopBufferMergeService.java index 8a85dd8fec8..9f10b483ddb 100644 --- a/broker/src/main/java/org/apache/rocketmq/broker/processor/PopBufferMergeService.java +++ b/broker/src/main/java/org/apache/rocketmq/broker/processor/PopBufferMergeService.java @@ -216,7 +216,8 @@ private void scanGarbage() { private void scan() { long startTime = System.currentTimeMillis(); - int count = 0, countCk = 0; + AtomicInteger count = new AtomicInteger(0); + int countCk = 0; Iterator> iterator = buffer.entrySet().iterator(); while (iterator.hasNext()) { Map.Entry entry = iterator.next(); @@ -257,14 +258,14 @@ private void scan() { } else if (pointWrapper.isJustOffset()) { // just offset should be in store. 
if (pointWrapper.getReviveQueueOffset() < 0) { - putCkToStore(pointWrapper, false); + putCkToStore(pointWrapper, this.brokerController.getBrokerConfig().isAppendCkAsync()); countCk++; } continue; } else if (removeCk) { // put buffer ak to store if (pointWrapper.getReviveQueueOffset() < 0) { - putCkToStore(pointWrapper, false); + putCkToStore(pointWrapper, this.brokerController.getBrokerConfig().isAppendCkAsync()); countCk++; } @@ -278,17 +279,12 @@ private void scan() { for (byte i = 0; i < point.getNum(); i++) { // reput buffer ak to store if (DataConverter.getBit(pointWrapper.getBits().get(), i) - && !DataConverter.getBit(pointWrapper.getToStoreBits().get(), i)) { + && !DataConverter.getBit(pointWrapper.getToStoreBits().get(), i)) { indexList.add(i); } } if (indexList.size() > 0) { - if (putBatchAckToStore(pointWrapper, indexList)) { - count += indexList.size(); - for (Byte i : indexList) { - markBitCAS(pointWrapper.getToStoreBits(), i); - } - } + putBatchAckToStore(pointWrapper, indexList, count); } } finally { indexList.clear(); @@ -297,11 +293,8 @@ private void scan() { for (byte i = 0; i < point.getNum(); i++) { // reput buffer ak to store if (DataConverter.getBit(pointWrapper.getBits().get(), i) - && !DataConverter.getBit(pointWrapper.getToStoreBits().get(), i)) { - if (putAckToStore(pointWrapper, i)) { - count++; - markBitCAS(pointWrapper.getToStoreBits(), i); - } + && !DataConverter.getBit(pointWrapper.getToStoreBits().get(), i)) { + putAckToStore(pointWrapper, i, count); } } } @@ -312,7 +305,6 @@ private void scan() { } iterator.remove(); counter.decrementAndGet(); - continue; } } } @@ -323,13 +315,13 @@ private void scan() { if (eclipse > brokerController.getBrokerConfig().getPopCkStayBufferTimeOut() - 1000) { POP_LOGGER.warn("[PopBuffer]scan stop, because eclipse too long, PopBufferEclipse={}, " + "PopBufferToStoreAck={}, PopBufferToStoreCk={}, PopBufferSize={}, PopBufferOffsetSize={}", - eclipse, count, countCk, counter.get(), offsetBufferSize); + eclipse, count.get(), countCk, counter.get(), offsetBufferSize); this.serving = false; } else { if (scanTimes % countOfSecond1 == 0) { POP_LOGGER.info("[PopBuffer]scan, PopBufferEclipse={}, " + "PopBufferToStoreAck={}, PopBufferToStoreCk={}, PopBufferSize={}, PopBufferOffsetSize={}", - eclipse, count, countCk, counter.get(), offsetBufferSize); + eclipse, count.get(), countCk, counter.get(), offsetBufferSize); } } PopMetricsManager.recordPopBufferScanTimeConsume(eclipse); @@ -429,7 +421,8 @@ private boolean checkQueueOk(PopCheckPointWrapper pointWrapper) { * @param nextBeginOffset * @return */ - public boolean addCkJustOffset(PopCheckPoint point, int reviveQueueId, long reviveQueueOffset, long nextBeginOffset) { + public boolean addCkJustOffset(PopCheckPoint point, int reviveQueueId, long reviveQueueOffset, + long nextBeginOffset) { PopCheckPointWrapper pointWrapper = new PopCheckPointWrapper(reviveQueueId, reviveQueueOffset, point, nextBeginOffset, true); if (this.buffer.containsKey(pointWrapper.getMergeKey())) { @@ -439,7 +432,7 @@ public boolean addCkJustOffset(PopCheckPoint point, int reviveQueueId, long revi return false; } - this.putCkToStore(pointWrapper, !checkQueueOk(pointWrapper)); + this.putCkToStore(pointWrapper, checkQueueOk(pointWrapper)); putOffsetQueue(pointWrapper); this.buffer.put(pointWrapper.getMergeKey(), pointWrapper); @@ -447,7 +440,7 @@ public boolean addCkJustOffset(PopCheckPoint point, int reviveQueueId, long revi if (brokerController.getBrokerConfig().isEnablePopLog()) { POP_LOGGER.info("[PopBuffer]add ck 
just offset, {}", pointWrapper); } - return true; + return true; } public void addCkMock(String group, String topic, int queueId, long startOffset, long invisibleTime, @@ -597,13 +590,32 @@ private void putCkToStore(final PopCheckPointWrapper pointWrapper, final boolean if (pointWrapper.getReviveQueueOffset() >= 0) { return; } + MessageExtBrokerInner msgInner = popMessageProcessor.buildCkMsg(pointWrapper.getCk(), pointWrapper.getReviveQueueId()); - PutMessageResult putMessageResult = brokerController.getEscapeBridge().putMessageToSpecificQueue(msgInner); + + // Indicates that ck message is storing + pointWrapper.setReviveQueueOffset(Long.MAX_VALUE); + if (brokerController.getBrokerConfig().isAppendCkAsync() && runInCurrent) { + brokerController.getEscapeBridge().asyncPutMessageToSpecificQueue(msgInner).thenAccept(putMessageResult -> { + handleCkMessagePutResult(putMessageResult, pointWrapper); + }).exceptionally(throwable -> { + POP_LOGGER.error("[PopBuffer]put ck to store fail: {}", pointWrapper, throwable); + pointWrapper.setReviveQueueOffset(-1); + return null; + }); + } else { + PutMessageResult putMessageResult = brokerController.getEscapeBridge().putMessageToSpecificQueue(msgInner); + handleCkMessagePutResult(putMessageResult, pointWrapper); + } + } + + private void handleCkMessagePutResult(PutMessageResult putMessageResult, final PopCheckPointWrapper pointWrapper) { PopMetricsManager.incPopReviveCkPutCount(pointWrapper.getCk(), putMessageResult.getPutMessageStatus()); if (putMessageResult.getPutMessageStatus() != PutMessageStatus.PUT_OK && putMessageResult.getPutMessageStatus() != PutMessageStatus.FLUSH_DISK_TIMEOUT && putMessageResult.getPutMessageStatus() != PutMessageStatus.FLUSH_SLAVE_TIMEOUT && putMessageResult.getPutMessageStatus() != PutMessageStatus.SLAVE_NOT_AVAILABLE) { + pointWrapper.setReviveQueueOffset(-1); POP_LOGGER.error("[PopBuffer]put ck to store fail: {}, {}", pointWrapper, putMessageResult); return; } @@ -621,7 +633,7 @@ private void putCkToStore(final PopCheckPointWrapper pointWrapper, final boolean } } - private boolean putAckToStore(final PopCheckPointWrapper pointWrapper, byte msgIndex) { + private void putAckToStore(final PopCheckPointWrapper pointWrapper, byte msgIndex, AtomicInteger count) { PopCheckPoint point = pointWrapper.getCk(); MessageExtBrokerInner msgInner = new MessageExtBrokerInner(); final AckMsg ackMsg = new AckMsg(); @@ -632,6 +644,7 @@ private boolean putAckToStore(final PopCheckPointWrapper pointWrapper, byte msgI ackMsg.setTopic(point.getTopic()); ackMsg.setQueueId(point.getQueueId()); ackMsg.setPopTime(point.getPopTime()); + ackMsg.setBrokerName(point.getBrokerName()); msgInner.setTopic(popMessageProcessor.reviveTopic); msgInner.setBody(JSON.toJSONString(ackMsg).getBytes(DataConverter.CHARSET_UTF8)); msgInner.setQueueId(pointWrapper.getReviveQueueId()); @@ -643,23 +656,39 @@ private boolean putAckToStore(final PopCheckPointWrapper pointWrapper, byte msgI msgInner.getProperties().put(MessageConst.PROPERTY_UNIQ_CLIENT_MESSAGE_ID_KEYIDX, PopMessageProcessor.genAckUniqueId(ackMsg)); msgInner.setPropertiesString(MessageDecoder.messageProperties2String(msgInner.getProperties())); - PutMessageResult putMessageResult = brokerController.getEscapeBridge().putMessageToSpecificQueue(msgInner); + + if (brokerController.getBrokerConfig().isAppendAckAsync()) { + brokerController.getEscapeBridge().asyncPutMessageToSpecificQueue(msgInner).thenAccept(putMessageResult -> { + handleAckPutMessageResult(ackMsg, putMessageResult, pointWrapper, count, msgIndex); 
+ }).exceptionally(throwable -> { + POP_LOGGER.error("[PopBuffer]put ack to store fail: {}, {}", pointWrapper, ackMsg, throwable); + return null; + }); + } else { + PutMessageResult putMessageResult = brokerController.getEscapeBridge().putMessageToSpecificQueue(msgInner); + handleAckPutMessageResult(ackMsg, putMessageResult, pointWrapper, count, msgIndex); + } + } + + private void handleAckPutMessageResult(AckMsg ackMsg, PutMessageResult putMessageResult, + PopCheckPointWrapper pointWrapper, AtomicInteger count, byte msgIndex) { PopMetricsManager.incPopReviveAckPutCount(ackMsg, putMessageResult.getPutMessageStatus()); if (putMessageResult.getPutMessageStatus() != PutMessageStatus.PUT_OK && putMessageResult.getPutMessageStatus() != PutMessageStatus.FLUSH_DISK_TIMEOUT && putMessageResult.getPutMessageStatus() != PutMessageStatus.FLUSH_SLAVE_TIMEOUT && putMessageResult.getPutMessageStatus() != PutMessageStatus.SLAVE_NOT_AVAILABLE) { POP_LOGGER.error("[PopBuffer]put ack to store fail: {}, {}, {}", pointWrapper, ackMsg, putMessageResult); - return false; + return; } if (brokerController.getBrokerConfig().isEnablePopLog()) { POP_LOGGER.info("[PopBuffer]put ack to store ok: {}, {}, {}", pointWrapper, ackMsg, putMessageResult); } - - return true; + count.incrementAndGet(); + markBitCAS(pointWrapper.getToStoreBits(), msgIndex); } - private boolean putBatchAckToStore(final PopCheckPointWrapper pointWrapper, final List msgIndexList) { + private void putBatchAckToStore(final PopCheckPointWrapper pointWrapper, final List msgIndexList, + AtomicInteger count) { PopCheckPoint point = pointWrapper.getCk(); MessageExtBrokerInner msgInner = new MessageExtBrokerInner(); final BatchAckMsg batchAckMsg = new BatchAckMsg(); @@ -683,19 +712,36 @@ private boolean putBatchAckToStore(final PopCheckPointWrapper pointWrapper, fina msgInner.getProperties().put(MessageConst.PROPERTY_UNIQ_CLIENT_MESSAGE_ID_KEYIDX, PopMessageProcessor.genBatchAckUniqueId(batchAckMsg)); msgInner.setPropertiesString(MessageDecoder.messageProperties2String(msgInner.getProperties())); - PutMessageResult putMessageResult = brokerController.getEscapeBridge().putMessageToSpecificQueue(msgInner); + if (brokerController.getBrokerConfig().isAppendAckAsync()) { + brokerController.getEscapeBridge().asyncPutMessageToSpecificQueue(msgInner).thenAccept(putMessageResult -> { + handleBatchAckPutMessageResult(batchAckMsg, putMessageResult, pointWrapper, count, msgIndexList); + }).exceptionally(throwable -> { + POP_LOGGER.error("[PopBuffer]put batchAckMsg to store fail: {}, {}", pointWrapper, batchAckMsg, throwable); + return null; + }); + } else { + PutMessageResult putMessageResult = brokerController.getEscapeBridge().putMessageToSpecificQueue(msgInner); + handleBatchAckPutMessageResult(batchAckMsg, putMessageResult, pointWrapper, count, msgIndexList); + } + } + + private void handleBatchAckPutMessageResult(BatchAckMsg batchAckMsg, PutMessageResult putMessageResult, + PopCheckPointWrapper pointWrapper, AtomicInteger count, List msgIndexList) { if (putMessageResult.getPutMessageStatus() != PutMessageStatus.PUT_OK - && putMessageResult.getPutMessageStatus() != PutMessageStatus.FLUSH_DISK_TIMEOUT - && putMessageResult.getPutMessageStatus() != PutMessageStatus.FLUSH_SLAVE_TIMEOUT - && putMessageResult.getPutMessageStatus() != PutMessageStatus.SLAVE_NOT_AVAILABLE) { + && putMessageResult.getPutMessageStatus() != PutMessageStatus.FLUSH_DISK_TIMEOUT + && putMessageResult.getPutMessageStatus() != PutMessageStatus.FLUSH_SLAVE_TIMEOUT + && 
putMessageResult.getPutMessageStatus() != PutMessageStatus.SLAVE_NOT_AVAILABLE) { POP_LOGGER.error("[PopBuffer]put batch ack to store fail: {}, {}, {}", pointWrapper, batchAckMsg, putMessageResult); - return false; + return; } if (brokerController.getBrokerConfig().isEnablePopLog()) { POP_LOGGER.info("[PopBuffer]put batch ack to store ok: {}, {}, {}", pointWrapper, batchAckMsg, putMessageResult); } - return true; + count.addAndGet(msgIndexList.size()); + for (Byte i : msgIndexList) { + markBitCAS(pointWrapper.getToStoreBits(), i); + } } private boolean cancelCkTimer(final PopCheckPointWrapper pointWrapper) { diff --git a/broker/src/main/java/org/apache/rocketmq/broker/processor/PopMessageProcessor.java b/broker/src/main/java/org/apache/rocketmq/broker/processor/PopMessageProcessor.java index 5430fdec94d..2d76c5a3caa 100644 --- a/broker/src/main/java/org/apache/rocketmq/broker/processor/PopMessageProcessor.java +++ b/broker/src/main/java/org/apache/rocketmq/broker/processor/PopMessageProcessor.java @@ -373,7 +373,7 @@ public RemotingCommand processRequest(final ChannelHandlerContext ctx, RemotingC // considered the same type because they share the same retry flag in previous fields. // Therefore, needRetryV1 is designed as a subset of needRetry, and within a single request, // only one type of retry topic is able to call popMsgFromQueue. - boolean needRetry = randomQ % 5 == 0; + boolean needRetry = randomQ < brokerConfig.getPopFromRetryProbability(); boolean needRetryV1 = false; if (brokerConfig.isEnableRetryTopicV2() && brokerConfig.isRetrieveMessageFromPopRetryTopicV1()) { needRetryV1 = randomQ % 2 == 0; diff --git a/broker/src/main/java/org/apache/rocketmq/broker/processor/PullMessageProcessor.java b/broker/src/main/java/org/apache/rocketmq/broker/processor/PullMessageProcessor.java index d53454f215d..6dd8b300478 100644 --- a/broker/src/main/java/org/apache/rocketmq/broker/processor/PullMessageProcessor.java +++ b/broker/src/main/java/org/apache/rocketmq/broker/processor/PullMessageProcessor.java @@ -799,7 +799,7 @@ public void executeRequestWhenWakeup(final Channel channel, final RemotingComman } } } catch (RemotingCommandException e1) { - LOGGER.error("excuteRequestWhenWakeup run", e1); + LOGGER.error("executeRequestWhenWakeup run", e1); } }; this.brokerController.getPullMessageExecutor().submit(new RequestTask(run, channel, request)); diff --git a/broker/src/main/java/org/apache/rocketmq/broker/processor/ReplyMessageProcessor.java b/broker/src/main/java/org/apache/rocketmq/broker/processor/ReplyMessageProcessor.java index d3bb048f75d..a70b48debe1 100644 --- a/broker/src/main/java/org/apache/rocketmq/broker/processor/ReplyMessageProcessor.java +++ b/broker/src/main/java/org/apache/rocketmq/broker/processor/ReplyMessageProcessor.java @@ -115,10 +115,10 @@ private RemotingCommand processReplyMessageRequest(final ChannelHandlerContext c response.addExtField(MessageConst.PROPERTY_TRACE_SWITCH, String.valueOf(this.brokerController.getBrokerConfig().isTraceOn())); log.debug("receive SendReplyMessage request command, {}", request); - final long startTimstamp = this.brokerController.getBrokerConfig().getStartAcceptSendRequestTimeStamp(); - if (this.brokerController.getMessageStore().now() < startTimstamp) { + final long startTimestamp = this.brokerController.getBrokerConfig().getStartAcceptSendRequestTimeStamp(); + if (this.brokerController.getMessageStore().now() < startTimestamp) { response.setCode(ResponseCode.SYSTEM_ERROR); - response.setRemark(String.format("broker unable to service, 
until %s", UtilAll.timeMillisToHumanString2(startTimstamp))); + response.setRemark(String.format("broker unable to service, until %s", UtilAll.timeMillisToHumanString2(startTimestamp))); return response; } diff --git a/broker/src/main/java/org/apache/rocketmq/broker/subscription/RocksDBSubscriptionGroupManager.java b/broker/src/main/java/org/apache/rocketmq/broker/subscription/RocksDBSubscriptionGroupManager.java index 7df72dbe686..5119f78672c 100644 --- a/broker/src/main/java/org/apache/rocketmq/broker/subscription/RocksDBSubscriptionGroupManager.java +++ b/broker/src/main/java/org/apache/rocketmq/broker/subscription/RocksDBSubscriptionGroupManager.java @@ -19,6 +19,12 @@ import com.alibaba.fastjson.JSON; import com.alibaba.fastjson.JSONObject; import com.alibaba.fastjson.serializer.SerializerFeature; +import java.io.File; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.function.BiConsumer; import org.apache.rocketmq.broker.BrokerController; import org.apache.rocketmq.broker.RocksDBConfigManager; import org.apache.rocketmq.common.UtilAll; @@ -27,13 +33,6 @@ import org.apache.rocketmq.remoting.protocol.subscription.SubscriptionGroupConfig; import org.rocksdb.RocksIterator; -import java.io.File; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.function.BiConsumer; - public class RocksDBSubscriptionGroupManager extends SubscriptionGroupManager { protected RocksDBConfigManager rocksDBConfigManager; @@ -79,28 +78,30 @@ public boolean loadForbidden(BiConsumer biConsumer) { private boolean merge() { if (!brokerController.getMessageStoreConfig().isTransferMetadataJsonToRocksdb()) { - log.info("The switch is off, no merge operation is needed."); + log.info("the switch transferMetadataJsonToRocksdb is off, no merge subGroup operation is needed."); return true; } if (!UtilAll.isPathExists(this.configFilePath()) && !UtilAll.isPathExists(this.configFilePath() + ".bak")) { - log.info("json file and json back file not exist, so skip merge"); + log.info("subGroup json file does not exist, so skip merge"); return true; } - - if (!super.load()) { - log.error("load group and forbidden info from json file error, startup will exit"); + if (!super.loadDataVersion()) { + log.error("load json subGroup dataVersion error, startup will exit"); return false; } - - final ConcurrentMap groupTable = this.getSubscriptionGroupTable(); - final ConcurrentMap> forbiddenTable = this.getForbiddenTable(); final DataVersion dataVersion = super.getDataVersion(); final DataVersion kvDataVersion = this.getDataVersion(); if (dataVersion.getCounter().get() > kvDataVersion.getCounter().get()) { + if (!super.load()) { + log.error("load group and forbidden info from json file error, startup will exit"); + return false; + } + final ConcurrentMap groupTable = this.getSubscriptionGroupTable(); for (Map.Entry entry : groupTable.entrySet()) { putSubscriptionGroupConfig(entry.getValue()); log.info("import subscription config to rocksdb, group={}", entry.getValue()); } + final ConcurrentMap> forbiddenTable = this.getForbiddenTable(); for (Map.Entry> entry : forbiddenTable.entrySet()) { try { this.rocksDBConfigManager.updateForbidden(entry.getKey(), JSON.toJSONString(entry.getValue())); @@ -110,8 +111,10 @@ private boolean merge() { return false; } } - this.rocksDBConfigManager.getKvDataVersion().assignNewOne(dataVersion); + 
this.getDataVersion().assignNewOne(dataVersion); updateDataVersion(); + } else { + log.info("dataVersion is not greater than kvDataVersion, no need to merge group metaData, dataVersion={}, kvDataVersion={}", dataVersion, kvDataVersion); } log.info("finish marge subscription config from json file and merge to rocksdb"); this.persist(); @@ -196,6 +199,7 @@ public void updateDataVersion() { try { rocksDBConfigManager.updateKvDataVersion(); } catch (Exception e) { + log.error("update group config dataVersion error", e); throw new RuntimeException(e); } } diff --git a/broker/src/main/java/org/apache/rocketmq/broker/subscription/SubscriptionGroupManager.java b/broker/src/main/java/org/apache/rocketmq/broker/subscription/SubscriptionGroupManager.java index f2a7e0482b1..e6855ef9a2a 100644 --- a/broker/src/main/java/org/apache/rocketmq/broker/subscription/SubscriptionGroupManager.java +++ b/broker/src/main/java/org/apache/rocketmq/broker/subscription/SubscriptionGroupManager.java @@ -334,6 +334,26 @@ public DataVersion getDataVersion() { return dataVersion; } + public boolean loadDataVersion() { + String fileName = null; + try { + fileName = this.configFilePath(); + String jsonString = MixAll.file2String(fileName); + if (jsonString != null) { + SubscriptionGroupManager obj = RemotingSerializable.fromJson(jsonString, SubscriptionGroupManager.class); + if (obj != null) { + this.dataVersion.assignNewOne(obj.dataVersion); + this.printLoadDataWhenFirstBoot(obj); + log.info("load subGroup dataVersion success,{},{}", fileName, obj.dataVersion); + } + } + return true; + } catch (Exception e) { + log.error("load subGroup dataVersion failed" + fileName, e); + return false; + } + } + public void deleteSubscriptionGroupConfig(final String groupName) { SubscriptionGroupConfig old = removeSubscriptionGroupConfig(groupName); this.forbiddenTable.remove(groupName); diff --git a/broker/src/main/java/org/apache/rocketmq/broker/topic/RocksDBTopicConfigManager.java b/broker/src/main/java/org/apache/rocketmq/broker/topic/RocksDBTopicConfigManager.java index 2a89dd7e024..466e6416f98 100644 --- a/broker/src/main/java/org/apache/rocketmq/broker/topic/RocksDBTopicConfigManager.java +++ b/broker/src/main/java/org/apache/rocketmq/broker/topic/RocksDBTopicConfigManager.java @@ -18,6 +18,9 @@ import com.alibaba.fastjson.JSON; import com.alibaba.fastjson.serializer.SerializerFeature; +import java.io.File; +import java.util.Map; +import java.util.concurrent.ConcurrentMap; import org.apache.rocketmq.broker.BrokerController; import org.apache.rocketmq.broker.RocksDBConfigManager; import org.apache.rocketmq.common.TopicConfig; @@ -25,10 +28,6 @@ import org.apache.rocketmq.common.utils.DataConverter; import org.apache.rocketmq.remoting.protocol.DataVersion; -import java.io.File; -import java.util.Map; -import java.util.concurrent.ConcurrentMap; - public class RocksDBTopicConfigManager extends TopicConfigManager { protected RocksDBConfigManager rocksDBConfigManager; @@ -60,29 +59,35 @@ public boolean loadDataVersion() { private boolean merge() { if (!brokerController.getMessageStoreConfig().isTransferMetadataJsonToRocksdb()) { - log.info("The switch is off, no merge operation is needed."); + log.info("the switch transferMetadataJsonToRocksdb is off, no merge topic operation is needed."); return true; } if (!UtilAll.isPathExists(this.configFilePath()) && !UtilAll.isPathExists(this.configFilePath() + ".bak")) { - log.info("json file and json back file not exist, so skip merge"); + log.info("topic json file does not exist, so skip 
merge"); return true; } - if (!super.load()) { - log.error("load topic config from json file error, startup will exit"); + if (!super.loadDataVersion()) { + log.error("load json topic dataVersion error, startup will exit"); return false; } - final ConcurrentMap topicConfigTable = this.getTopicConfigTable(); final DataVersion dataVersion = super.getDataVersion(); final DataVersion kvDataVersion = this.getDataVersion(); if (dataVersion.getCounter().get() > kvDataVersion.getCounter().get()) { + if (!super.load()) { + log.error("load topic config from json file error, startup will exit"); + return false; + } + final ConcurrentMap topicConfigTable = this.getTopicConfigTable(); for (Map.Entry entry : topicConfigTable.entrySet()) { putTopicConfig(entry.getValue()); log.info("import topic config to rocksdb, topic={}", entry.getValue()); } - this.rocksDBConfigManager.getKvDataVersion().assignNewOne(dataVersion); + this.getDataVersion().assignNewOne(dataVersion); updateDataVersion(); + } else { + log.info("dataVersion is not greater than kvDataVersion, no need to merge topic metaData, dataVersion={}, kvDataVersion={}", dataVersion, kvDataVersion); } log.info("finish read topic config from json file and merge to rocksdb"); this.persist(); @@ -150,6 +155,7 @@ public void updateDataVersion() { try { rocksDBConfigManager.updateKvDataVersion(); } catch (Exception e) { + log.error("update topic config dataVersion error", e); throw new RuntimeException(e); } } diff --git a/broker/src/main/java/org/apache/rocketmq/broker/topic/TopicConfigManager.java b/broker/src/main/java/org/apache/rocketmq/broker/topic/TopicConfigManager.java index eab2896b001..25d3218f2ab 100644 --- a/broker/src/main/java/org/apache/rocketmq/broker/topic/TopicConfigManager.java +++ b/broker/src/main/java/org/apache/rocketmq/broker/topic/TopicConfigManager.java @@ -637,6 +637,26 @@ public String encode() { return encode(false); } + public boolean loadDataVersion() { + String fileName = null; + try { + fileName = this.configFilePath(); + String jsonString = MixAll.file2String(fileName); + if (jsonString != null) { + TopicConfigSerializeWrapper topicConfigSerializeWrapper = + TopicConfigSerializeWrapper.fromJson(jsonString, TopicConfigSerializeWrapper.class); + if (topicConfigSerializeWrapper != null) { + this.dataVersion.assignNewOne(topicConfigSerializeWrapper.getDataVersion()); + log.info("load topic metadata dataVersion success {}, {}", fileName, topicConfigSerializeWrapper.getDataVersion()); + } + } + return true; + } catch (Exception e) { + log.error("load topic metadata dataVersion failed" + fileName, e); + return false; + } + } + @Override public String configFilePath() { return BrokerPathConfigHelper.getTopicConfigPath(this.brokerController.getMessageStoreConfig().getStorePathRootDir()); diff --git a/broker/src/test/java/org/apache/rocketmq/broker/client/rebalance/RebalanceLockManagerTest.java b/broker/src/test/java/org/apache/rocketmq/broker/client/rebalance/RebalanceLockManagerTest.java new file mode 100644 index 00000000000..e231d61b6a7 --- /dev/null +++ b/broker/src/test/java/org/apache/rocketmq/broker/client/rebalance/RebalanceLockManagerTest.java @@ -0,0 +1,167 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.rocketmq.broker.client.rebalance; + +import org.apache.commons.lang3.reflect.FieldUtils; +import org.apache.rocketmq.common.message.MessageQueue; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.when; + +@RunWith(MockitoJUnitRunner.class) +public class RebalanceLockManagerTest { + + @Mock + private RebalanceLockManager.LockEntry lockEntry; + + private final RebalanceLockManager rebalanceLockManager = new RebalanceLockManager(); + + private final String defaultTopic = "defaultTopic"; + + private final String defaultBroker = "defaultBroker"; + + private final String defaultGroup = "defaultGroup"; + + private final String defaultClientId = "defaultClientId"; + + @Test + public void testIsLockAllExpiredGroupNotExist() { + assertTrue(rebalanceLockManager.isLockAllExpired(defaultGroup)); + } + + @Test + public void testIsLockAllExpiredGroupExist() throws IllegalAccessException { + FieldUtils.writeDeclaredField(rebalanceLockManager, "mqLockTable", createMQLockTable(), true); + when(lockEntry.isExpired()).thenReturn(false); + assertFalse(rebalanceLockManager.isLockAllExpired(defaultGroup)); + } + + @Test + public void testIsLockAllExpiredGroupExistSomeExpired() throws IllegalAccessException { + FieldUtils.writeDeclaredField(rebalanceLockManager, "mqLockTable", createMQLockTable(), true); + when(lockEntry.isExpired()).thenReturn(true).thenReturn(false); + assertFalse(rebalanceLockManager.isLockAllExpired(defaultGroup)); + } + + @Test + public void testTryLockNotLocked() { + assertTrue(rebalanceLockManager.tryLock(defaultGroup, createDefaultMessageQueue(), defaultClientId)); + } + + @Test + public void testTryLockSameClient() throws IllegalAccessException { + when(lockEntry.isLocked(defaultClientId)).thenReturn(true); + FieldUtils.writeDeclaredField(rebalanceLockManager, "mqLockTable", createMQLockTable(), true); + assertTrue(rebalanceLockManager.tryLock(defaultGroup, createDefaultMessageQueue(), defaultClientId)); + } + + @Test + public void testTryLockDifferentClient() throws Exception { + when(lockEntry.isLocked(defaultClientId)).thenReturn(false); + FieldUtils.writeDeclaredField(rebalanceLockManager, "mqLockTable", createMQLockTable(), true); + assertFalse(rebalanceLockManager.tryLock(defaultGroup, createDefaultMessageQueue(), defaultClientId)); + } + + @Test + public void testTryLockButExpired() throws IllegalAccessException { + when(lockEntry.isExpired()).thenReturn(true); + FieldUtils.writeDeclaredField(rebalanceLockManager, "mqLockTable", createMQLockTable(), true); + 
assertTrue(rebalanceLockManager.tryLock(defaultGroup, createDefaultMessageQueue(), defaultClientId)); + } + + @Test + public void testTryLockBatchAllLocked() { + Set mqs = createMessageQueue(2); + Set actual = rebalanceLockManager.tryLockBatch(defaultGroup, mqs, defaultClientId); + assertEquals(mqs, actual); + } + + @Test + public void testTryLockBatchNoneLocked() throws IllegalAccessException { + when(lockEntry.isLocked(defaultClientId)).thenReturn(false); + FieldUtils.writeDeclaredField(rebalanceLockManager, "mqLockTable", createMQLockTable(), true); + Set actual = rebalanceLockManager.tryLockBatch(defaultGroup, createMessageQueue(2), defaultClientId); + assertTrue(actual.isEmpty()); + } + + @Test + public void testTryLockBatchSomeLocked() throws IllegalAccessException { + Set mqs = new HashSet<>(); + MessageQueue mq1 = new MessageQueue(defaultTopic, defaultBroker, 0); + MessageQueue mq2 = new MessageQueue(defaultTopic, defaultBroker, 1); + mqs.add(mq1); + mqs.add(mq2); + when(lockEntry.isLocked(defaultClientId)).thenReturn(true).thenReturn(false); + FieldUtils.writeDeclaredField(rebalanceLockManager, "mqLockTable", createMQLockTable(), true); + Set actual = rebalanceLockManager.tryLockBatch(defaultGroup, mqs, defaultClientId); + Set expected = new HashSet<>(); + expected.add(mq2); + assertEquals(expected, actual); + } + + @Test + public void testUnlockBatch() throws IllegalAccessException { + when(lockEntry.getClientId()).thenReturn(defaultClientId); + ConcurrentMap> mqLockTable = createMQLockTable(); + FieldUtils.writeDeclaredField(rebalanceLockManager, "mqLockTable", mqLockTable, true); + rebalanceLockManager.unlockBatch(defaultGroup, createMessageQueue(1), defaultClientId); + assertEquals(1, mqLockTable.get(defaultGroup).values().size()); + } + + @Test + public void testUnlockBatchByOtherClient() throws IllegalAccessException { + when(lockEntry.getClientId()).thenReturn("otherClientId"); + ConcurrentMap> mqLockTable = createMQLockTable(); + FieldUtils.writeDeclaredField(rebalanceLockManager, "mqLockTable", mqLockTable, true); + rebalanceLockManager.unlockBatch(defaultGroup, createMessageQueue(1), defaultClientId); + assertEquals(2, mqLockTable.get(defaultGroup).values().size()); + } + + private MessageQueue createDefaultMessageQueue() { + return createMessageQueue(1).iterator().next(); + } + + private Set createMessageQueue(final int count) { + Set result = new HashSet<>(); + for (int i = 0; i < count; i++) { + result.add(new MessageQueue(defaultTopic, defaultBroker, i)); + } + return result; + } + + private ConcurrentMap> createMQLockTable() { + MessageQueue messageQueue1 = new MessageQueue(defaultTopic, defaultBroker, 0); + MessageQueue messageQueue2 = new MessageQueue(defaultTopic, defaultBroker, 1); + ConcurrentHashMap lockEntryMap = new ConcurrentHashMap<>(); + lockEntryMap.put(messageQueue1, lockEntry); + lockEntryMap.put(messageQueue2, lockEntry); + ConcurrentMap> result = new ConcurrentHashMap<>(); + result.put(defaultGroup, lockEntryMap); + return result; + } +} diff --git a/broker/src/test/java/org/apache/rocketmq/broker/longpolling/PopLongPollingServiceTest.java b/broker/src/test/java/org/apache/rocketmq/broker/longpolling/PopLongPollingServiceTest.java new file mode 100644 index 00000000000..6527beeb682 --- /dev/null +++ b/broker/src/test/java/org/apache/rocketmq/broker/longpolling/PopLongPollingServiceTest.java @@ -0,0 +1,220 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.rocketmq.broker.longpolling; + +import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap; +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandlerContext; +import org.apache.commons.lang3.reflect.FieldUtils; +import org.apache.rocketmq.broker.BrokerController; +import org.apache.rocketmq.common.BrokerConfig; +import org.apache.rocketmq.common.KeyBuilder; +import org.apache.rocketmq.remoting.netty.NettyRequestProcessor; +import org.apache.rocketmq.remoting.protocol.RemotingCommand; +import org.apache.rocketmq.remoting.protocol.heartbeat.SubscriptionData; +import org.apache.rocketmq.store.MessageFilter; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentSkipListSet; +import java.util.concurrent.ExecutorService; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +@RunWith(MockitoJUnitRunner.class) +public class PopLongPollingServiceTest { + + @Mock + private BrokerController brokerController; + + @Mock + private NettyRequestProcessor processor; + + @Mock + private ChannelHandlerContext ctx; + + @Mock + private ExecutorService pullMessageExecutor; + + private PopLongPollingService popLongPollingService; + + private final String defaultTopic = "defaultTopic"; + + @Before + public void init() { + BrokerConfig brokerConfig = new BrokerConfig(); + brokerConfig.setPopPollingMapSize(100); + when(brokerController.getBrokerConfig()).thenReturn(brokerConfig); + popLongPollingService = spy(new PopLongPollingService(brokerController, processor, true)); + } + + @Test + public void testNotifyMessageArrivingWithRetryTopic() { + int queueId = 0; + doNothing().when(popLongPollingService).notifyMessageArrivingWithRetryTopic(defaultTopic, queueId, null, 0L, null, null); + popLongPollingService.notifyMessageArrivingWithRetryTopic(defaultTopic, queueId); + verify(popLongPollingService, times(1)).notifyMessageArrivingWithRetryTopic(defaultTopic, queueId, null, 0L, null, null); + } + + @Test + public void testNotifyMessageArriving() { + int queueId = 0; + Long tagsCode = 123L; + long msgStoreTime = System.currentTimeMillis(); + byte[] filterBitMap = new byte[]{0x01}; + Map properties = new ConcurrentHashMap<>(); + 
doNothing().when(popLongPollingService).notifyMessageArriving(defaultTopic, queueId, tagsCode, msgStoreTime, filterBitMap, properties); + popLongPollingService.notifyMessageArrivingWithRetryTopic(defaultTopic, queueId, tagsCode, msgStoreTime, filterBitMap, properties); + verify(popLongPollingService).notifyMessageArriving(defaultTopic, queueId, tagsCode, msgStoreTime, filterBitMap, properties); + } + + @Test + public void testNotifyMessageArrivingValidRequest() throws Exception { + String cid = "CID_1"; + int queueId = 0; + ConcurrentHashMap> topicCidMap = new ConcurrentHashMap<>(); + ConcurrentHashMap cids = new ConcurrentHashMap<>(); + cids.put(cid, (byte) 1); + topicCidMap.put(defaultTopic, cids); + popLongPollingService = new PopLongPollingService(brokerController, processor, true); + ConcurrentLinkedHashMap> pollingMap = + new ConcurrentLinkedHashMap.Builder>().maximumWeightedCapacity(this.brokerController.getBrokerConfig().getPopPollingMapSize()).build(); + Channel channel = mock(Channel.class); + when(channel.isActive()).thenReturn(true); + PopRequest popRequest = mock(PopRequest.class); + MessageFilter messageFilter = mock(MessageFilter.class); + SubscriptionData subscriptionData = mock(SubscriptionData.class); + when(popRequest.getMessageFilter()).thenReturn(messageFilter); + when(popRequest.getSubscriptionData()).thenReturn(subscriptionData); + when(popRequest.getChannel()).thenReturn(channel); + String pollingKey = KeyBuilder.buildPollingKey(defaultTopic, cid, queueId); + ConcurrentSkipListSet popRequests = mock(ConcurrentSkipListSet.class); + when(popRequests.pollLast()).thenReturn(popRequest); + pollingMap.put(pollingKey, popRequests); + FieldUtils.writeDeclaredField(popLongPollingService, "topicCidMap", topicCidMap, true); + FieldUtils.writeDeclaredField(popLongPollingService, "pollingMap", pollingMap, true); + boolean actual = popLongPollingService.notifyMessageArriving(defaultTopic, queueId, cid, null, 0, null, null); + assertFalse(actual); + } + + @Test + public void testWakeUpNullRequest() { + assertFalse(popLongPollingService.wakeUp(null)); + } + + @Test + public void testWakeUpIncompleteRequest() { + PopRequest request = mock(PopRequest.class); + when(request.complete()).thenReturn(false); + assertFalse(popLongPollingService.wakeUp(request)); + } + + @Test + public void testWakeUpInactiveChannel() { + PopRequest request = mock(PopRequest.class); + when(request.complete()).thenReturn(true); + when(request.getCtx()).thenReturn(ctx); + Channel channel = mock(Channel.class); + when(ctx.channel()).thenReturn(channel); + when(channel.isActive()).thenReturn(true); + when(brokerController.getPullMessageExecutor()).thenReturn(pullMessageExecutor); + assertTrue(popLongPollingService.wakeUp(request)); + } + + @Test + public void testWakeUpValidRequestWithException() throws Exception { + PopRequest request = mock(PopRequest.class); + when(request.complete()).thenReturn(true); + when(request.getCtx()).thenReturn(ctx); + Channel channel = mock(Channel.class); + when(ctx.channel()).thenReturn(channel); + when(request.getChannel()).thenReturn(channel); + when(channel.isActive()).thenReturn(true); + when(brokerController.getPullMessageExecutor()).thenReturn(pullMessageExecutor); + when(processor.processRequest(any(), any())).thenThrow(new RuntimeException("Test Exception")); + assertTrue(popLongPollingService.wakeUp(request)); + ArgumentCaptor captor = ArgumentCaptor.forClass(Runnable.class); + verify(pullMessageExecutor).submit(captor.capture()); + captor.getValue().run(); + 
verify(processor).processRequest(any(), any()); + } + + @Test + public void testPollingNotPolling() { + ChannelHandlerContext ctx = mock(ChannelHandlerContext.class); + RemotingCommand remotingCommand = mock(RemotingCommand.class); + PollingHeader requestHeader = mock(PollingHeader.class); + SubscriptionData subscriptionData = mock(SubscriptionData.class); + MessageFilter messageFilter = mock(MessageFilter.class); + when(requestHeader.getPollTime()).thenReturn(0L); + PollingResult result = popLongPollingService.polling(ctx, remotingCommand, requestHeader, subscriptionData, messageFilter); + assertEquals(PollingResult.NOT_POLLING, result); + } + + @Test + public void testPollingServicePollingTimeout() throws IllegalAccessException { + String cid = "CID_1"; + popLongPollingService = new PopLongPollingService(brokerController, processor, true); + popLongPollingService.shutdown(); + ChannelHandlerContext ctx = mock(ChannelHandlerContext.class); + RemotingCommand remotingCommand = mock(RemotingCommand.class); + PollingHeader requestHeader = mock(PollingHeader.class); + SubscriptionData subscriptionData = mock(SubscriptionData.class); + MessageFilter messageFilter = mock(MessageFilter.class); + when(requestHeader.getPollTime()).thenReturn(1000L); + when(requestHeader.getTopic()).thenReturn(defaultTopic); + when(requestHeader.getConsumerGroup()).thenReturn("defaultGroup"); + ConcurrentHashMap> topicCidMap = new ConcurrentHashMap<>(); + ConcurrentHashMap cids = new ConcurrentHashMap<>(); + cids.put(cid, (byte) 1); + topicCidMap.put(defaultTopic, cids); + FieldUtils.writeDeclaredField(popLongPollingService, "topicCidMap", topicCidMap, true); + PollingResult result = popLongPollingService.polling(ctx, remotingCommand, requestHeader, subscriptionData, messageFilter); + assertEquals(PollingResult.POLLING_TIMEOUT, result); + } + + @Test + public void testPollingPollingSuc() { + ChannelHandlerContext ctx = mock(ChannelHandlerContext.class); + RemotingCommand remotingCommand = mock(RemotingCommand.class); + PollingHeader requestHeader = mock(PollingHeader.class); + SubscriptionData subscriptionData = mock(SubscriptionData.class); + MessageFilter messageFilter = mock(MessageFilter.class); + when(requestHeader.getPollTime()).thenReturn(1000L); + when(requestHeader.getBornTime()).thenReturn(System.currentTimeMillis()); + when(requestHeader.getTopic()).thenReturn("topic"); + when(requestHeader.getConsumerGroup()).thenReturn("cid"); + when(requestHeader.getQueueId()).thenReturn(0); + PollingResult result = popLongPollingService.polling(ctx, remotingCommand, requestHeader, subscriptionData, messageFilter); + assertEquals(PollingResult.POLLING_SUC, result); + } +} diff --git a/broker/src/test/java/org/apache/rocketmq/broker/offset/RocksdbTransferOffsetAndCqTest.java b/broker/src/test/java/org/apache/rocketmq/broker/offset/RocksdbTransferOffsetAndCqTest.java new file mode 100644 index 00000000000..b4800aec24e --- /dev/null +++ b/broker/src/test/java/org/apache/rocketmq/broker/offset/RocksdbTransferOffsetAndCqTest.java @@ -0,0 +1,154 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.rocketmq.broker.offset; + +import java.io.IOException; +import java.nio.file.Paths; +import java.util.HashMap; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import org.apache.commons.collections.MapUtils; +import org.apache.rocketmq.broker.BrokerController; +import org.apache.rocketmq.common.BrokerConfig; +import org.apache.rocketmq.common.MixAll; +import org.apache.rocketmq.common.Pair; +import org.apache.rocketmq.common.TopicConfig; +import org.apache.rocketmq.store.DefaultMessageStore; +import org.apache.rocketmq.store.DispatchRequest; +import org.apache.rocketmq.store.RocksDBMessageStore; +import org.apache.rocketmq.store.config.MessageStoreConfig; +import org.apache.rocketmq.store.queue.ConsumeQueueInterface; +import org.apache.rocketmq.store.queue.ConsumeQueueStoreInterface; +import org.apache.rocketmq.store.queue.CqUnit; +import org.apache.rocketmq.store.stats.BrokerStatsManager; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.rocksdb.RocksDBException; + +@RunWith(MockitoJUnitRunner.class) +public class RocksdbTransferOffsetAndCqTest { + + private final String basePath = Paths.get(System.getProperty("user.home"), + "unit-test-store", UUID.randomUUID().toString().substring(0, 16).toUpperCase()).toString(); + + private final String topic = "topic"; + private final String group = "group"; + private final String clientHost = "clientHost"; + private final int queueId = 1; + + private RocksDBConsumerOffsetManager rocksdbConsumerOffsetManager; + + private ConsumerOffsetManager consumerOffsetManager; + + private DefaultMessageStore defaultMessageStore; + + @Mock + private BrokerController brokerController; + + @Before + public void init() throws IOException { + if (notToBeExecuted()) { + return; + } + BrokerConfig brokerConfig = new BrokerConfig(); + brokerConfig.setConsumerOffsetUpdateVersionStep(10); + MessageStoreConfig messageStoreConfig = new MessageStoreConfig(); + messageStoreConfig.setStorePathRootDir(basePath); + messageStoreConfig.setTransferOffsetJsonToRocksdb(true); + messageStoreConfig.setRocksdbCQDoubleWriteEnable(true); + Mockito.lenient().when(brokerController.getBrokerConfig()).thenReturn(brokerConfig); + Mockito.lenient().when(brokerController.getMessageStoreConfig()).thenReturn(messageStoreConfig); + + defaultMessageStore = new DefaultMessageStore(messageStoreConfig, new BrokerStatsManager("aaa", true), null, + brokerConfig, new ConcurrentHashMap()); + defaultMessageStore.enableRocksdbCQWrite(); + defaultMessageStore.loadCheckPoint(); + + consumerOffsetManager = new ConsumerOffsetManager(brokerController); + consumerOffsetManager.load(); + + rocksdbConsumerOffsetManager = new RocksDBConsumerOffsetManager(brokerController); + } + + @Test + public void testTransferOffset() { + if (notToBeExecuted()) { + return; + } + + for (int i = 0; i < 200; i++) { + 
consumerOffsetManager.commitOffset(clientHost, group, topic, queueId, i); + } + + ConcurrentMap> offsetTable = consumerOffsetManager.getOffsetTable(); + ConcurrentMap map = offsetTable.get(topic + "@" + group); + Assert.assertTrue(MapUtils.isNotEmpty(map)); + + Long offset = map.get(queueId); + Assert.assertEquals(199L, (long) offset); + + long offsetDataVersion = consumerOffsetManager.getDataVersion().getCounter().get(); + Assert.assertEquals(20L, offsetDataVersion); + + consumerOffsetManager.persist(); + + boolean loadResult = rocksdbConsumerOffsetManager.load(); + Assert.assertTrue(loadResult); + + ConcurrentMap> rocksdbOffsetTable = rocksdbConsumerOffsetManager.getOffsetTable(); + + ConcurrentMap rocksdbMap = rocksdbOffsetTable.get(topic + "@" + group); + Assert.assertTrue(MapUtils.isNotEmpty(rocksdbMap)); + + Long aLong1 = rocksdbMap.get(queueId); + Assert.assertEquals(199L, (long) aLong1); + + long rocksdbOffset = rocksdbConsumerOffsetManager.getDataVersion().getCounter().get(); + Assert.assertEquals(21L, rocksdbOffset); + } + + @Test + public void testRocksdbCqWrite() throws RocksDBException { + if (notToBeExecuted()) { + return; + } + RocksDBMessageStore kvStore = defaultMessageStore.getRocksDBMessageStore(); + ConsumeQueueStoreInterface store = kvStore.getConsumeQueueStore(); + ConsumeQueueInterface rocksdbCq = defaultMessageStore.getRocksDBMessageStore().findConsumeQueue(topic, queueId); + ConsumeQueueInterface fileCq = defaultMessageStore.findConsumeQueue(topic, queueId); + for (int i = 0; i < 200; i++) { + DispatchRequest request = new DispatchRequest(topic, queueId, i, 200, 0, System.currentTimeMillis(), i, "", "", 0, 0, new HashMap<>()); + fileCq.putMessagePositionInfoWrapper(request); + store.putMessagePositionInfoWrapper(request); + } + Pair unit = rocksdbCq.getCqUnitAndStoreTime(100); + Pair unit1 = fileCq.getCqUnitAndStoreTime(100); + Assert.assertTrue(unit.getObject1().getPos() == unit1.getObject1().getPos()); + } + + private boolean notToBeExecuted() { + return MixAll.isMac(); + } + +} diff --git a/broker/src/test/java/org/apache/rocketmq/broker/processor/ChangeInvisibleTimeProcessorTest.java b/broker/src/test/java/org/apache/rocketmq/broker/processor/ChangeInvisibleTimeProcessorTest.java index ee11f046d01..a7aae7ee3dc 100644 --- a/broker/src/test/java/org/apache/rocketmq/broker/processor/ChangeInvisibleTimeProcessorTest.java +++ b/broker/src/test/java/org/apache/rocketmq/broker/processor/ChangeInvisibleTimeProcessorTest.java @@ -19,6 +19,7 @@ import io.netty.channel.Channel; import io.netty.channel.ChannelHandlerContext; import java.lang.reflect.Field; +import java.util.concurrent.CompletableFuture; import org.apache.rocketmq.broker.BrokerController; import org.apache.rocketmq.broker.client.ClientChannelInfo; import org.apache.rocketmq.broker.client.net.Broker2Client; @@ -108,7 +109,7 @@ public void init() throws IllegalAccessException, NoSuchFieldException { @Test public void testProcessRequest_Success() throws RemotingCommandException, InterruptedException, RemotingTimeoutException, RemotingSendRequestException { - when(escapeBridge.putMessageToSpecificQueue(any(MessageExtBrokerInner.class))).thenReturn(new PutMessageResult(PutMessageStatus.PUT_OK, new AppendMessageResult(AppendMessageStatus.PUT_OK))); + when(escapeBridge.asyncPutMessageToSpecificQueue(any(MessageExtBrokerInner.class))).thenReturn(CompletableFuture.completedFuture(new PutMessageResult(PutMessageStatus.PUT_OK, new AppendMessageResult(AppendMessageStatus.PUT_OK)))); int queueId = 0; long queueOffset = 
0; long popTime = System.currentTimeMillis() - 1_000; diff --git a/client/pom.xml b/client/pom.xml index 5a6c92f97dd..e13d106a17d 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -19,7 +19,7 @@ org.apache.rocketmq rocketmq-all - 5.3.1-SNAPSHOT + 5.3.2-SNAPSHOT 4.0.0 diff --git a/client/src/main/java/org/apache/rocketmq/client/consumer/PopStatus.java b/client/src/main/java/org/apache/rocketmq/client/consumer/PopStatus.java index 17dda9a2001..57fbe67bcca 100644 --- a/client/src/main/java/org/apache/rocketmq/client/consumer/PopStatus.java +++ b/client/src/main/java/org/apache/rocketmq/client/consumer/PopStatus.java @@ -23,7 +23,7 @@ public enum PopStatus { FOUND, /** * No new message can be pull after polling time out - * delete after next realease + * delete after next release */ NO_NEW_MSG, /** diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/MQClientAPIImpl.java b/client/src/main/java/org/apache/rocketmq/client/impl/MQClientAPIImpl.java index 8a3d3dd0dcb..0e5571eb130 100644 --- a/client/src/main/java/org/apache/rocketmq/client/impl/MQClientAPIImpl.java +++ b/client/src/main/java/org/apache/rocketmq/client/impl/MQClientAPIImpl.java @@ -78,6 +78,7 @@ import org.apache.rocketmq.common.namesrv.TopAddressing; import org.apache.rocketmq.common.sysflag.PullSysFlag; import org.apache.rocketmq.common.topic.TopicValidator; +import org.apache.rocketmq.common.utils.StartAndShutdown; import org.apache.rocketmq.logging.org.slf4j.Logger; import org.apache.rocketmq.logging.org.slf4j.LoggerFactory; import org.apache.rocketmq.remoting.ChannelEventListener; @@ -112,6 +113,7 @@ import org.apache.rocketmq.remoting.protocol.body.BrokerReplicasInfo; import org.apache.rocketmq.remoting.protocol.body.BrokerStatsData; import org.apache.rocketmq.remoting.protocol.body.CheckClientRequestBody; +import org.apache.rocketmq.remoting.protocol.body.CheckRocksdbCqWriteProgressResponseBody; import org.apache.rocketmq.remoting.protocol.body.ClusterAclVersionInfo; import org.apache.rocketmq.remoting.protocol.body.ClusterInfo; import org.apache.rocketmq.remoting.protocol.body.ConsumeMessageDirectlyResult; @@ -147,6 +149,7 @@ import org.apache.rocketmq.remoting.protocol.header.AddBrokerRequestHeader; import org.apache.rocketmq.remoting.protocol.header.ChangeInvisibleTimeRequestHeader; import org.apache.rocketmq.remoting.protocol.header.ChangeInvisibleTimeResponseHeader; +import org.apache.rocketmq.remoting.protocol.header.CheckRocksdbCqWriteProgressRequestHeader; import org.apache.rocketmq.remoting.protocol.header.CloneGroupOffsetRequestHeader; import org.apache.rocketmq.remoting.protocol.header.ConsumeMessageDirectlyResultRequestHeader; import org.apache.rocketmq.remoting.protocol.header.ConsumerSendMsgBackRequestHeader; @@ -184,9 +187,9 @@ import org.apache.rocketmq.remoting.protocol.header.GetTopicStatsInfoRequestHeader; import org.apache.rocketmq.remoting.protocol.header.GetTopicsByClusterRequestHeader; import org.apache.rocketmq.remoting.protocol.header.GetUserRequestHeader; +import org.apache.rocketmq.remoting.protocol.header.HeartbeatRequestHeader; import org.apache.rocketmq.remoting.protocol.header.ListAclsRequestHeader; import org.apache.rocketmq.remoting.protocol.header.ListUsersRequestHeader; -import org.apache.rocketmq.remoting.protocol.header.HeartbeatRequestHeader; import org.apache.rocketmq.remoting.protocol.header.LockBatchMqRequestHeader; import org.apache.rocketmq.remoting.protocol.header.PopMessageRequestHeader; import org.apache.rocketmq.remoting.protocol.header.PopMessageResponseHeader; 
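The RocksDB offset-migration test earlier in this section reads committed offsets back through ConsumerOffsetManager's two-level map. A minimal hedged sketch of that lookup, with the generic parameters written out and the variable names taken from the test (imports omitted):

// Offsets are kept per "topic@group" key and then per queue id; the test commits
// offsets 0..199 and expects to read 199 back for this queue.
ConcurrentMap<String, ConcurrentMap<Integer, Long>> offsetTable = consumerOffsetManager.getOffsetTable();
ConcurrentMap<Integer, Long> queueOffsets = offsetTable.get(topic + "@" + group);
Long committed = queueOffsets == null ? null : queueOffsets.get(queueId);   // 199L in the test

Both the file-based manager and the RocksDB-backed manager are expected to return the same value here, which is what the paired assertions in the test verify.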
@@ -247,7 +250,7 @@ import static org.apache.rocketmq.remoting.protocol.RemotingSysResponseCode.SUCCESS; -public class MQClientAPIImpl implements NameServerUpdateCallback { +public class MQClientAPIImpl implements NameServerUpdateCallback, StartAndShutdown { private final static Logger log = LoggerFactory.getLogger(MQClientAPIImpl.class); private static boolean sendSmartMsg = Boolean.parseBoolean(System.getProperty("org.apache.rocketmq.client.sendSmartMsg", "true")); @@ -1616,10 +1619,10 @@ public void queryMessage( final QueryMessageRequestHeader requestHeader, final long timeoutMillis, final InvokeCallback invokeCallback, - final Boolean isUnqiueKey + final Boolean isUniqueKey ) throws RemotingException, MQBrokerException, InterruptedException { RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.QUERY_MESSAGE, requestHeader); - request.addExtField(MixAll.UNIQUE_MSG_QUERY_FLAG, isUnqiueKey.toString()); + request.addExtField(MixAll.UNIQUE_MSG_QUERY_FLAG, isUniqueKey.toString()); this.remotingClient.invokeAsync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr), request, timeoutMillis, invokeCallback); } @@ -3016,6 +3019,19 @@ public QueryConsumeQueueResponseBody queryConsumeQueue(final String brokerAddr, throw new MQClientException(response.getCode(), response.getRemark()); } + public CheckRocksdbCqWriteProgressResponseBody checkRocksdbCqWriteProgress(final String brokerAddr, final String topic, final long timeoutMillis) throws InterruptedException, + RemotingTimeoutException, RemotingSendRequestException, RemotingConnectException, MQClientException { + CheckRocksdbCqWriteProgressRequestHeader header = new CheckRocksdbCqWriteProgressRequestHeader(); + header.setTopic(topic); + RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.CHECK_ROCKSDB_CQ_WRITE_PROGRESS, header); + RemotingCommand response = this.remotingClient.invokeSync(brokerAddr, request, timeoutMillis); + assert response != null; + if (ResponseCode.SUCCESS == response.getCode()) { + return CheckRocksdbCqWriteProgressResponseBody.decode(response.getBody(), CheckRocksdbCqWriteProgressResponseBody.class); + } + throw new MQClientException(response.getCode(), response.getRemark()); + } + public void checkClientInBroker(final String brokerAddr, final String consumerGroup, final String clientId, final SubscriptionData subscriptionData, final long timeoutMillis) diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/DefaultLitePullConsumerImpl.java b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/DefaultLitePullConsumerImpl.java index a3276cd7823..3f90b67ec99 100644 --- a/client/src/main/java/org/apache/rocketmq/client/impl/consumer/DefaultLitePullConsumerImpl.java +++ b/client/src/main/java/org/apache/rocketmq/client/impl/consumer/DefaultLitePullConsumerImpl.java @@ -164,10 +164,6 @@ private enum SubscriptionType { public DefaultLitePullConsumerImpl(final DefaultLitePullConsumer defaultLitePullConsumer, final RPCHook rpcHook) { this.defaultLitePullConsumer = defaultLitePullConsumer; this.rpcHook = rpcHook; - this.scheduledThreadPoolExecutor = new ScheduledThreadPoolExecutor( - this.defaultLitePullConsumer.getPullThreadNums(), - new ThreadFactoryImpl("PullMsgThread-" + this.defaultLitePullConsumer.getConsumerGroup()) - ); this.scheduledExecutorService = Executors.newSingleThreadScheduledExecutor(new ThreadFactoryImpl("MonitorMessageQueueChangeThread")); this.pullTimeDelayMillsWhenException = 
defaultLitePullConsumer.getPullTimeDelayMillsWhenException(); } @@ -293,6 +289,8 @@ public synchronized void start() throws MQClientException { this.defaultLitePullConsumer.changeInstanceNameToPID(); } + initScheduledThreadPoolExecutor(); + initMQClientFactory(); initRebalanceImpl(); @@ -324,6 +322,13 @@ public synchronized void start() throws MQClientException { } } + private void initScheduledThreadPoolExecutor() { + this.scheduledThreadPoolExecutor = new ScheduledThreadPoolExecutor( + this.defaultLitePullConsumer.getPullThreadNums(), + new ThreadFactoryImpl("PullMsgThread-" + this.defaultLitePullConsumer.getConsumerGroup()) + ); + } + private void initMQClientFactory() throws MQClientException { this.mQClientFactory = MQClientManager.getInstance().getOrCreateMQClientInstance(this.defaultLitePullConsumer, this.rpcHook); boolean registerOK = mQClientFactory.registerConsumer(this.defaultLitePullConsumer.getConsumerGroup(), this); diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/mqclient/MQClientAPIFactory.java b/client/src/main/java/org/apache/rocketmq/client/impl/mqclient/MQClientAPIFactory.java index c68859b2889..0fa31b66406 100644 --- a/client/src/main/java/org/apache/rocketmq/client/impl/mqclient/MQClientAPIFactory.java +++ b/client/src/main/java/org/apache/rocketmq/client/impl/mqclient/MQClientAPIFactory.java @@ -26,6 +26,7 @@ import org.apache.rocketmq.client.common.NameserverAccessConfig; import org.apache.rocketmq.client.impl.ClientRemotingProcessor; import org.apache.rocketmq.common.MixAll; +import org.apache.rocketmq.common.utils.AsyncShutdownHelper; import org.apache.rocketmq.common.utils.StartAndShutdown; import org.apache.rocketmq.remoting.RPCHook; import org.apache.rocketmq.remoting.netty.NettyClientConfig; @@ -85,9 +86,11 @@ public void start() throws Exception { @Override public void shutdown() throws Exception { + AsyncShutdownHelper helper = new AsyncShutdownHelper(); for (int i = 0; i < this.clientNum; i++) { - clients[i].shutdown(); + helper.addTarget(clients[i]); } + helper.shutdown().await(Integer.MAX_VALUE, TimeUnit.SECONDS); } protected MQClientAPIExt createAndStart(String instanceName) { diff --git a/client/src/main/java/org/apache/rocketmq/client/impl/producer/DefaultMQProducerImpl.java b/client/src/main/java/org/apache/rocketmq/client/impl/producer/DefaultMQProducerImpl.java index 0e70ee25951..3d4fdbec373 100644 --- a/client/src/main/java/org/apache/rocketmq/client/impl/producer/DefaultMQProducerImpl.java +++ b/client/src/main/java/org/apache/rocketmq/client/impl/producer/DefaultMQProducerImpl.java @@ -194,6 +194,14 @@ public void setSemaphoreAsyncSendSize(int size) { semaphoreAsyncSendSize = new Semaphore(size, true); } + public int getSemaphoreAsyncSendNumAvailablePermits() { + return semaphoreAsyncSendNum == null ? 0 : semaphoreAsyncSendNum.availablePermits(); + } + + public int getSemaphoreAsyncSendSizeAvailablePermits() { + return semaphoreAsyncSendSize == null ? 
0 : semaphoreAsyncSendSize.availablePermits(); + } + public void initTransactionEnv() { TransactionMQProducer producer = (TransactionMQProducer) this.defaultMQProducer; if (producer.getExecutorService() != null) { @@ -242,6 +250,8 @@ public void start(final boolean startFactory) throws MQClientException { this.mQClientFactory = MQClientManager.getInstance().getOrCreateMQClientInstance(this.defaultMQProducer, rpcHook); + defaultMQProducer.initProduceAccumulator(); + boolean registerOK = mQClientFactory.registerProducer(this.defaultMQProducer.getProducerGroup(), this); if (!registerOK) { this.serviceState = ServiceState.CREATE_JUST; @@ -563,7 +573,7 @@ public void run() { class BackpressureSendCallBack implements SendCallback { public boolean isSemaphoreAsyncSizeAcquired = false; - public boolean isSemaphoreAsyncNumbAcquired = false; + public boolean isSemaphoreAsyncNumAcquired = false; public int msgLen; private final SendCallback sendCallback; @@ -573,24 +583,49 @@ public BackpressureSendCallBack(final SendCallback sendCallback) { @Override public void onSuccess(SendResult sendResult) { - if (isSemaphoreAsyncSizeAcquired) { - semaphoreAsyncSendSize.release(msgLen); - } - if (isSemaphoreAsyncNumbAcquired) { - semaphoreAsyncSendNum.release(); - } + semaphoreProcessor(); sendCallback.onSuccess(sendResult); } @Override public void onException(Throwable e) { + semaphoreProcessor(); + sendCallback.onException(e); + } + + public void semaphoreProcessor() { if (isSemaphoreAsyncSizeAcquired) { + defaultMQProducer.acquireBackPressureForAsyncSendSizeLock(); semaphoreAsyncSendSize.release(msgLen); + defaultMQProducer.releaseBackPressureForAsyncSendSizeLock(); } - if (isSemaphoreAsyncNumbAcquired) { + if (isSemaphoreAsyncNumAcquired) { + defaultMQProducer.acquireBackPressureForAsyncSendNumLock(); semaphoreAsyncSendNum.release(); + defaultMQProducer.releaseBackPressureForAsyncSendNumLock(); } - sendCallback.onException(e); + } + + public void semaphoreAsyncAdjust(int semaphoreAsyncNum, int semaphoreAsyncSize) throws InterruptedException { + defaultMQProducer.acquireBackPressureForAsyncSendNumLock(); + if (semaphoreAsyncNum > 0) { + semaphoreAsyncSendNum.release(semaphoreAsyncNum); + } else { + semaphoreAsyncSendNum.acquire(- semaphoreAsyncNum); + } + defaultMQProducer.setBackPressureForAsyncSendNumInsideAdjust(defaultMQProducer.getBackPressureForAsyncSendNum() + + semaphoreAsyncNum); + defaultMQProducer.releaseBackPressureForAsyncSendNumLock(); + + defaultMQProducer.acquireBackPressureForAsyncSendSizeLock(); + if (semaphoreAsyncSize > 0) { + semaphoreAsyncSendSize.release(semaphoreAsyncSize); + } else { + semaphoreAsyncSendSize.acquire(- semaphoreAsyncSize); + } + defaultMQProducer.setBackPressureForAsyncSendSizeInsideAdjust(defaultMQProducer.getBackPressureForAsyncSendSize() + + semaphoreAsyncSize); + defaultMQProducer.releaseBackPressureForAsyncSendSizeLock(); } } @@ -599,32 +634,40 @@ public void executeAsyncMessageSend(Runnable runnable, final Message msg, final throws MQClientException, InterruptedException { ExecutorService executor = this.getAsyncSenderExecutor(); boolean isEnableBackpressureForAsyncMode = this.getDefaultMQProducer().isEnableBackpressureForAsyncMode(); - boolean isSemaphoreAsyncNumbAcquired = false; + boolean isSemaphoreAsyncNumAcquired = false; boolean isSemaphoreAsyncSizeAcquired = false; int msgLen = msg.getBody() == null ? 
1 : msg.getBody().length; + sendCallback.msgLen = msgLen; try { if (isEnableBackpressureForAsyncMode) { + defaultMQProducer.acquireBackPressureForAsyncSendNumLock(); long costTime = System.currentTimeMillis() - beginStartTime; - isSemaphoreAsyncNumbAcquired = timeout - costTime > 0 + + isSemaphoreAsyncNumAcquired = timeout - costTime > 0 && semaphoreAsyncSendNum.tryAcquire(timeout - costTime, TimeUnit.MILLISECONDS); - if (!isSemaphoreAsyncNumbAcquired) { + sendCallback.isSemaphoreAsyncNumAcquired = isSemaphoreAsyncNumAcquired; + defaultMQProducer.releaseBackPressureForAsyncSendNumLock(); + if (!isSemaphoreAsyncNumAcquired) { sendCallback.onException( new RemotingTooMuchRequestException("send message tryAcquire semaphoreAsyncNum timeout")); return; } + + defaultMQProducer.acquireBackPressureForAsyncSendSizeLock(); costTime = System.currentTimeMillis() - beginStartTime; + isSemaphoreAsyncSizeAcquired = timeout - costTime > 0 && semaphoreAsyncSendSize.tryAcquire(msgLen, timeout - costTime, TimeUnit.MILLISECONDS); + sendCallback.isSemaphoreAsyncSizeAcquired = isSemaphoreAsyncSizeAcquired; + defaultMQProducer.releaseBackPressureForAsyncSendSizeLock(); if (!isSemaphoreAsyncSizeAcquired) { sendCallback.onException( new RemotingTooMuchRequestException("send message tryAcquire semaphoreAsyncSize timeout")); return; } } - sendCallback.isSemaphoreAsyncSizeAcquired = isSemaphoreAsyncSizeAcquired; - sendCallback.isSemaphoreAsyncNumbAcquired = isSemaphoreAsyncNumbAcquired; - sendCallback.msgLen = msgLen; + executor.submit(runnable); } catch (RejectedExecutionException e) { if (isEnableBackpressureForAsyncMode) { diff --git a/client/src/main/java/org/apache/rocketmq/client/lock/ReadWriteCASLock.java b/client/src/main/java/org/apache/rocketmq/client/lock/ReadWriteCASLock.java new file mode 100644 index 00000000000..3d157313715 --- /dev/null +++ b/client/src/main/java/org/apache/rocketmq/client/lock/ReadWriteCASLock.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.rocketmq.client.lock; + +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +public class ReadWriteCASLock { + //true : can lock ; false : not lock + private final AtomicBoolean writeLock = new AtomicBoolean(true); + + private final AtomicInteger readLock = new AtomicInteger(0); + + public void acquireWriteLock() { + boolean isLock = false; + do { + isLock = writeLock.compareAndSet(true, false); + } while (!isLock); + + do { + isLock = readLock.get() == 0; + } while (!isLock); + } + + public void releaseWriteLock() { + this.writeLock.compareAndSet(false, true); + } + + public void acquireReadLock() { + boolean isLock = false; + do { + isLock = writeLock.get(); + } while (!isLock); + readLock.getAndIncrement(); + } + + public void releaseReadLock() { + this.readLock.getAndDecrement(); + } + + public boolean getWriteLock() { + return this.writeLock.get() && this.readLock.get() == 0; + } + + public boolean getReadLock() { + return this.writeLock.get(); + } + +} diff --git a/client/src/main/java/org/apache/rocketmq/client/producer/DefaultMQProducer.java b/client/src/main/java/org/apache/rocketmq/client/producer/DefaultMQProducer.java index b47c01f6764..a8bf7cee85f 100644 --- a/client/src/main/java/org/apache/rocketmq/client/producer/DefaultMQProducer.java +++ b/client/src/main/java/org/apache/rocketmq/client/producer/DefaultMQProducer.java @@ -24,6 +24,7 @@ import org.apache.rocketmq.client.exception.RequestTimeoutException; import org.apache.rocketmq.client.impl.MQClientManager; import org.apache.rocketmq.client.impl.producer.DefaultMQProducerImpl; +import org.apache.rocketmq.client.lock.ReadWriteCASLock; import org.apache.rocketmq.client.trace.AsyncTraceDispatcher; import org.apache.rocketmq.client.trace.TraceDispatcher; import org.apache.rocketmq.client.trace.hook.EndTransactionTraceHookImpl; @@ -173,8 +174,33 @@ public class DefaultMQProducer extends ClientConfig implements MQProducer { */ private int backPressureForAsyncSendSize = 100 * 1024 * 1024; + /** + * Maximum hold time of accumulator. + */ + private int batchMaxDelayMs = -1; + + /** + * Maximum accumulation message body size for a single messageAccumulation. + */ + private long batchMaxBytes = -1; + + /** + * Maximum message body size for produceAccumulator. + */ + private long totalBatchMaxBytes = -1; + private RPCHook rpcHook = null; + /** + * backPressureForAsyncSendNum is guaranteed to be modified at runtime and no new requests are allowed + */ + private final ReadWriteCASLock backPressureForAsyncSendNumLock = new ReadWriteCASLock(); + + /** + * backPressureForAsyncSendSize is guaranteed to be modified at runtime and no new requests are allowed + */ + private final ReadWriteCASLock backPressureForAsyncSendSizeLock = new ReadWriteCASLock(); + /** * Compress level of compress algorithm. 
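The ReadWriteCASLock added above allows any number of readers to proceed concurrently while a single writer spins until every reader has drained. A minimal usage sketch, assuming the caller pairs each acquire with a release (the critical-section contents are illustrative):

ReadWriteCASLock lock = new ReadWriteCASLock();

// Hot path, many threads: hold the read lock around the semaphore operations it guards.
lock.acquireReadLock();
try {
    // e.g. tryAcquire()/release() on a back-pressure semaphore
} finally {
    lock.releaseReadLock();
}

// Rare path, one thread: the write lock first blocks new readers, then waits out in-flight ones.
lock.acquireWriteLock();
try {
    // e.g. swap in a resized semaphore
} finally {
    lock.releaseWriteLock();
}

The read side is only a volatile check plus one atomic increment, presumably why a heavier ReentrantReadWriteLock was avoided; the trade-off is busy-waiting on the infrequent resize path.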
*/ @@ -282,7 +308,6 @@ public DefaultMQProducer(final String producerGroup, RPCHook rpcHook, final List this.enableTrace = enableMsgTrace; this.traceTopic = customizedTraceTopic; defaultMQProducerImpl = new DefaultMQProducerImpl(this, rpcHook); - produceAccumulator = MQClientManager.getInstance().getOrCreateProduceAccumulator(this); } /** @@ -309,7 +334,6 @@ public DefaultMQProducer(final String namespace, final String producerGroup, RPC this.producerGroup = producerGroup; this.rpcHook = rpcHook; defaultMQProducerImpl = new DefaultMQProducerImpl(this, rpcHook); - produceAccumulator = MQClientManager.getInstance().getOrCreateProduceAccumulator(this); } /** @@ -1157,10 +1181,10 @@ public int getBatchMaxDelayMs() { } public void batchMaxDelayMs(int holdMs) { - if (this.produceAccumulator == null) { - throw new UnsupportedOperationException("The currently constructed producer does not support autoBatch"); + this.batchMaxDelayMs = holdMs; + if (this.produceAccumulator != null) { + this.produceAccumulator.batchMaxDelayMs(holdMs); } - this.produceAccumulator.batchMaxDelayMs(holdMs); } public long getBatchMaxBytes() { @@ -1171,10 +1195,10 @@ public long getBatchMaxBytes() { } public void batchMaxBytes(long holdSize) { - if (this.produceAccumulator == null) { - throw new UnsupportedOperationException("The currently constructed producer does not support autoBatch"); + this.batchMaxBytes = holdSize; + if (this.produceAccumulator != null) { + this.produceAccumulator.batchMaxBytes(holdSize); } - this.produceAccumulator.batchMaxBytes(holdSize); } public long getTotalBatchMaxBytes() { @@ -1185,10 +1209,10 @@ public long getTotalBatchMaxBytes() { } public void totalBatchMaxBytes(long totalHoldSize) { - if (this.produceAccumulator == null) { - throw new UnsupportedOperationException("The currently constructed producer does not support autoBatch"); + this.totalBatchMaxBytes = totalHoldSize; + if (this.produceAccumulator != null) { + this.produceAccumulator.totalBatchMaxBytes(totalHoldSize); } - this.produceAccumulator.totalBatchMaxBytes(totalHoldSize); } public boolean getAutoBatch() { @@ -1199,9 +1223,6 @@ public boolean getAutoBatch() { } public void setAutoBatch(boolean autoBatch) { - if (this.produceAccumulator == null) { - throw new UnsupportedOperationException("The currently constructed producer does not support autoBatch"); - } this.autoBatch = autoBatch; } @@ -1334,18 +1355,64 @@ public int getBackPressureForAsyncSendNum() { return backPressureForAsyncSendNum; } + /** + * For user modify backPressureForAsyncSendNum at runtime + */ public void setBackPressureForAsyncSendNum(int backPressureForAsyncSendNum) { + this.backPressureForAsyncSendNumLock.acquireWriteLock(); + backPressureForAsyncSendNum = Math.max(backPressureForAsyncSendNum, 10); + int acquiredBackPressureForAsyncSendNum = this.backPressureForAsyncSendNum + - defaultMQProducerImpl.getSemaphoreAsyncSendNumAvailablePermits(); this.backPressureForAsyncSendNum = backPressureForAsyncSendNum; - defaultMQProducerImpl.setSemaphoreAsyncSendNum(backPressureForAsyncSendNum); + defaultMQProducerImpl.setSemaphoreAsyncSendNum(backPressureForAsyncSendNum - acquiredBackPressureForAsyncSendNum); + this.backPressureForAsyncSendNumLock.releaseWriteLock(); } public int getBackPressureForAsyncSendSize() { return backPressureForAsyncSendSize; } + /** + * For user modify backPressureForAsyncSendSize at runtime + */ public void setBackPressureForAsyncSendSize(int backPressureForAsyncSendSize) { + this.backPressureForAsyncSendSizeLock.acquireWriteLock(); + 
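// The lines below clamp the new size to a 1 MB floor, compute how many permits are currently
// held by in-flight sends (old limit minus available permits), and rebuild the semaphore with
// newLimit - heldPermits, so outstanding sends keep counting against the new limit.
// Worked example: old limit 100 MB with 70 MB still available means 30 MB is held; raising the
// limit to 150 MB creates a semaphore holding 120 MB of permits, and the pending releases
// eventually bring the available permits back up to 150 MB.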
backPressureForAsyncSendSize = Math.max(backPressureForAsyncSendSize, 1024 * 1024); + int acquiredBackPressureForAsyncSendSize = this.backPressureForAsyncSendSize + - defaultMQProducerImpl.getSemaphoreAsyncSendSizeAvailablePermits(); this.backPressureForAsyncSendSize = backPressureForAsyncSendSize; - defaultMQProducerImpl.setSemaphoreAsyncSendSize(backPressureForAsyncSendSize); + defaultMQProducerImpl.setSemaphoreAsyncSendSize(backPressureForAsyncSendSize - acquiredBackPressureForAsyncSendSize); + this.backPressureForAsyncSendSizeLock.releaseWriteLock(); + } + + /** + * Used for system internal adjust backPressureForAsyncSendSize + */ + public void setBackPressureForAsyncSendSizeInsideAdjust(int backPressureForAsyncSendSize) { + this.backPressureForAsyncSendSize = backPressureForAsyncSendSize; + } + + /** + * Used for system internal adjust backPressureForAsyncSendNum + */ + public void setBackPressureForAsyncSendNumInsideAdjust(int backPressureForAsyncSendNum) { + this.backPressureForAsyncSendNum = backPressureForAsyncSendNum; + } + + public void acquireBackPressureForAsyncSendSizeLock() { + this.backPressureForAsyncSendSizeLock.acquireReadLock(); + } + + public void releaseBackPressureForAsyncSendSizeLock() { + this.backPressureForAsyncSendSizeLock.releaseReadLock(); + } + + public void acquireBackPressureForAsyncSendNumLock() { + this.backPressureForAsyncSendNumLock.acquireReadLock(); + } + + public void releaseBackPressureForAsyncSendNumLock() { + this.backPressureForAsyncSendNumLock.releaseReadLock(); } public List getTopics() { @@ -1382,4 +1449,21 @@ public void setCompressType(CompressionType compressType) { public Compressor getCompressor() { return compressor; } + + public void initProduceAccumulator() { + this.produceAccumulator = MQClientManager.getInstance().getOrCreateProduceAccumulator(this); + + if (this.batchMaxDelayMs > -1) { + this.produceAccumulator.batchMaxDelayMs(this.batchMaxDelayMs); + } + + if (this.batchMaxBytes > -1) { + this.produceAccumulator.batchMaxBytes(this.batchMaxBytes); + } + + if (this.totalBatchMaxBytes > -1) { + this.produceAccumulator.totalBatchMaxBytes(this.totalBatchMaxBytes); + } + + } } diff --git a/client/src/main/java/org/apache/rocketmq/client/trace/AsyncTraceDispatcher.java b/client/src/main/java/org/apache/rocketmq/client/trace/AsyncTraceDispatcher.java index 6d62617eb8e..e321e1583d2 100644 --- a/client/src/main/java/org/apache/rocketmq/client/trace/AsyncTraceDispatcher.java +++ b/client/src/main/java/org/apache/rocketmq/client/trace/AsyncTraceDispatcher.java @@ -302,14 +302,24 @@ public void run() { public void sendTraceData(List contextList) { Map> transBeanMap = new HashMap<>(16); - String currentRegionId; + String traceTopic; for (TraceContext context : contextList) { - currentRegionId = context.getRegionId(); + AccessChannel accessChannel = context.getAccessChannel(); + if (accessChannel == null) { + accessChannel = AsyncTraceDispatcher.this.accessChannel; + } + String currentRegionId = context.getRegionId(); if (currentRegionId == null || context.getTraceBeans().isEmpty()) { continue; } + if (AccessChannel.CLOUD == accessChannel) { + traceTopic = TraceConstants.TRACE_TOPIC_PREFIX + currentRegionId; + } else { + traceTopic = traceTopicName; + } + String topic = context.getTraceBeans().get(0).getTopic(); - String key = topic + TraceConstants.CONTENT_SPLITOR + currentRegionId; + String key = topic + TraceConstants.CONTENT_SPLITOR + traceTopic; List transBeanList = transBeanMap.computeIfAbsent(key, k -> new ArrayList<>()); TraceTransferBean 
traceData = TraceDataEncoder.encoderFromContextBean(context); transBeanList.add(traceData); @@ -320,7 +330,7 @@ public void sendTraceData(List contextList) { } } - private void flushData(List transBeanList, String topic, String currentRegionId) { + private void flushData(List transBeanList, String topic, String traceTopic) { if (transBeanList.size() == 0) { return; } @@ -332,14 +342,14 @@ private void flushData(List transBeanList, String topic, Stri buffer.append(bean.getTransData()); count++; if (buffer.length() >= traceProducer.getMaxMessageSize()) { - sendTraceDataByMQ(keySet, buffer.toString(), TraceConstants.TRACE_TOPIC_PREFIX + currentRegionId); + sendTraceDataByMQ(keySet, buffer.toString(), traceTopic); buffer.delete(0, buffer.length()); keySet.clear(); count = 0; } } if (count > 0) { - sendTraceDataByMQ(keySet, buffer.toString(), TraceConstants.TRACE_TOPIC_PREFIX + currentRegionId); + sendTraceDataByMQ(keySet, buffer.toString(), traceTopic); } transBeanList.clear(); } diff --git a/client/src/test/java/org/apache/rocketmq/client/consumer/DefaultLitePullConsumerTest.java b/client/src/test/java/org/apache/rocketmq/client/consumer/DefaultLitePullConsumerTest.java index 65237bc8f76..592c247057b 100644 --- a/client/src/test/java/org/apache/rocketmq/client/consumer/DefaultLitePullConsumerTest.java +++ b/client/src/test/java/org/apache/rocketmq/client/consumer/DefaultLitePullConsumerTest.java @@ -63,8 +63,6 @@ import org.mockito.Spy; import org.mockito.invocation.InvocationOnMock; import org.mockito.junit.MockitoJUnitRunner; -import org.mockito.quality.Strictness; -import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.stubbing.Answer; import static org.assertj.core.api.Assertions.assertThat; @@ -81,8 +79,7 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; -@RunWith(MockitoJUnitRunner.class) -@MockitoSettings(strictness = Strictness.LENIENT) +@RunWith(MockitoJUnitRunner.Silent.class) public class DefaultLitePullConsumerTest { @Spy private MQClientInstance mQClientFactory = MQClientManager.getInstance().getOrCreateMQClientInstance(new ClientConfig()); diff --git a/client/src/test/java/org/apache/rocketmq/client/consumer/DefaultMQPushConsumerTest.java b/client/src/test/java/org/apache/rocketmq/client/consumer/DefaultMQPushConsumerTest.java index a10fd74b34f..834be5cf16f 100644 --- a/client/src/test/java/org/apache/rocketmq/client/consumer/DefaultMQPushConsumerTest.java +++ b/client/src/test/java/org/apache/rocketmq/client/consumer/DefaultMQPushConsumerTest.java @@ -209,7 +209,9 @@ public PullResult answer(InvocationOnMock mock) throws Throwable { @AfterClass public static void terminate() { - pushConsumer.shutdown(); + if (pushConsumer != null) { + pushConsumer.shutdown(); + } } @Test diff --git a/client/src/test/java/org/apache/rocketmq/client/producer/DefaultMQProducerTest.java b/client/src/test/java/org/apache/rocketmq/client/producer/DefaultMQProducerTest.java index be277f69bcf..33cf0df390d 100644 --- a/client/src/test/java/org/apache/rocketmq/client/producer/DefaultMQProducerTest.java +++ b/client/src/test/java/org/apache/rocketmq/client/producer/DefaultMQProducerTest.java @@ -68,6 +68,7 @@ import static org.assertj.core.api.Fail.failBecauseExceptionWasNotThrown; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; @@ -551,6 
+552,50 @@ public void testBatchSendMessageSync_Success() throws RemotingException, Interru producer.setAutoBatch(false); } + + @Test + public void testRunningSetBackCompress() throws RemotingException, InterruptedException, MQClientException { + final CountDownLatch countDownLatch = new CountDownLatch(5); + SendCallback sendCallback = new SendCallback() { + @Override + public void onSuccess(SendResult sendResult) { + countDownLatch.countDown(); + } + + @Override + public void onException(Throwable e) { + e.printStackTrace(); + countDownLatch.countDown(); + } + }; + + // on enableBackpressureForAsyncMode + producer.setEnableBackpressureForAsyncMode(true); + producer.setBackPressureForAsyncSendNum(10); + producer.setBackPressureForAsyncSendSize(50 * 1024 * 1024); + Message message = new Message(); + message.setTopic("test"); + message.setBody("hello world".getBytes()); + MessageQueue mq = new MessageQueue("test", "BrokerA", 1); + //this message is send success + for (int i = 0; i < 5; i++) { + new Thread(new Runnable() { + @Override + public void run() { + try { + producer.send(message, mq, sendCallback); + } catch (MQClientException | RemotingException | InterruptedException e) { + throw new RuntimeException(e); + } + } + }).start(); + } + producer.setBackPressureForAsyncSendNum(15); + countDownLatch.await(3000L, TimeUnit.MILLISECONDS); + assertThat(producer.defaultMQProducerImpl.getSemaphoreAsyncSendNumAvailablePermits() + countDownLatch.getCount()).isEqualTo(15); + producer.setEnableBackpressureForAsyncMode(false); + } + public static TopicRouteData createTopicRoute() { TopicRouteData topicRouteData = new TopicRouteData(); @@ -615,9 +660,9 @@ public void assertCreateDefaultMQProducer() { assertNotNull(producer1); assertEquals(producerGroupTemp, producer1.getProducerGroup()); assertNotNull(producer1.getDefaultMQProducerImpl()); - assertTrue(producer1.getTotalBatchMaxBytes() > 0); - assertTrue(producer1.getBatchMaxBytes() > 0); - assertTrue(producer1.getBatchMaxDelayMs() > 0); + assertEquals(0, producer1.getTotalBatchMaxBytes()); + assertEquals(0, producer1.getBatchMaxBytes()); + assertEquals(0, producer1.getBatchMaxDelayMs()); assertNull(producer1.getTopics()); assertFalse(producer1.isEnableTrace()); assertTrue(UtilAll.isBlank(producer1.getTraceTopic())); @@ -625,9 +670,9 @@ public void assertCreateDefaultMQProducer() { assertNotNull(producer2); assertEquals(producerGroupTemp, producer2.getProducerGroup()); assertNotNull(producer2.getDefaultMQProducerImpl()); - assertTrue(producer2.getTotalBatchMaxBytes() > 0); - assertTrue(producer2.getBatchMaxBytes() > 0); - assertTrue(producer2.getBatchMaxDelayMs() > 0); + assertEquals(0, producer2.getTotalBatchMaxBytes()); + assertEquals(0, producer2.getBatchMaxBytes()); + assertEquals(0, producer2.getBatchMaxDelayMs()); assertNull(producer2.getTopics()); assertFalse(producer2.isEnableTrace()); assertTrue(UtilAll.isBlank(producer2.getTraceTopic())); @@ -635,9 +680,9 @@ public void assertCreateDefaultMQProducer() { assertNotNull(producer3); assertEquals(producerGroupTemp, producer3.getProducerGroup()); assertNotNull(producer3.getDefaultMQProducerImpl()); - assertTrue(producer3.getTotalBatchMaxBytes() > 0); - assertTrue(producer3.getBatchMaxBytes() > 0); - assertTrue(producer3.getBatchMaxDelayMs() > 0); + assertEquals(0, producer3.getTotalBatchMaxBytes()); + assertEquals(0, producer3.getBatchMaxBytes()); + assertEquals(0, producer3.getBatchMaxDelayMs()); assertNotNull(producer3.getTopics()); assertEquals(1, producer3.getTopics().size()); 
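testRunningSetBackCompress above raises the async-send limit while five sends are in flight and then checks that the available permits plus the unfinished sends add up to the new limit of 15. A hedged sketch of the user-facing pattern (group name and numbers illustrative, exception handling and imports omitted):

DefaultMQProducer producer = new DefaultMQProducer("runtime_backpressure_group");   // illustrative group name
producer.setEnableBackpressureForAsyncMode(true);
producer.start();
producer.setBackPressureForAsyncSendNum(10);                  // at most 10 async sends in flight (floor is 10)
producer.setBackPressureForAsyncSendSize(50 * 1024 * 1024);   // at most 50 MB of message bodies in flight
// ... asynchronous sends running ...
producer.setBackPressureForAsyncSendNum(15);                  // grow the limit by 5 without disturbing
                                                              // sends that already hold permits

Both setters take the new write lock internally, so the resize waits for any acquire/release currently running on the send path before swapping in the resized semaphore.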
assertFalse(producer3.isEnableTrace()); @@ -646,9 +691,9 @@ public void assertCreateDefaultMQProducer() { assertNotNull(producer4); assertEquals(producerGroupTemp, producer4.getProducerGroup()); assertNotNull(producer4.getDefaultMQProducerImpl()); - assertTrue(producer4.getTotalBatchMaxBytes() > 0); - assertTrue(producer4.getBatchMaxBytes() > 0); - assertTrue(producer4.getBatchMaxDelayMs() > 0); + assertEquals(0, producer4.getTotalBatchMaxBytes()); + assertEquals(0, producer4.getBatchMaxBytes()); + assertEquals(0, producer4.getBatchMaxDelayMs()); assertNull(producer4.getTopics()); assertTrue(producer4.isEnableTrace()); assertEquals("custom_trace_topic", producer4.getTraceTopic()); @@ -656,9 +701,9 @@ public void assertCreateDefaultMQProducer() { assertNotNull(producer5); assertEquals(producerGroupTemp, producer5.getProducerGroup()); assertNotNull(producer5.getDefaultMQProducerImpl()); - assertTrue(producer5.getTotalBatchMaxBytes() > 0); - assertTrue(producer5.getBatchMaxBytes() > 0); - assertTrue(producer5.getBatchMaxDelayMs() > 0); + assertEquals(0, producer5.getTotalBatchMaxBytes()); + assertEquals(0, producer5.getBatchMaxBytes()); + assertEquals(0, producer5.getBatchMaxDelayMs()); assertNotNull(producer5.getTopics()); assertEquals(1, producer5.getTopics().size()); assertTrue(producer5.isEnableTrace()); @@ -766,6 +811,136 @@ public void assertTotalBatchMaxBytes() throws NoSuchFieldException, IllegalAcces assertEquals(0L, producer.getTotalBatchMaxBytes()); } + @Test + public void assertProduceAccumulatorStart() throws NoSuchFieldException, IllegalAccessException, MQClientException { + String producerGroupTemp = producerGroupPrefix + System.nanoTime(); + DefaultMQProducer producer = new DefaultMQProducer(producerGroupTemp); + assertEquals(0, producer.getTotalBatchMaxBytes()); + assertEquals(0, producer.getBatchMaxBytes()); + assertEquals(0, producer.getBatchMaxDelayMs()); + assertNull(getField(producer, "produceAccumulator", ProduceAccumulator.class)); + producer.start(); + assertTrue(producer.getTotalBatchMaxBytes() > 0); + assertTrue(producer.getBatchMaxBytes() > 0); + assertTrue(producer.getBatchMaxDelayMs() > 0); + assertNotNull(getField(producer, "produceAccumulator", ProduceAccumulator.class)); + } + + @Test + public void assertProduceAccumulatorBeforeStartSet() throws NoSuchFieldException, IllegalAccessException, MQClientException { + String producerGroupTemp = producerGroupPrefix + System.nanoTime(); + DefaultMQProducer producer = new DefaultMQProducer(producerGroupTemp); + producer.totalBatchMaxBytes(64 * 1024 * 100); + producer.batchMaxBytes(64 * 1024); + producer.batchMaxDelayMs(10); + + producer.start(); + assertEquals(64 * 1024, producer.getBatchMaxBytes()); + assertEquals(10, producer.getBatchMaxDelayMs()); + assertNotNull(getField(producer, "produceAccumulator", ProduceAccumulator.class)); + } + + @Test + public void assertProduceAccumulatorAfterStartSet() throws NoSuchFieldException, IllegalAccessException, MQClientException { + String producerGroupTemp = producerGroupPrefix + System.nanoTime(); + DefaultMQProducer producer = new DefaultMQProducer(producerGroupTemp); + producer.start(); + + assertNotNull(getField(producer, "produceAccumulator", ProduceAccumulator.class)); + + producer.totalBatchMaxBytes(64 * 1024 * 100); + producer.batchMaxBytes(64 * 1024); + producer.batchMaxDelayMs(10); + + assertEquals(64 * 1024, producer.getBatchMaxBytes()); + assertEquals(10, producer.getBatchMaxDelayMs()); + } + + @Test + public void assertProduceAccumulatorUnit() throws 
NoSuchFieldException, IllegalAccessException, MQClientException { + String producerGroupTemp = producerGroupPrefix + System.nanoTime(); + DefaultMQProducer producer1 = new DefaultMQProducer(producerGroupTemp); + producer1.setUnitName("unit1"); + DefaultMQProducer producer2 = new DefaultMQProducer(producerGroupTemp); + producer2.setUnitName("unit2"); + + producer1.start(); + producer2.start(); + + ProduceAccumulator producer1Accumulator = getField(producer1, "produceAccumulator", ProduceAccumulator.class); + ProduceAccumulator producer2Accumulator = getField(producer2, "produceAccumulator", ProduceAccumulator.class); + + assertNotNull(producer1Accumulator); + assertNotNull(producer2Accumulator); + + assertNotEquals(producer1Accumulator, producer2Accumulator); + } + + @Test + public void assertProduceAccumulator() throws NoSuchFieldException, IllegalAccessException, MQClientException { + String producerGroupTemp1 = producerGroupPrefix + System.nanoTime(); + DefaultMQProducer producer1 = new DefaultMQProducer(producerGroupTemp1); + producer1.setInstanceName("instanceName1"); + String producerGroupTemp2 = producerGroupPrefix + System.nanoTime(); + DefaultMQProducer producer2 = new DefaultMQProducer(producerGroupTemp2); + producer2.setInstanceName("instanceName2"); + + producer1.start(); + producer2.start(); + + ProduceAccumulator producer1Accumulator = getField(producer1, "produceAccumulator", ProduceAccumulator.class); + ProduceAccumulator producer2Accumulator = getField(producer2, "produceAccumulator", ProduceAccumulator.class); + + assertNotNull(producer1Accumulator); + assertNotNull(producer2Accumulator); + + assertNotEquals(producer1Accumulator, producer2Accumulator); + } + + @Test + public void assertProduceAccumulatorInstanceEqual() throws NoSuchFieldException, IllegalAccessException, MQClientException { + String producerGroupTemp1 = producerGroupPrefix + System.nanoTime(); + DefaultMQProducer producer1 = new DefaultMQProducer(producerGroupTemp1); + producer1.setInstanceName("equalInstance"); + String producerGroupTemp2 = producerGroupPrefix + System.nanoTime(); + DefaultMQProducer producer2 = new DefaultMQProducer(producerGroupTemp2); + producer2.setInstanceName("equalInstance"); + + producer1.start(); + producer2.start(); + + ProduceAccumulator producer1Accumulator = getField(producer1, "produceAccumulator", ProduceAccumulator.class); + ProduceAccumulator producer2Accumulator = getField(producer2, "produceAccumulator", ProduceAccumulator.class); + + assertNotNull(producer1Accumulator); + assertNotNull(producer2Accumulator); + + assertEquals(producer1Accumulator, producer2Accumulator); + } + + @Test + public void assertProduceAccumulatorInstanceAndUnitNameEqual() throws NoSuchFieldException, IllegalAccessException, MQClientException { + String producerGroupTemp1 = producerGroupPrefix + System.nanoTime(); + DefaultMQProducer producer1 = new DefaultMQProducer(producerGroupTemp1); + producer1.setInstanceName("equalInstance"); + producer1.setUnitName("equalUnitName"); + String producerGroupTemp2 = producerGroupPrefix + System.nanoTime(); + DefaultMQProducer producer2 = new DefaultMQProducer(producerGroupTemp2); + producer2.setInstanceName("equalInstance"); + producer2.setUnitName("equalUnitName"); + + producer1.start(); + producer2.start(); + + ProduceAccumulator producer1Accumulator = getField(producer1, "produceAccumulator", ProduceAccumulator.class); + ProduceAccumulator producer2Accumulator = getField(producer2, "produceAccumulator", ProduceAccumulator.class); + + 
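// Same instanceName and same unitName resolve to the same underlying client instance, so the two
// producers are expected to share a single ProduceAccumulator here, in contrast to the
// different-unit and different-instance cases above, which each get their own accumulator.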
assertNotNull(producer1Accumulator); + assertNotNull(producer2Accumulator); + + assertEquals(producer1Accumulator, producer2Accumulator); + } + @Test public void assertGetRetryResponseCodes() { assertNotNull(producer.getRetryResponseCodes()); @@ -831,4 +1006,11 @@ private void setField(final Object target, final String fieldName, final Object field.setAccessible(true); field.set(target, newValue); } + + private T getField(final Object target, final String fieldName, final Class fieldClassType) throws NoSuchFieldException, IllegalAccessException { + Class targetClazz = target.getClass(); + Field field = targetClazz.getDeclaredField(fieldName); + field.setAccessible(true); + return fieldClassType.cast(field.get(target)); + } } diff --git a/common/pom.xml b/common/pom.xml index 82994c9a197..b548d3df3c4 100644 --- a/common/pom.xml +++ b/common/pom.xml @@ -19,7 +19,7 @@ org.apache.rocketmq rocketmq-all - 5.3.1-SNAPSHOT + 5.3.2-SNAPSHOT 4.0.0 diff --git a/common/src/main/java/org/apache/rocketmq/common/BrokerConfig.java b/common/src/main/java/org/apache/rocketmq/common/BrokerConfig.java index 26afe593a25..2acfdd69a5c 100644 --- a/common/src/main/java/org/apache/rocketmq/common/BrokerConfig.java +++ b/common/src/main/java/org/apache/rocketmq/common/BrokerConfig.java @@ -231,7 +231,7 @@ public class BrokerConfig extends BrokerIdentity { // read message from pop retry topic v1, for the compatibility, will be removed in the future version private boolean retrieveMessageFromPopRetryTopicV1 = true; private boolean enableRetryTopicV2 = false; - + private int popFromRetryProbability = 20; private boolean realTimeNotifyConsumerChange = true; private boolean litePullMessageEnable = true; @@ -427,6 +427,10 @@ public class BrokerConfig extends BrokerIdentity { // if false, will still rewrite ck after max times 17 private boolean skipWhenCKRePutReachMaxTimes = false; + private boolean appendAckAsync = false; + + private boolean appendCkAsync = false; + public String getConfigBlackList() { return configBlackList; } @@ -563,6 +567,15 @@ public void setEnablePopLog(boolean enablePopLog) { this.enablePopLog = enablePopLog; } + public int getPopFromRetryProbability() { + return popFromRetryProbability; + } + + public void setPopFromRetryProbability(int popFromRetryProbability) { + this.popFromRetryProbability = popFromRetryProbability; + } + + public boolean isTraceOn() { return traceOn; } @@ -1850,4 +1863,20 @@ public int getUpdateNameServerAddrPeriod() { public void setUpdateNameServerAddrPeriod(int updateNameServerAddrPeriod) { this.updateNameServerAddrPeriod = updateNameServerAddrPeriod; } + + public boolean isAppendAckAsync() { + return appendAckAsync; + } + + public void setAppendAckAsync(boolean appendAckAsync) { + this.appendAckAsync = appendAckAsync; + } + + public boolean isAppendCkAsync() { + return appendCkAsync; + } + + public void setAppendCkAsync(boolean appendCkAsync) { + this.appendCkAsync = appendCkAsync; + } } diff --git a/common/src/main/java/org/apache/rocketmq/common/MQVersion.java b/common/src/main/java/org/apache/rocketmq/common/MQVersion.java index 8ac75a72c98..a03668e51ce 100644 --- a/common/src/main/java/org/apache/rocketmq/common/MQVersion.java +++ b/common/src/main/java/org/apache/rocketmq/common/MQVersion.java @@ -18,7 +18,7 @@ public class MQVersion { - public static final int CURRENT_VERSION = Version.V5_3_0.ordinal(); + public static final int CURRENT_VERSION = Version.V5_3_1.ordinal(); public static String getVersionDesc(int value) { int length = Version.values().length; diff 
--git a/common/src/main/java/org/apache/rocketmq/common/config/AbstractRocksDBStorage.java b/common/src/main/java/org/apache/rocketmq/common/config/AbstractRocksDBStorage.java index f88b8e198bf..42ddbdc728c 100644 --- a/common/src/main/java/org/apache/rocketmq/common/config/AbstractRocksDBStorage.java +++ b/common/src/main/java/org/apache/rocketmq/common/config/AbstractRocksDBStorage.java @@ -17,9 +17,10 @@ package org.apache.rocketmq.common.config; import com.google.common.collect.Maps; +import io.netty.buffer.PooledByteBufAllocator; +import java.nio.charset.StandardCharsets; import org.apache.rocketmq.common.ThreadFactoryImpl; import org.apache.rocketmq.common.constant.LoggerName; -import org.apache.rocketmq.common.utils.DataConverter; import org.apache.rocketmq.common.utils.ThreadUtils; import org.apache.rocketmq.logging.org.slf4j.Logger; import org.apache.rocketmq.logging.org.slf4j.LoggerFactory; @@ -28,7 +29,9 @@ import org.rocksdb.ColumnFamilyOptions; import org.rocksdb.CompactRangeOptions; import org.rocksdb.CompactionOptions; +import org.rocksdb.CompressionType; import org.rocksdb.DBOptions; +import org.rocksdb.Env; import org.rocksdb.FlushOptions; import org.rocksdb.LiveFileMetaData; import org.rocksdb.Priority; @@ -50,14 +53,21 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import static org.rocksdb.RocksDB.NOT_FOUND; - public abstract class AbstractRocksDBStorage { protected static final Logger LOGGER = LoggerFactory.getLogger(LoggerName.ROCKSDB_LOGGER_NAME); + /** + * Direct Jemalloc allocator + */ + public static final PooledByteBufAllocator POOLED_ALLOCATOR = new PooledByteBufAllocator(true); + + public static final byte CTRL_0 = '\u0000'; + public static final byte CTRL_1 = '\u0001'; + public static final byte CTRL_2 = '\u0002'; + private static final String SPACE = " | "; - protected String dbPath; + protected final String dbPath; protected boolean readOnly; protected RocksDB db; protected DBOptions options; @@ -72,7 +82,8 @@ public abstract class AbstractRocksDBStorage { protected CompactRangeOptions compactRangeOptions; protected ColumnFamilyHandle defaultCFHandle; - protected final List cfOptions = new ArrayList(); + protected final List cfOptions = new ArrayList<>(); + protected final List cfHandles = new ArrayList<>(); protected volatile boolean loaded; private volatile boolean closed; @@ -80,15 +91,76 @@ public abstract class AbstractRocksDBStorage { private final Semaphore reloadPermit = new Semaphore(1); private final ScheduledExecutorService reloadScheduler = ThreadUtils.newScheduledThreadPool(1, new ThreadFactoryImpl("RocksDBStorageReloadService_")); private final ThreadPoolExecutor manualCompactionThread = (ThreadPoolExecutor) ThreadUtils.newThreadPoolExecutor( - 1, 1, 1000 * 60, TimeUnit.MILLISECONDS, - new ArrayBlockingQueue(1), - new ThreadFactoryImpl("RocksDBManualCompactionService_"), - new ThreadPoolExecutor.DiscardOldestPolicy()); + 1, 1, 1000 * 60, TimeUnit.MILLISECONDS, + new ArrayBlockingQueue<>(1), + new ThreadFactoryImpl("RocksDBManualCompactionService_"), + new ThreadPoolExecutor.DiscardOldestPolicy()); static { RocksDB.loadLibrary(); } + public AbstractRocksDBStorage(String dbPath) { + this.dbPath = dbPath; + } + + protected void initOptions() { + initWriteOptions(); + initAbleWalWriteOptions(); + initReadOptions(); + initTotalOrderReadOptions(); + initCompactRangeOptions(); + initCompactionOptions(); + } + + /** + * Write options for Atomic Flush + */ + protected void initWriteOptions() { + 
this.writeOptions = new WriteOptions(); + this.writeOptions.setSync(false); + this.writeOptions.setDisableWAL(true); + this.writeOptions.setNoSlowdown(true); + } + + protected void initAbleWalWriteOptions() { + this.ableWalWriteOptions = new WriteOptions(); + this.ableWalWriteOptions.setSync(false); + this.ableWalWriteOptions.setDisableWAL(false); + this.ableWalWriteOptions.setNoSlowdown(true); + } + + protected void initReadOptions() { + this.readOptions = new ReadOptions(); + this.readOptions.setPrefixSameAsStart(true); + this.readOptions.setTotalOrderSeek(false); + this.readOptions.setTailing(false); + } + + protected void initTotalOrderReadOptions() { + this.totalOrderReadOptions = new ReadOptions(); + this.totalOrderReadOptions.setPrefixSameAsStart(false); + this.totalOrderReadOptions.setTotalOrderSeek(true); + this.totalOrderReadOptions.setTailing(false); + } + + protected void initCompactRangeOptions() { + this.compactRangeOptions = new CompactRangeOptions(); + this.compactRangeOptions.setBottommostLevelCompaction(CompactRangeOptions.BottommostLevelCompaction.kForce); + this.compactRangeOptions.setAllowWriteStall(true); + this.compactRangeOptions.setExclusiveManualCompaction(false); + this.compactRangeOptions.setChangeLevel(true); + this.compactRangeOptions.setTargetLevel(-1); + this.compactRangeOptions.setMaxSubcompactions(4); + } + + protected void initCompactionOptions() { + this.compactionOptions = new CompactionOptions(); + this.compactionOptions.setCompression(CompressionType.LZ4_COMPRESSION); + this.compactionOptions.setMaxSubcompactions(4); + this.compactionOptions.setOutputFileSizeLimit(4 * 1024 * 1024 * 1024L); + } + public boolean hold() { if (!this.loaded || this.db == null || this.closed) { LOGGER.error("hold rocksdb Failed. {}", this.dbPath); @@ -102,8 +174,8 @@ public void release() { } protected void put(ColumnFamilyHandle cfHandle, WriteOptions writeOptions, - final byte[] keyBytes, final int keyLen, - final byte[] valueBytes, final int valueLen) throws RocksDBException { + final byte[] keyBytes, final int keyLen, + final byte[] valueBytes, final int valueLen) throws RocksDBException { if (!hold()) { throw new IllegalStateException("rocksDB:" + this + " is not ready"); } @@ -119,7 +191,7 @@ protected void put(ColumnFamilyHandle cfHandle, WriteOptions writeOptions, } protected void put(ColumnFamilyHandle cfHandle, WriteOptions writeOptions, - final ByteBuffer keyBB, final ByteBuffer valueBB) throws RocksDBException { + final ByteBuffer keyBB, final ByteBuffer valueBB) throws RocksDBException { if (!hold()) { throw new IllegalStateException("rocksDB:" + this + " is not ready"); } @@ -160,13 +232,13 @@ protected byte[] get(ColumnFamilyHandle cfHandle, ReadOptions readOptions, byte[ } } - protected boolean get(ColumnFamilyHandle cfHandle, ReadOptions readOptions, - final ByteBuffer keyBB, final ByteBuffer valueBB) throws RocksDBException { + protected int get(ColumnFamilyHandle cfHandle, ReadOptions readOptions, final ByteBuffer keyBB, + final ByteBuffer valueBB) throws RocksDBException { if (!hold()) { throw new IllegalStateException("rocksDB:" + this + " is not ready"); } try { - return this.db.get(cfHandle, readOptions, keyBB, valueBB) != NOT_FOUND; + return this.db.get(cfHandle, readOptions, keyBB, valueBB); } catch (RocksDBException e) { LOGGER.error("get Failed. 
{}, {}", this.dbPath, getStatusError(e)); throw e; @@ -176,8 +248,8 @@ protected boolean get(ColumnFamilyHandle cfHandle, ReadOptions readOptions, } protected List multiGet(final ReadOptions readOptions, - final List columnFamilyHandleList, - final List keys) throws RocksDBException { + final List columnFamilyHandleList, + final List keys) throws RocksDBException { if (!hold()) { throw new IllegalStateException("rocksDB:" + this + " is not ready"); } @@ -191,7 +263,8 @@ protected List multiGet(final ReadOptions readOptions, } } - protected void delete(ColumnFamilyHandle cfHandle, WriteOptions writeOptions, byte[] keyBytes) throws RocksDBException { + protected void delete(ColumnFamilyHandle cfHandle, WriteOptions writeOptions, + byte[] keyBytes) throws RocksDBException { if (!hold()) { throw new IllegalStateException("rocksDB:" + this + " is not ready"); } @@ -205,7 +278,8 @@ protected void delete(ColumnFamilyHandle cfHandle, WriteOptions writeOptions, by } } - protected void delete(ColumnFamilyHandle cfHandle, WriteOptions writeOptions, ByteBuffer keyBB) throws RocksDBException { + protected void delete(ColumnFamilyHandle cfHandle, WriteOptions writeOptions, ByteBuffer keyBB) + throws RocksDBException { if (!hold()) { throw new IllegalStateException("rocksDB:" + this + " is not ready"); } @@ -219,8 +293,8 @@ protected void delete(ColumnFamilyHandle cfHandle, WriteOptions writeOptions, By } } - protected void rangeDelete(ColumnFamilyHandle cfHandle, WriteOptions writeOptions, - final byte[] startKey, final byte[] endKey) throws RocksDBException { + protected void rangeDelete(ColumnFamilyHandle cfHandle, WriteOptions writeOptions, final byte[] startKey, + final byte[] endKey) throws RocksDBException { if (!hold()) { throw new IllegalStateException("rocksDB:" + this + " is not ready"); } @@ -263,16 +337,17 @@ public void run() { }); } - protected void open(final List cfDescriptors, - final List cfHandles) throws RocksDBException { + protected void open(final List cfDescriptors) throws RocksDBException { + this.cfHandles.clear(); if (this.readOnly) { this.db = RocksDB.openReadOnly(this.options, this.dbPath, cfDescriptors, cfHandles); } else { this.db = RocksDB.open(this.options, this.dbPath, cfDescriptors, cfHandles); } - this.db.getEnv().setBackgroundThreads(8, Priority.HIGH); - this.db.getEnv().setBackgroundThreads(8, Priority.LOW); - + assert cfDescriptors.size() == cfHandles.size(); + try (Env env = this.db.getEnv()) { + env.setBackgroundThreads(8, Priority.LOW); + } if (this.db == null) { throw new RocksDBException("open rocksdb null"); } @@ -294,6 +369,9 @@ public synchronized boolean start() { } } + /** + * Close column family handles except the default column family + */ protected abstract void preShutdown(); public synchronized boolean shutdown() { @@ -311,11 +389,12 @@ public synchronized boolean shutdown() { } this.db.cancelAllBackgroundWork(true); this.db.pauseBackgroundWork(); - //The close order is matter. + //The close order matters. //1. close column family handles preShutdown(); this.defaultCFHandle.close(); + //2. close column family options. for (final ColumnFamilyOptions opt : this.cfOptions) { opt.close(); @@ -333,9 +412,6 @@ public synchronized boolean shutdown() { if (this.totalOrderReadOptions != null) { this.totalOrderReadOptions.close(); } - if (this.options != null) { - this.options.close(); - } //4. close db. 
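// The WAL is synced before the handle is closed, and DBOptions is now released only after the
// RocksDB instance itself, presumably because the native options object is still referenced
// while the database opened with it remains live.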
if (db != null && !this.readOnly) { this.db.syncWal(); @@ -343,6 +419,10 @@ public synchronized boolean shutdown() { if (db != null) { this.db.closeE(); } + // Close DBOptions after RocksDB instance is closed. + if (this.options != null) { + this.options.close(); + } //5. help gc. this.cfOptions.clear(); this.db = null; @@ -361,21 +441,33 @@ public synchronized boolean shutdown() { return true; } - public void flush(final FlushOptions flushOptions) { + public void flush(final FlushOptions flushOptions) throws RocksDBException { + flush(flushOptions, this.cfHandles); + } + + public void flush(final FlushOptions flushOptions, List columnFamilyHandles) throws RocksDBException { if (!this.loaded || this.readOnly || closed) { return; } try { if (db != null) { - this.db.flush(flushOptions); + // For atomic-flush, we have to explicitly specify column family handles + // See https://github.com/rust-rocksdb/rust-rocksdb/pull/793 + // and https://github.com/facebook/rocksdb/blob/8ad4c7efc48d301f5e85467105d7019a49984dc8/include/rocksdb/db.h#L1667 + this.db.flush(flushOptions, columnFamilyHandles); } } catch (RocksDBException e) { scheduleReloadRocksdb(e); LOGGER.error("flush Failed. {}, {}", this.dbPath, getStatusError(e)); + throw e; } } + public void flushWAL() throws RocksDBException { + this.db.flushWal(true); + } + public Statistics getStatistics() { return this.options.statistics(); } @@ -442,10 +534,6 @@ private void reloadRocksdb() throws Exception { LOGGER.info("reload rocksdb OK. {}", this.dbPath); } - public void flushWAL() throws RocksDBException { - this.db.flushWal(true); - } - private String getStatusError(RocksDBException e) { if (e == null || e.getStatus() == null) { return "null"; @@ -478,13 +566,13 @@ public void statRocksdb(Logger logger) { Map map = Maps.newHashMap(); for (LiveFileMetaData metaData : liveFileMetaDataList) { StringBuilder sb = map.computeIfAbsent(metaData.level(), k -> new StringBuilder(256)); - sb.append(new String(metaData.columnFamilyName(), DataConverter.CHARSET_UTF8)).append(SPACE). - append(metaData.fileName()).append(SPACE). - append("s: ").append(metaData.size()).append(SPACE). - append("a: ").append(metaData.numEntries()).append(SPACE). - append("r: ").append(metaData.numReadsSampled()).append(SPACE). - append("d: ").append(metaData.numDeletions()).append(SPACE). - append(metaData.beingCompacted()).append("\n"); + sb.append(new String(metaData.columnFamilyName(), StandardCharsets.UTF_8)).append(SPACE). + append(metaData.fileName()).append(SPACE). + append("s: ").append(metaData.size()).append(SPACE). + append("a: ").append(metaData.numEntries()).append(SPACE). + append("r: ").append(metaData.numReadsSampled()).append(SPACE). + append("d: ").append(metaData.numDeletions()).append(SPACE). + append(metaData.beingCompacted()).append("\n"); } map.forEach((key, value) -> logger.info("level: {}\n{}", key, value.toString())); @@ -493,8 +581,8 @@ public void statRocksdb(Logger logger) { String indexesAndFilterBlockMemUsage = this.db.getProperty("rocksdb.estimate-table-readers-mem"); String memTableMemUsage = this.db.getProperty("rocksdb.cur-size-all-mem-tables"); String blocksPinnedByIteratorMemUsage = this.db.getProperty("rocksdb.block-cache-pinned-usage"); - logger.info("MemUsage. blockCache: {}, indexesAndFilterBlock: {}, memtable: {}, blocksPinnedByIterator: {}", - blockCacheMemUsage, indexesAndFilterBlockMemUsage, memTableMemUsage, blocksPinnedByIteratorMemUsage); + logger.info("MemUsage. 
blockCache: {}, indexesAndFilterBlock: {}, MemTable: {}, blocksPinnedByIterator: {}", + blockCacheMemUsage, indexesAndFilterBlockMemUsage, memTableMemUsage, blocksPinnedByIteratorMemUsage); } catch (Exception ignored) { } } diff --git a/common/src/main/java/org/apache/rocketmq/common/config/ConfigHelper.java b/common/src/main/java/org/apache/rocketmq/common/config/ConfigHelper.java new file mode 100644 index 00000000000..95d5119cfc6 --- /dev/null +++ b/common/src/main/java/org/apache/rocketmq/common/config/ConfigHelper.java @@ -0,0 +1,121 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.rocketmq.common.config; + +import java.io.File; +import org.apache.commons.lang3.StringUtils; +import org.apache.rocketmq.common.UtilAll; +import org.rocksdb.BlockBasedTableConfig; +import org.rocksdb.BloomFilter; +import org.rocksdb.ColumnFamilyOptions; +import org.rocksdb.CompactionStyle; +import org.rocksdb.CompressionType; +import org.rocksdb.DBOptions; +import org.rocksdb.DataBlockIndexType; +import org.rocksdb.IndexType; +import org.rocksdb.InfoLogLevel; +import org.rocksdb.LRUCache; +import org.rocksdb.RateLimiter; +import org.rocksdb.SkipListMemTableConfig; +import org.rocksdb.Statistics; +import org.rocksdb.StatsLevel; +import org.rocksdb.StringAppendOperator; +import org.rocksdb.WALRecoveryMode; +import org.rocksdb.util.SizeUnit; + +public class ConfigHelper { + public static ColumnFamilyOptions createConfigOptions() { + BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(). + setFormatVersion(5). + setIndexType(IndexType.kBinarySearch). + setDataBlockIndexType(DataBlockIndexType.kDataBlockBinarySearch). + setBlockSize(32 * SizeUnit.KB). + setFilterPolicy(new BloomFilter(16, false)). + // Indicating if we'd put index/filter blocks to the block cache. + setCacheIndexAndFilterBlocks(false). + setCacheIndexAndFilterBlocksWithHighPriority(true). + setPinL0FilterAndIndexBlocksInCache(false). + setPinTopLevelIndexAndFilter(true). + setBlockCache(new LRUCache(4 * SizeUnit.MB, 8, false)). + setWholeKeyFiltering(true); + + ColumnFamilyOptions options = new ColumnFamilyOptions(); + return options.setMaxWriteBufferNumber(2). + // MemTable size, MemTable(cache) -> immutable MemTable(cache) -> SST(disk) + setWriteBufferSize(8 * SizeUnit.MB). + setMinWriteBufferNumberToMerge(1). + setTableFormatConfig(blockBasedTableConfig). + setMemTableConfig(new SkipListMemTableConfig()). + setCompressionType(CompressionType.NO_COMPRESSION). + setNumLevels(7). + setCompactionStyle(CompactionStyle.LEVEL). + setLevel0FileNumCompactionTrigger(4). + setLevel0SlowdownWritesTrigger(8). + setLevel0StopWritesTrigger(12). + // The target file size for compaction. + setTargetFileSizeBase(64 * SizeUnit.MB). + setTargetFileSizeMultiplier(2). 
+ // The upper-bound of the total size of L1 files in bytes + setMaxBytesForLevelBase(256 * SizeUnit.MB). + setMaxBytesForLevelMultiplier(2). + setMergeOperator(new StringAppendOperator()). + setInplaceUpdateSupport(true); + } + + public static DBOptions createConfigDBOptions() { + //Turn based on https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide + // and http://gitlab.alibaba-inc.com/aloha/aloha/blob/branch_2_5_0/jstorm-core/src/main/java/com/alibaba/jstorm/cache/rocksdb/RocksDbOptionsFactory.java + DBOptions options = new DBOptions(); + Statistics statistics = new Statistics(); + statistics.setStatsLevel(StatsLevel.EXCEPT_DETAILED_TIMERS); + return options. + setDbLogDir(getDBLogDir()). + setInfoLogLevel(InfoLogLevel.INFO_LEVEL). + setWalRecoveryMode(WALRecoveryMode.SkipAnyCorruptedRecords). + setManualWalFlush(true). + setMaxTotalWalSize(500 * SizeUnit.MB). + setWalSizeLimitMB(0). + setWalTtlSeconds(0). + setCreateIfMissing(true). + setCreateMissingColumnFamilies(true). + setMaxOpenFiles(-1). + setMaxLogFileSize(SizeUnit.GB). + setKeepLogFileNum(5). + setMaxManifestFileSize(SizeUnit.GB). + setAllowConcurrentMemtableWrite(false). + setStatistics(statistics). + setStatsDumpPeriodSec(600). + setAtomicFlush(true). + setMaxBackgroundJobs(32). + setMaxSubcompactions(4). + setParanoidChecks(true). + setDelayedWriteRate(16 * SizeUnit.MB). + setRateLimiter(new RateLimiter(100 * SizeUnit.MB)). + setUseDirectIoForFlushAndCompaction(true). + setUseDirectReads(true); + } + + public static String getDBLogDir() { + String rootPath = System.getProperty("user.home"); + if (StringUtils.isEmpty(rootPath)) { + return ""; + } + rootPath = rootPath + File.separator + "logs"; + UtilAll.ensureDirOK(rootPath); + return rootPath + File.separator + "rocketmqlogs" + File.separator; + } +} diff --git a/common/src/main/java/org/apache/rocketmq/common/config/ConfigRocksDBStorage.java b/common/src/main/java/org/apache/rocketmq/common/config/ConfigRocksDBStorage.java index f657d9cf2d2..36da6834ff3 100644 --- a/common/src/main/java/org/apache/rocketmq/common/config/ConfigRocksDBStorage.java +++ b/common/src/main/java/org/apache/rocketmq/common/config/ConfigRocksDBStorage.java @@ -16,101 +16,43 @@ */ package org.apache.rocketmq.common.config; -import java.io.File; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; -import org.apache.commons.lang3.StringUtils; import org.apache.rocketmq.common.UtilAll; -import org.rocksdb.BlockBasedTableConfig; -import org.rocksdb.BloomFilter; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.ColumnFamilyOptions; -import org.rocksdb.CompactRangeOptions; -import org.rocksdb.CompactRangeOptions.BottommostLevelCompaction; -import org.rocksdb.CompactionOptions; -import org.rocksdb.CompactionStyle; -import org.rocksdb.CompressionType; -import org.rocksdb.DBOptions; -import org.rocksdb.DataBlockIndexType; -import org.rocksdb.IndexType; -import org.rocksdb.InfoLogLevel; -import org.rocksdb.LRUCache; -import org.rocksdb.RateLimiter; import org.rocksdb.ReadOptions; import org.rocksdb.RocksDB; import org.rocksdb.RocksDBException; import org.rocksdb.RocksIterator; -import org.rocksdb.SkipListMemTableConfig; -import org.rocksdb.Statistics; -import org.rocksdb.StatsLevel; -import org.rocksdb.StringAppendOperator; -import org.rocksdb.WALRecoveryMode; import org.rocksdb.WriteBatch; -import org.rocksdb.WriteOptions; -import org.rocksdb.util.SizeUnit; public class 
ConfigRocksDBStorage extends AbstractRocksDBStorage { + public static final byte[] KV_DATA_VERSION_COLUMN_FAMILY_NAME = "kvDataVersion".getBytes(StandardCharsets.UTF_8); + public static final byte[] FORBIDDEN_COLUMN_FAMILY_NAME = "forbidden".getBytes(StandardCharsets.UTF_8); - private static final byte[] KV_DATA_VERSION_COLUMN_FAMILY_NAME = "kvDataVersion".getBytes(StandardCharsets.UTF_8); - private static final byte[] KV_DATA_VERSION_KEY = "kvDataVersionKey".getBytes(StandardCharsets.UTF_8); protected ColumnFamilyHandle kvDataVersionFamilyHandle; - - private static final byte[] FORBIDDEN_COLUMN_FAMILY_NAME = "forbidden".getBytes(StandardCharsets.UTF_8); protected ColumnFamilyHandle forbiddenFamilyHandle; - + public static final byte[] KV_DATA_VERSION_KEY = "kvDataVersionKey".getBytes(StandardCharsets.UTF_8); public ConfigRocksDBStorage(final String dbPath) { - super(); - this.dbPath = dbPath; + super(dbPath); this.readOnly = false; } public ConfigRocksDBStorage(final String dbPath, boolean readOnly) { - super(); - this.dbPath = dbPath; + super(dbPath); this.readOnly = readOnly; } - private void initOptions() { - this.options = createConfigDBOptions(); - - this.writeOptions = new WriteOptions(); - this.writeOptions.setSync(false); - this.writeOptions.setDisableWAL(true); - this.writeOptions.setNoSlowdown(true); - - this.ableWalWriteOptions = new WriteOptions(); - this.ableWalWriteOptions.setSync(false); - this.ableWalWriteOptions.setDisableWAL(false); - this.ableWalWriteOptions.setNoSlowdown(true); - - this.readOptions = new ReadOptions(); - this.readOptions.setPrefixSameAsStart(true); - this.readOptions.setTotalOrderSeek(false); - this.readOptions.setTailing(false); - - this.totalOrderReadOptions = new ReadOptions(); - this.totalOrderReadOptions.setPrefixSameAsStart(false); - this.totalOrderReadOptions.setTotalOrderSeek(false); - this.totalOrderReadOptions.setTailing(false); - - this.compactRangeOptions = new CompactRangeOptions(); - this.compactRangeOptions.setBottommostLevelCompaction(BottommostLevelCompaction.kForce); - this.compactRangeOptions.setAllowWriteStall(true); - this.compactRangeOptions.setExclusiveManualCompaction(false); - this.compactRangeOptions.setChangeLevel(true); - this.compactRangeOptions.setTargetLevel(-1); - this.compactRangeOptions.setMaxSubcompactions(4); - - this.compactionOptions = new CompactionOptions(); - this.compactionOptions.setCompression(CompressionType.LZ4_COMPRESSION); - this.compactionOptions.setMaxSubcompactions(4); - this.compactionOptions.setOutputFileSizeLimit(4 * 1024 * 1024 * 1024L); + protected void initOptions() { + this.options = ConfigHelper.createConfigDBOptions(); + super.initOptions(); } @Override @@ -120,15 +62,14 @@ protected boolean postLoad() { initOptions(); - final List cfDescriptors = new ArrayList(); + final List cfDescriptors = new ArrayList<>(); - ColumnFamilyOptions defaultOptions = createConfigOptions(); + ColumnFamilyOptions defaultOptions = ConfigHelper.createConfigOptions(); this.cfOptions.add(defaultOptions); cfDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, defaultOptions)); cfDescriptors.add(new ColumnFamilyDescriptor(KV_DATA_VERSION_COLUMN_FAMILY_NAME, defaultOptions)); cfDescriptors.add(new ColumnFamilyDescriptor(FORBIDDEN_COLUMN_FAMILY_NAME, defaultOptions)); - final List cfHandles = new ArrayList(); - open(cfDescriptors, cfHandles); + open(cfDescriptors); this.defaultCFHandle = cfHandles.get(0); this.kvDataVersionFamilyHandle = cfHandles.get(1); @@ -147,87 +88,6 @@ protected void 
preShutdown() { this.forbiddenFamilyHandle.close(); } - private ColumnFamilyOptions createConfigOptions() { - BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(). - setFormatVersion(5). - setIndexType(IndexType.kBinarySearch). - setDataBlockIndexType(DataBlockIndexType.kDataBlockBinarySearch). - setBlockSize(32 * SizeUnit.KB). - setFilterPolicy(new BloomFilter(16, false)). - // Indicating if we'd put index/filter blocks to the block cache. - setCacheIndexAndFilterBlocks(false). - setCacheIndexAndFilterBlocksWithHighPriority(true). - setPinL0FilterAndIndexBlocksInCache(false). - setPinTopLevelIndexAndFilter(true). - setBlockCache(new LRUCache(4 * SizeUnit.MB, 8, false)). - setWholeKeyFiltering(true); - - ColumnFamilyOptions options = new ColumnFamilyOptions(); - return options.setMaxWriteBufferNumber(2). - // MemTable size, memtable(cache) -> immutable memtable(cache) -> sst(disk) - setWriteBufferSize(8 * SizeUnit.MB). - setMinWriteBufferNumberToMerge(1). - setTableFormatConfig(blockBasedTableConfig). - setMemTableConfig(new SkipListMemTableConfig()). - setCompressionType(CompressionType.NO_COMPRESSION). - setNumLevels(7). - setCompactionStyle(CompactionStyle.LEVEL). - setLevel0FileNumCompactionTrigger(4). - setLevel0SlowdownWritesTrigger(8). - setLevel0StopWritesTrigger(12). - // The target file size for compaction. - setTargetFileSizeBase(64 * SizeUnit.MB). - setTargetFileSizeMultiplier(2). - // The upper-bound of the total size of L1 files in bytes - setMaxBytesForLevelBase(256 * SizeUnit.MB). - setMaxBytesForLevelMultiplier(2). - setMergeOperator(new StringAppendOperator()). - setInplaceUpdateSupport(true); - } - - private DBOptions createConfigDBOptions() { - //Turn based on https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide - // and http://gitlab.alibaba-inc.com/aloha/aloha/blob/branch_2_5_0/jstorm-core/src/main/java/com/alibaba/jstorm/cache/rocksdb/RocksDbOptionsFactory.java - DBOptions options = new DBOptions(); - Statistics statistics = new Statistics(); - statistics.setStatsLevel(StatsLevel.EXCEPT_DETAILED_TIMERS); - return options. - setDbLogDir(getDBLogDir()). - setInfoLogLevel(InfoLogLevel.INFO_LEVEL). - setWalRecoveryMode(WALRecoveryMode.SkipAnyCorruptedRecords). - setManualWalFlush(true). - setMaxTotalWalSize(500 * SizeUnit.MB). - setWalSizeLimitMB(0). - setWalTtlSeconds(0). - setCreateIfMissing(true). - setCreateMissingColumnFamilies(true). - setMaxOpenFiles(-1). - setMaxLogFileSize(1 * SizeUnit.GB). - setKeepLogFileNum(5). - setMaxManifestFileSize(1 * SizeUnit.GB). - setAllowConcurrentMemtableWrite(false). - setStatistics(statistics). - setStatsDumpPeriodSec(600). - setAtomicFlush(true). - setMaxBackgroundJobs(32). - setMaxSubcompactions(4). - setParanoidChecks(true). - setDelayedWriteRate(16 * SizeUnit.MB). - setRateLimiter(new RateLimiter(100 * SizeUnit.MB)). - setUseDirectIoForFlushAndCompaction(true). 
- setUseDirectReads(true); - } - - public static String getDBLogDir() { - String rootPath = System.getProperty("user.home"); - if (StringUtils.isEmpty(rootPath)) { - return ""; - } - rootPath = rootPath + File.separator + "logs"; - UtilAll.ensureDirOK(rootPath); - return rootPath + File.separator + "rocketmqlogs" + File.separator; - } - public void put(final byte[] keyBytes, final int keyLen, final byte[] valueBytes) throws Exception { put(this.defaultCFHandle, this.ableWalWriteOptions, keyBytes, keyLen, valueBytes, valueBytes.length); } @@ -281,10 +141,6 @@ public RocksIterator forbiddenIterator() { return this.db.newIterator(this.forbiddenFamilyHandle, this.totalOrderReadOptions); } - public void rangeDelete(final byte[] startKey, final byte[] endKey) throws RocksDBException { - rangeDelete(this.defaultCFHandle, this.writeOptions, startKey, endKey); - } - public RocksIterator iterator(ReadOptions readOptions) { return this.db.newIterator(this.defaultCFHandle, readOptions); } diff --git a/common/src/main/java/org/apache/rocketmq/common/utils/AsyncShutdownHelper.java b/common/src/main/java/org/apache/rocketmq/common/utils/AsyncShutdownHelper.java new file mode 100644 index 00000000000..da765d5e749 --- /dev/null +++ b/common/src/main/java/org/apache/rocketmq/common/utils/AsyncShutdownHelper.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.rocketmq.common.utils; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +public class AsyncShutdownHelper { + private final AtomicBoolean shutdown; + private final List targetList; + + private CountDownLatch countDownLatch; + + public AsyncShutdownHelper() { + this.targetList = new ArrayList<>(); + this.shutdown = new AtomicBoolean(false); + } + + public void addTarget(Shutdown target) { + if (shutdown.get()) { + return; + } + targetList.add(target); + } + + public AsyncShutdownHelper shutdown() { + if (shutdown.get()) { + return this; + } + if (targetList.isEmpty()) { + return this; + } + this.countDownLatch = new CountDownLatch(targetList.size()); + for (Shutdown target : targetList) { + Runnable runnable = () -> { + try { + target.shutdown(); + } catch (Exception ignored) { + + } finally { + countDownLatch.countDown(); + } + }; + new Thread(runnable).start(); + } + return this; + } + + public boolean await(long time, TimeUnit unit) throws InterruptedException { + if (shutdown.get()) { + return false; + } + try { + return this.countDownLatch.await(time, unit); + } finally { + shutdown.compareAndSet(false, true); + } + } +} diff --git a/common/src/main/java/org/apache/rocketmq/common/utils/ServiceProvider.java b/common/src/main/java/org/apache/rocketmq/common/utils/ServiceProvider.java index 65dea47b5ea..49e2c442b23 100644 --- a/common/src/main/java/org/apache/rocketmq/common/utils/ServiceProvider.java +++ b/common/src/main/java/org/apache/rocketmq/common/utils/ServiceProvider.java @@ -50,7 +50,7 @@ public class ServiceProvider { * Returns a string that uniquely identifies the specified object, including its class. *
<p>
* The returned string is of form "classname@hashcode", ie is the same as the return value of the Object.toString() - * method, but works even when the specified object's class has overidden the toString method. + * method, but works even when the specified object's class has overridden the toString method. * * @param o may be null. * @return a string of form classname@hashcode, or "null" if param o is null. diff --git a/container/pom.xml b/container/pom.xml index b9514defdb8..cc177abeea9 100644 --- a/container/pom.xml +++ b/container/pom.xml @@ -18,7 +18,7 @@ org.apache.rocketmq rocketmq-all - 5.3.1-SNAPSHOT + 5.3.2-SNAPSHOT 4.0.0 diff --git a/controller/pom.xml b/controller/pom.xml index 82b6fc7d969..7092ca2b3cd 100644 --- a/controller/pom.xml +++ b/controller/pom.xml @@ -19,7 +19,7 @@ rocketmq-all org.apache.rocketmq - 5.3.1-SNAPSHOT + 5.3.2-SNAPSHOT 4.0.0 jar diff --git a/controller/src/main/java/org/apache/rocketmq/controller/impl/DLedgerController.java b/controller/src/main/java/org/apache/rocketmq/controller/impl/DLedgerController.java index be487849ce5..3421010340a 100644 --- a/controller/src/main/java/org/apache/rocketmq/controller/impl/DLedgerController.java +++ b/controller/src/main/java/org/apache/rocketmq/controller/impl/DLedgerController.java @@ -101,7 +101,7 @@ public class DLedgerController implements Controller { private final List brokerLifecycleListeners; - // Usr for checking whether the broker is alive + // use for checking whether the broker is alive private BrokerValidPredicate brokerAlivePredicate; // use for elect a master private ElectPolicy electPolicy; diff --git a/distribution/pom.xml b/distribution/pom.xml index 60fc6170bbe..88521fbede7 100644 --- a/distribution/pom.xml +++ b/distribution/pom.xml @@ -20,7 +20,7 @@ org.apache.rocketmq rocketmq-all - 5.3.1-SNAPSHOT + 5.3.2-SNAPSHOT rocketmq-distribution rocketmq-distribution ${project.version} diff --git a/docs/cn/best_practice.md b/docs/cn/best_practice.md index 5cc5b37643f..36d6acff6bd 100755 --- a/docs/cn/best_practice.md +++ b/docs/cn/best_practice.md @@ -253,7 +253,7 @@ DefaultMQProducer、TransactionMQProducer、DefaultMQPushConsumer、DefaultMQPul | clientIP | 本机IP | 客户端本机IP地址,某些机器会发生无法识别客户端IP地址情况,需要应用在代码中强制指定 | | instanceName | DEFAULT | 客户端实例名称,客户端创建的多个Producer、Consumer实际是共用一个内部实例(这个实例包含网络连接、线程资源等) | | clientCallbackExecutorThreads | 4 | 通信层异步回调线程数 | -| pollNameServerInteval | 30000 | 轮询Name Server间隔时间,单位毫秒 | +| pollNameServerInterval | 30000 | 轮询Name Server间隔时间,单位毫秒 | | heartbeatBrokerInterval | 30000 | 向Broker发送心跳间隔时间,单位毫秒 | | persistConsumerOffsetInterval | 5000 | 持久化Consumer消费进度间隔时间,单位毫秒 | diff --git a/docs/cn/controller/design.md b/docs/cn/controller/design.md index 563a624eddc..13eba7764a6 100644 --- a/docs/cn/controller/design.md +++ b/docs/cn/controller/design.md @@ -121,13 +121,13 @@ nextTransferFromWhere + size > currentTransferEpochEndOffset,则将 selectMapp ![示意图](../image/controller/controller_design_3.png) -`current state(4byte) + Two flags(4byte) + slaveAddressLength(4byte) + slaveAddress(50byte)` +`current state(4byte) + Two flags(4byte) + slaveBrokerId(8byte)` - Current state 代表当前的 HAConnectionState,也即 HANDSHAKE。 - Two flags 是两个状态标志位,其中,isSyncFromLastFile 代表是否要从 Master 的最后一个文件开始复制,isAsyncLearner 代表该 Slave 是否是异步复制,并以 Learner 的形式接入 Master。 -- slaveAddressLength 与 slaveAddress 代表了该 Slave 的地址,用于后续加入 SyncStateSet 。 +- slaveBrokerId 代表了该 Slave 的 brokerId,用于后续加入 SyncStateSet 。 2.AutoSwitchHaConnection (Master) 会向 Slave 回送 HandShake 包,如下: diff --git a/docs/cn/image/controller/controller_design_3.png 
b/docs/cn/image/controller/controller_design_3.png index 8c475bcecf1..0379c231d46 100644 Binary files a/docs/cn/image/controller/controller_design_3.png and b/docs/cn/image/controller/controller_design_3.png differ diff --git a/docs/en/Configuration_Client.md b/docs/en/Configuration_Client.md index 4d999b2feda..4679957af5a 100644 --- a/docs/en/Configuration_Client.md +++ b/docs/en/Configuration_Client.md @@ -48,7 +48,7 @@ HTTP static server addressing is recommended, because it is simple client deploy | clientIP | local IP | Client local ip address, some machines will fail to recognize the client IP address, which needs to be enforced in the code | | instanceName | DEFAULT | Name of the client instance, Multiple producers and consumers created by the client actually share one internal instance (this instance contains network connection, thread resources, etc.). | | clientCallbackExecutorThreads | 4 | Number of communication layer asynchronous callback threads | -| pollNameServerInteval | 30000 | Polling the Name Server interval in milliseconds | +| pollNameServerInterval | 30000 | Polling the Name Server interval in milliseconds | | heartbeatBrokerInterval | 30000 | The heartbeat interval, in milliseconds, is sent to the Broker | | persistConsumerOffsetInterval | 5000 | The persistent Consumer consumes the progress interval in milliseconds | diff --git a/docs/en/controller/design.md b/docs/en/controller/design.md index ba2de58af14..af4958a4d3e 100644 --- a/docs/en/controller/design.md +++ b/docs/en/controller/design.md @@ -112,13 +112,13 @@ According to the above, we can know the AutoSwitchHaService protocol divides log ![示意图](../image/controller/controller_design_3.png) -`current state(4byte) + Two flags(4byte) + slaveAddressLength(4byte) + slaveAddress(50byte)` +`current state(4byte) + Two flags(4byte) + slaveBrokerId(8byte)` - `Current state` represents the current HAConnectionState, which is HANDSHAKE. - Two flags are two status flags, where `isSyncFromLastFile` indicates whether to start copying from the Master's last file, and `isAsyncLearner` indicates whether the Slave is an asynchronous copy and joins the Master as a Learner. -- `slaveAddressLength` and `slaveAddress` represent the address of the Slave, which will be used later to join the SyncStateSet. +- `slaveBrokerId` represent the brokerId of the Slave, which will be used later to join the SyncStateSet. 
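To make the 16-byte layout above concrete, the following is a minimal sketch of how a Slave could encode such a HANDSHAKE header with Java NIO. The class and method names are illustrative only and are not the actual AutoSwitchHaClient implementation; the field order simply follows the description above.

import java.nio.ByteBuffer;

public class HandshakeHeaderSketch {
    // current state(4byte) + Two flags(4byte) + slaveBrokerId(8byte)
    public static final int HANDSHAKE_HEADER_SIZE = 4 + 4 + 8;

    public static ByteBuffer encode(int currentState, boolean isSyncFromLastFile,
        boolean isAsyncLearner, long slaveBrokerId) {
        ByteBuffer header = ByteBuffer.allocate(HANDSHAKE_HEADER_SIZE);
        header.putInt(currentState);          // HAConnectionState, i.e. HANDSHAKE
        int flags = 0;
        if (isSyncFromLastFile) {
            flags |= 1;                       // bit 0: copy from the Master's last file
        }
        if (isAsyncLearner) {
            flags |= 1 << 1;                  // bit 1: join the Master as an async Learner
        }
        header.putInt(flags);                 // the "Two flags" field packed into one int
        header.putLong(slaveBrokerId);        // used later to join the SyncStateSet
        header.flip();
        return header;
    }
}

Packing both flags into a single int mirrors the "Two flags(4byte)" field; the receiving side would read the same three fields in order before deciding how to start replication.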
2.AutoSwitchHaConnection (Master) will send a HandShake packet back to the Slave as follows: diff --git a/docs/en/image/controller/controller_design_3.png b/docs/en/image/controller/controller_design_3.png index 8c475bcecf1..0379c231d46 100644 Binary files a/docs/en/image/controller/controller_design_3.png and b/docs/en/image/controller/controller_design_3.png differ diff --git a/example/pom.xml b/example/pom.xml index 7685a811690..19047c2f552 100644 --- a/example/pom.xml +++ b/example/pom.xml @@ -19,7 +19,7 @@ rocketmq-all org.apache.rocketmq - 5.3.1-SNAPSHOT + 5.3.2-SNAPSHOT 4.0.0 diff --git a/filter/pom.xml b/filter/pom.xml index 0acaa73f8ae..262177b61c2 100644 --- a/filter/pom.xml +++ b/filter/pom.xml @@ -20,7 +20,7 @@ rocketmq-all org.apache.rocketmq - 5.3.1-SNAPSHOT + 5.3.2-SNAPSHOT 4.0.0 diff --git a/namesrv/pom.xml b/namesrv/pom.xml index d53540601e6..012ebafe064 100644 --- a/namesrv/pom.xml +++ b/namesrv/pom.xml @@ -19,7 +19,7 @@ org.apache.rocketmq rocketmq-all - 5.3.1-SNAPSHOT + 5.3.2-SNAPSHOT 4.0.0 diff --git a/openmessaging/pom.xml b/openmessaging/pom.xml index 09ab5ed2586..8ea4745b25d 100644 --- a/openmessaging/pom.xml +++ b/openmessaging/pom.xml @@ -20,7 +20,7 @@ rocketmq-all org.apache.rocketmq - 5.3.1-SNAPSHOT + 5.3.2-SNAPSHOT 4.0.0 diff --git a/openmessaging/src/main/java/io/openmessaging/rocketmq/promise/DefaultPromise.java b/openmessaging/src/main/java/io/openmessaging/rocketmq/promise/DefaultPromise.java index 36ac27f417a..46e607a5802 100644 --- a/openmessaging/src/main/java/io/openmessaging/rocketmq/promise/DefaultPromise.java +++ b/openmessaging/src/main/java/io/openmessaging/rocketmq/promise/DefaultPromise.java @@ -82,7 +82,7 @@ public V get(final long timeout) { try { lock.wait(waitTime); } catch (InterruptedException e) { - LOG.error("promise get value interrupted,excepiton:{}", e.getMessage()); + LOG.error("promise get value interrupted,exception:{}", e.getMessage()); } if (!isDoing()) { diff --git a/pom.xml b/pom.xml index 8449bd6fb88..b18d9bbb439 100644 --- a/pom.xml +++ b/pom.xml @@ -28,7 +28,7 @@ 2012 org.apache.rocketmq rocketmq-all - 5.3.1-SNAPSHOT + 5.3.2-SNAPSHOT pom Apache RocketMQ ${project.version} http://rocketmq.apache.org/ @@ -108,7 +108,7 @@ 3.20.0-GA 4.2.2 3.12.0 - 2.7 + 2.14.0 32.0.1-jre 2.9.0 0.3.1-alpha diff --git a/proxy/pom.xml b/proxy/pom.xml index 41e6fa95f55..e608d9f587f 100644 --- a/proxy/pom.xml +++ b/proxy/pom.xml @@ -20,7 +20,7 @@ rocketmq-all org.apache.rocketmq - 5.3.1-SNAPSHOT + 5.3.2-SNAPSHOT 4.0.0 diff --git a/proxy/src/main/java/org/apache/rocketmq/proxy/common/utils/GrpcUtils.java b/proxy/src/main/java/org/apache/rocketmq/proxy/common/utils/GrpcUtils.java new file mode 100644 index 00000000000..5c50de4426e --- /dev/null +++ b/proxy/src/main/java/org/apache/rocketmq/proxy/common/utils/GrpcUtils.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.rocketmq.proxy.common.utils; + +import io.grpc.Attributes; +import io.grpc.Metadata; +import io.grpc.ServerCall; + +public class GrpcUtils { + + private GrpcUtils() { + } + + public static void putHeaderIfNotExist(Metadata headers, Metadata.Key key, T value) { + if (headers == null) { + return; + } + if (!headers.containsKey(key) && value != null) { + headers.put(key, value); + } + } + + public static T getAttribute(ServerCall call, Attributes.Key key) { + Attributes attributes = call.getAttributes(); + if (attributes == null) { + return null; + } + return attributes.get(key); + } +} diff --git a/proxy/src/main/java/org/apache/rocketmq/proxy/grpc/interceptor/AuthenticationInterceptor.java b/proxy/src/main/java/org/apache/rocketmq/proxy/grpc/interceptor/AuthenticationInterceptor.java index 28ee019fae7..e082ba6e28c 100644 --- a/proxy/src/main/java/org/apache/rocketmq/proxy/grpc/interceptor/AuthenticationInterceptor.java +++ b/proxy/src/main/java/org/apache/rocketmq/proxy/grpc/interceptor/AuthenticationInterceptor.java @@ -33,6 +33,7 @@ import org.apache.rocketmq.acl.common.AuthenticationHeader; import org.apache.rocketmq.acl.plain.PlainAccessResource; import org.apache.rocketmq.common.constant.GrpcConstants; +import org.apache.rocketmq.proxy.common.utils.GrpcUtils; import org.apache.rocketmq.proxy.config.ConfigurationManager; public class AuthenticationInterceptor implements ServerInterceptor { @@ -49,8 +50,8 @@ public ServerCall.Listener interceptCall(ServerCall call, Metada @Override public void onMessage(R message) { GeneratedMessageV3 messageV3 = (GeneratedMessageV3) message; - headers.put(GrpcConstants.RPC_NAME, messageV3.getDescriptorForType().getFullName()); - headers.put(GrpcConstants.SIMPLE_RPC_NAME, messageV3.getDescriptorForType().getName()); + GrpcUtils.putHeaderIfNotExist(headers, GrpcConstants.RPC_NAME, messageV3.getDescriptorForType().getFullName()); + GrpcUtils.putHeaderIfNotExist(headers, GrpcConstants.SIMPLE_RPC_NAME, messageV3.getDescriptorForType().getName()); if (ConfigurationManager.getProxyConfig().isEnableACL()) { try { AuthenticationHeader authenticationHeader = AuthenticationHeader.builder() @@ -85,7 +86,7 @@ protected void validate(AuthenticationHeader authenticationHeader, Metadata head if (accessResource instanceof PlainAccessResource) { PlainAccessResource plainAccessResource = (PlainAccessResource) accessResource; - headers.put(GrpcConstants.AUTHORIZATION_AK, plainAccessResource.getAccessKey()); + GrpcUtils.putHeaderIfNotExist(headers, GrpcConstants.AUTHORIZATION_AK, plainAccessResource.getAccessKey()); } } } diff --git a/proxy/src/main/java/org/apache/rocketmq/proxy/grpc/interceptor/HeaderInterceptor.java b/proxy/src/main/java/org/apache/rocketmq/proxy/grpc/interceptor/HeaderInterceptor.java index 1de2ce4f986..e3e78841559 100644 --- a/proxy/src/main/java/org/apache/rocketmq/proxy/grpc/interceptor/HeaderInterceptor.java +++ b/proxy/src/main/java/org/apache/rocketmq/proxy/grpc/interceptor/HeaderInterceptor.java @@ -27,6 +27,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.rocketmq.common.constant.HAProxyConstants; import org.apache.rocketmq.common.constant.GrpcConstants; +import org.apache.rocketmq.proxy.common.utils.GrpcUtils; import org.apache.rocketmq.proxy.grpc.constant.AttributeKeys; import java.net.InetSocketAddress; @@ -44,11 +45,11 @@ public ServerCall.Listener interceptCall( SocketAddress 
remoteSocketAddress = call.getAttributes().get(Grpc.TRANSPORT_ATTR_REMOTE_ADDR); remoteAddress = parseSocketAddress(remoteSocketAddress); } - headers.put(GrpcConstants.REMOTE_ADDRESS, remoteAddress); + GrpcUtils.putHeaderIfNotExist(headers, GrpcConstants.REMOTE_ADDRESS, remoteAddress); SocketAddress localSocketAddress = call.getAttributes().get(Grpc.TRANSPORT_ATTR_LOCAL_ADDR); String localAddress = parseSocketAddress(localSocketAddress); - headers.put(GrpcConstants.LOCAL_ADDRESS, localAddress); + GrpcUtils.putHeaderIfNotExist(headers, GrpcConstants.LOCAL_ADDRESS, localAddress); for (Attributes.Key key : call.getAttributes().keys()) { if (!StringUtils.startsWith(key.toString(), HAProxyConstants.PROXY_PROTOCOL_PREFIX)) { @@ -57,12 +58,12 @@ public ServerCall.Listener interceptCall( Metadata.Key headerKey = Metadata.Key.of(key.toString(), Metadata.ASCII_STRING_MARSHALLER); String headerValue = String.valueOf(call.getAttributes().get(key)); - headers.put(headerKey, headerValue); + GrpcUtils.putHeaderIfNotExist(headers, headerKey, headerValue); } String channelId = call.getAttributes().get(AttributeKeys.CHANNEL_ID); if (StringUtils.isNotBlank(channelId)) { - headers.put(GrpcConstants.CHANNEL_ID, channelId); + GrpcUtils.putHeaderIfNotExist(headers, GrpcConstants.CHANNEL_ID, channelId); } return next.startCall(call, headers); diff --git a/proxy/src/main/java/org/apache/rocketmq/proxy/grpc/pipeline/AuthenticationPipeline.java b/proxy/src/main/java/org/apache/rocketmq/proxy/grpc/pipeline/AuthenticationPipeline.java index 58eed91c9fa..e317b48f1ed 100644 --- a/proxy/src/main/java/org/apache/rocketmq/proxy/grpc/pipeline/AuthenticationPipeline.java +++ b/proxy/src/main/java/org/apache/rocketmq/proxy/grpc/pipeline/AuthenticationPipeline.java @@ -31,6 +31,7 @@ import org.apache.rocketmq.logging.org.slf4j.Logger; import org.apache.rocketmq.logging.org.slf4j.LoggerFactory; import org.apache.rocketmq.proxy.common.ProxyContext; +import org.apache.rocketmq.proxy.common.utils.GrpcUtils; import org.apache.rocketmq.proxy.processor.MessagingProcessor; public class AuthenticationPipeline implements RequestPipeline { @@ -73,7 +74,7 @@ protected AuthenticationContext newContext(ProxyContext context, Metadata header if (result instanceof DefaultAuthenticationContext) { DefaultAuthenticationContext defaultAuthenticationContext = (DefaultAuthenticationContext) result; if (StringUtils.isNotBlank(defaultAuthenticationContext.getUsername())) { - headers.put(GrpcConstants.AUTHORIZATION_AK, defaultAuthenticationContext.getUsername()); + GrpcUtils.putHeaderIfNotExist(headers, GrpcConstants.AUTHORIZATION_AK, defaultAuthenticationContext.getUsername()); } } return result; diff --git a/proxy/src/main/java/org/apache/rocketmq/proxy/remoting/protocol/http2proxy/HAProxyMessageForwarder.java b/proxy/src/main/java/org/apache/rocketmq/proxy/remoting/protocol/http2proxy/HAProxyMessageForwarder.java index 39d7057bddd..518868831f4 100644 --- a/proxy/src/main/java/org/apache/rocketmq/proxy/remoting/protocol/http2proxy/HAProxyMessageForwarder.java +++ b/proxy/src/main/java/org/apache/rocketmq/proxy/remoting/protocol/http2proxy/HAProxyMessageForwarder.java @@ -118,11 +118,11 @@ protected HAProxyMessage buildHAProxyMessage(Channel inboundChannel) throws Ille } } else { String remoteAddr = RemotingHelper.parseChannelRemoteAddr(inboundChannel); - sourceAddress = StringUtils.substringBefore(remoteAddr, CommonConstants.COLON); + sourceAddress = StringUtils.substringBeforeLast(remoteAddr, CommonConstants.COLON); sourcePort = 
Integer.parseInt(StringUtils.substringAfterLast(remoteAddr, CommonConstants.COLON)); String localAddr = RemotingHelper.parseChannelLocalAddr(inboundChannel); - destinationAddress = StringUtils.substringBefore(localAddr, CommonConstants.COLON); + destinationAddress = StringUtils.substringBeforeLast(localAddr, CommonConstants.COLON); destinationPort = Integer.parseInt(StringUtils.substringAfterLast(localAddr, CommonConstants.COLON)); } diff --git a/proxy/src/main/java/org/apache/rocketmq/proxy/service/message/LocalMessageService.java b/proxy/src/main/java/org/apache/rocketmq/proxy/service/message/LocalMessageService.java index 6b2ba02f7c9..a8088a95d0a 100644 --- a/proxy/src/main/java/org/apache/rocketmq/proxy/service/message/LocalMessageService.java +++ b/proxy/src/main/java/org/apache/rocketmq/proxy/service/message/LocalMessageService.java @@ -176,7 +176,8 @@ public CompletableFuture sendMessageBack(ProxyContext ctx, Rece } @Override - public CompletableFuture endTransactionOneway(ProxyContext ctx, String brokerName, EndTransactionRequestHeader requestHeader, + public CompletableFuture endTransactionOneway(ProxyContext ctx, String brokerName, + EndTransactionRequestHeader requestHeader, long timeoutMillis) { CompletableFuture future = new CompletableFuture<>(); SimpleChannel channel = channelManager.createChannel(ctx); @@ -310,9 +311,8 @@ public CompletableFuture changeInvisibleTime(ProxyContext ctx, Receip RemotingCommand command = LocalRemotingCommand.createRequestCommand(RequestCode.CHANGE_MESSAGE_INVISIBLETIME, requestHeader, ctx.getLanguage()); CompletableFuture future = new CompletableFuture<>(); try { - RemotingCommand response = brokerController.getChangeInvisibleTimeProcessor() - .processRequest(channelHandlerContext, command); - future.complete(response); + future = brokerController.getChangeInvisibleTimeProcessor() + .processRequestAsync(channelHandlerContext.channel(), command, true); } catch (Exception e) { log.error("Fail to process changeInvisibleTime command", e); future.completeExceptionally(e); diff --git a/proxy/src/test/java/org/apache/rocketmq/proxy/service/message/LocalMessageServiceTest.java b/proxy/src/test/java/org/apache/rocketmq/proxy/service/message/LocalMessageServiceTest.java index 3e3d37086b5..f7a656d7682 100644 --- a/proxy/src/test/java/org/apache/rocketmq/proxy/service/message/LocalMessageServiceTest.java +++ b/proxy/src/test/java/org/apache/rocketmq/proxy/service/message/LocalMessageServiceTest.java @@ -17,6 +17,7 @@ package org.apache.rocketmq.proxy.service.message; +import io.netty.channel.Channel; import java.net.InetSocketAddress; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; @@ -370,11 +371,11 @@ public void testChangeInvisibleTime() throws Exception { responseHeader.setReviveQid(newReviveQueueId); responseHeader.setInvisibleTime(newInvisibleTime); responseHeader.setPopTime(newPopTime); - Mockito.when(changeInvisibleTimeProcessorMock.processRequest(Mockito.any(SimpleChannelHandlerContext.class), Mockito.argThat(argument -> { + Mockito.when(changeInvisibleTimeProcessorMock.processRequestAsync(Mockito.any(Channel.class), Mockito.argThat(argument -> { boolean first = argument.getCode() == RequestCode.CHANGE_MESSAGE_INVISIBLETIME; boolean second = argument.readCustomHeader() instanceof ChangeInvisibleTimeRequestHeader; return first && second; - }))).thenReturn(remotingCommand); + }), Mockito.any(Boolean.class))).thenReturn(CompletableFuture.completedFuture(remotingCommand)); ChangeInvisibleTimeRequestHeader requestHeader = new 
ChangeInvisibleTimeRequestHeader(); CompletableFuture future = localMessageService.changeInvisibleTime(proxyContext, handle, messageId, requestHeader, 1000L); diff --git a/remoting/pom.xml b/remoting/pom.xml index 566c983ea98..65e9a852fcc 100644 --- a/remoting/pom.xml +++ b/remoting/pom.xml @@ -19,7 +19,7 @@ org.apache.rocketmq rocketmq-all - 5.3.1-SNAPSHOT + 5.3.2-SNAPSHOT 4.0.0 diff --git a/remoting/src/main/java/org/apache/rocketmq/remoting/netty/NettyRemotingAbstract.java b/remoting/src/main/java/org/apache/rocketmq/remoting/netty/NettyRemotingAbstract.java index 9f3136195b3..ffa37260594 100644 --- a/remoting/src/main/java/org/apache/rocketmq/remoting/netty/NettyRemotingAbstract.java +++ b/remoting/src/main/java/org/apache/rocketmq/remoting/netty/NettyRemotingAbstract.java @@ -39,8 +39,8 @@ import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import javax.annotation.Nullable; import org.apache.rocketmq.common.AbortProcessException; @@ -393,7 +393,7 @@ public void processResponseCommand(ChannelHandlerContext ctx, RemotingCommand cm responseFuture.release(); } } else { - log.warn("receive response, cmd={}, but not matched any request, address={}", cmd, RemotingHelper.parseChannelRemoteAddr(ctx.channel())); + log.warn("receive response, cmd={}, but not matched any request, address={}, channelId={}", cmd, RemotingHelper.parseChannelRemoteAddr(ctx.channel()), ctx.channel().id()); } } @@ -560,13 +560,13 @@ public void operationFail(Throwable throwable) { return; } requestFail(opaque); - log.warn("send a request command to channel <{}> failed.", RemotingHelper.parseChannelRemoteAddr(channel)); + log.warn("send a request command to channel <{}>, channelId={}, failed.", RemotingHelper.parseChannelRemoteAddr(channel), channel.id()); }); return future; } catch (Exception e) { responseTable.remove(opaque); responseFuture.release(); - log.warn("send a request command to channel <" + RemotingHelper.parseChannelRemoteAddr(channel) + "> Exception", e); + log.warn("send a request command to channel <{}> channelId={} Exception", RemotingHelper.parseChannelRemoteAddr(channel), channel.id(), e); future.completeExceptionally(new RemotingSendRequestException(RemotingHelper.parseChannelRemoteAddr(channel), e)); return future; } diff --git a/remoting/src/main/java/org/apache/rocketmq/remoting/netty/NettyRemotingClient.java b/remoting/src/main/java/org/apache/rocketmq/remoting/netty/NettyRemotingClient.java index 41976122b2f..ae82b09edaf 100644 --- a/remoting/src/main/java/org/apache/rocketmq/remoting/netty/NettyRemotingClient.java +++ b/remoting/src/main/java/org/apache/rocketmq/remoting/netty/NettyRemotingClient.java @@ -49,7 +49,6 @@ import java.net.InetSocketAddress; import java.net.SocketAddress; import java.security.cert.CertificateException; -import java.time.Duration; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -416,14 +415,14 @@ public void closeChannel(final String addr, final Channel channel) { boolean removeItemFromTable = true; final ChannelWrapper prevCW = this.channelTables.get(addrRemote); - LOGGER.info("closeChannel: begin close the channel[{}] Found: {}", addrRemote, prevCW != null); + LOGGER.info("closeChannel: begin close the channel[addr={}, id={}] Found: {}", addrRemote, channel.id(), 
prevCW != null); if (null == prevCW) { - LOGGER.info("closeChannel: the channel[{}] has been removed from the channel table before", addrRemote); + LOGGER.info("closeChannel: the channel[addr={}, id={}] has been removed from the channel table before", addrRemote, channel.id()); removeItemFromTable = false; } else if (prevCW.isWrapperOf(channel)) { - LOGGER.info("closeChannel: the channel[{}] has been closed before, and has been created again, nothing to do.", - addrRemote); + LOGGER.info("closeChannel: the channel[addr={}, id={}] has been closed before, and has been created again, nothing to do.", + addrRemote, channel.id()); removeItemFromTable = false; } @@ -432,7 +431,7 @@ public void closeChannel(final String addr, final Channel channel) { if (channelWrapper != null && channelWrapper.tryClose(channel)) { this.channelTables.remove(addrRemote); } - LOGGER.info("closeChannel: the channel[{}] was removed from channel table", addrRemote); + LOGGER.info("closeChannel: the channel[addr={}, id={}] was removed from channel table", addrRemote, channel.id()); } RemotingHelper.closeChannel(channel); @@ -471,7 +470,7 @@ public void closeChannel(final Channel channel) { } if (null == prevCW) { - LOGGER.info("eventCloseChannel: the channel[{}] has been removed from the channel table before", addrRemote); + LOGGER.info("eventCloseChannel: the channel[addr={}, id={}] has been removed from the channel table before", RemotingHelper.parseChannelRemoteAddr(channel), channel.id()); removeItemFromTable = false; } @@ -480,11 +479,11 @@ public void closeChannel(final Channel channel) { if (channelWrapper != null && channelWrapper.tryClose(channel)) { this.channelTables.remove(addrRemote); } - LOGGER.info("closeChannel: the channel[{}] was removed from channel table", addrRemote); + LOGGER.info("closeChannel: the channel[addr={}, id={}] was removed from channel table", addrRemote, channel.id()); RemotingHelper.closeChannel(channel); } } catch (Exception e) { - LOGGER.error("closeChannel: close the channel exception", e); + LOGGER.error("closeChannel: close the channel[id={}] exception", channel.id(), e); } finally { this.lockChannelTables.unlock(); } @@ -521,10 +520,11 @@ public void updateNameServerAddressList(List addrs) { this.namesrvAddrList.set(addrs); // should close the channel if choosed addr is not exist. 
- if (this.namesrvAddrChoosed.get() != null && !addrs.contains(this.namesrvAddrChoosed.get())) { - String namesrvAddr = this.namesrvAddrChoosed.get(); + String chosenNameServerAddr = this.namesrvAddrChoosed.get(); + if (chosenNameServerAddr != null && !addrs.contains(chosenNameServerAddr)) { + namesrvAddrChoosed.compareAndSet(chosenNameServerAddr, null); for (String addr : this.channelTables.keySet()) { - if (addr.contains(namesrvAddr)) { + if (addr.contains(chosenNameServerAddr)) { ChannelWrapper channelWrapper = this.channelTables.get(addr); if (channelWrapper != null) { channelWrapper.close(); @@ -562,9 +562,9 @@ public RemotingCommand invokeSync(String addr, final RemotingCommand request, lo boolean shouldClose = left > MIN_CLOSE_TIMEOUT_MILLIS || left > timeoutMillis / 4; if (nettyClientConfig.isClientCloseSocketIfTimeout() && shouldClose) { this.closeChannel(addr, channel); - LOGGER.warn("invokeSync: close socket because of timeout, {}ms, {}", timeoutMillis, channelRemoteAddr); + LOGGER.warn("invokeSync: close socket because of timeout, {}ms, channel[addr={}, id={}]", timeoutMillis, channelRemoteAddr, channel.id()); } - LOGGER.warn("invokeSync: wait response timeout exception, the channel[{}]", channelRemoteAddr); + LOGGER.warn("invokeSync: wait response timeout exception, the channel[addr={}, id={}]", channelRemoteAddr, channel.id()); throw e; } } else { @@ -819,10 +819,11 @@ public CompletableFuture invokeImpl(final Channel channel, final RemotingCommand response = responseFuture.getResponseCommand(); if (response.getCode() == ResponseCode.GO_AWAY) { if (nettyClientConfig.isEnableReconnectForGoAway()) { + LOGGER.info("Receive go away from channelId={}, channel={}", channel.id(), channel); ChannelWrapper channelWrapper = channelWrapperTables.computeIfPresent(channel, (channel0, channelWrapper0) -> { try { - if (channelWrapper0.reconnect()) { - LOGGER.info("Receive go away from channel {}, recreate the channel", channel0); + if (channelWrapper0.reconnect(channel0)) { + LOGGER.info("Receive go away from channelId={}, channel={}, recreate the channelId={}", channel0.id(), channel0, channelWrapper0.getChannel().id()); channelWrapperTables.put(channelWrapper0.getChannel(), channelWrapper0); } } catch (Throwable t) { @@ -830,10 +831,11 @@ public CompletableFuture invokeImpl(final Channel channel, final } return channelWrapper0; }); - if (channelWrapper != null) { + if (channelWrapper != null && !channelWrapper.isWrapperOf(channel)) { if (nettyClientConfig.isEnableTransparentRetry()) { RemotingCommand retryRequest = RemotingCommand.createRequestCommand(request.getCode(), request.readCustomHeader()); retryRequest.setBody(request.getBody()); + retryRequest.setExtFields(request.getExtFields()); if (channelWrapper.isOK()) { long duration = stopwatch.elapsed(TimeUnit.MILLISECONDS); stopwatch.stop(); @@ -865,6 +867,8 @@ public CompletableFuture invokeImpl(final Channel channel, final return future; } } + } else { + LOGGER.warn("invokeImpl receive GO_AWAY, channelWrapper is null or channel is the same in wrapper, channelId={}", channel.id()); } } } @@ -1002,7 +1006,6 @@ class ChannelWrapper { // only affected by sync or async request, oneway is not included. 
private ChannelFuture channelToClose; private long lastResponseTime; - private volatile long lastReconnectTimestamp = 0L; private final String channelAddress; public ChannelWrapper(String address, ChannelFuture channelFuture) { @@ -1021,10 +1024,7 @@ public boolean isWritable() { } public boolean isWrapperOf(Channel channel) { - if (this.channelFuture.channel() != null && this.channelFuture.channel() == channel) { - return true; - } - return false; + return this.channelFuture.channel() != null && this.channelFuture.channel() == channel; } private Channel getChannel() { @@ -1052,20 +1052,27 @@ public String getChannelAddress() { return channelAddress; } - public boolean reconnect() { + public boolean reconnect(Channel channel) { + if (!isWrapperOf(channel)) { + LOGGER.warn("channelWrapper has reconnect, so do nothing, now channelId={}, input channelId={}",getChannel().id(), channel.id()); + return false; + } if (lock.writeLock().tryLock()) { try { - if (lastReconnectTimestamp == 0L || System.currentTimeMillis() - lastReconnectTimestamp > Duration.ofSeconds(nettyClientConfig.getMaxReconnectIntervalTimeSeconds()).toMillis()) { + if (isWrapperOf(channel)) { channelToClose = channelFuture; String[] hostAndPort = getHostAndPort(channelAddress); channelFuture = fetchBootstrap(channelAddress) .connect(hostAndPort[0], Integer.parseInt(hostAndPort[1])); - lastReconnectTimestamp = System.currentTimeMillis(); return true; + } else { + LOGGER.warn("channelWrapper has reconnect, so do nothing, now channelId={}, input channelId={}",getChannel().id(), channel.id()); } } finally { lock.writeLock().unlock(); } + } else { + LOGGER.warn("channelWrapper reconnect try lock fail, now channelId={}", getChannel().id()); } return false; } @@ -1152,7 +1159,7 @@ public void connect(ChannelHandlerContext ctx, SocketAddress remoteAddress, Sock @Override public void channelActive(ChannelHandlerContext ctx) throws Exception { final String remoteAddress = RemotingHelper.parseChannelRemoteAddr(ctx.channel()); - LOGGER.info("NETTY CLIENT PIPELINE: ACTIVE, {}", remoteAddress); + LOGGER.info("NETTY CLIENT PIPELINE: ACTIVE, {}, channelId={}", remoteAddress, ctx.channel().id()); super.channelActive(ctx); if (NettyRemotingClient.this.channelEventListener != null) { @@ -1175,7 +1182,7 @@ public void disconnect(ChannelHandlerContext ctx, ChannelPromise promise) throws @Override public void close(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception { final String remoteAddress = RemotingHelper.parseChannelRemoteAddr(ctx.channel()); - LOGGER.info("NETTY CLIENT PIPELINE: CLOSE {}", remoteAddress); + LOGGER.info("NETTY CLIENT PIPELINE: CLOSE channel[addr={}, id={}]", remoteAddress, ctx.channel().id()); closeChannel(ctx.channel()); super.close(ctx, promise); NettyRemotingClient.this.failFast(ctx.channel()); @@ -1187,7 +1194,7 @@ public void close(ChannelHandlerContext ctx, ChannelPromise promise) throws Exce @Override public void channelInactive(ChannelHandlerContext ctx) throws Exception { final String remoteAddress = RemotingHelper.parseChannelRemoteAddr(ctx.channel()); - LOGGER.info("NETTY CLIENT PIPELINE: channelInactive, the channel[{}]", remoteAddress); + LOGGER.info("NETTY CLIENT PIPELINE: channelInactive, the channel[addr={}, id={}]", remoteAddress, ctx.channel().id()); closeChannel(ctx.channel()); super.channelInactive(ctx); } @@ -1198,7 +1205,7 @@ public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exc IdleStateEvent event = (IdleStateEvent) evt; if 
(event.state().equals(IdleState.ALL_IDLE)) { final String remoteAddress = RemotingHelper.parseChannelRemoteAddr(ctx.channel()); - LOGGER.warn("NETTY CLIENT PIPELINE: IDLE exception [{}]", remoteAddress); + LOGGER.warn("NETTY CLIENT PIPELINE: IDLE exception channel[addr={}, id={}]", remoteAddress, ctx.channel().id()); closeChannel(ctx.channel()); if (NettyRemotingClient.this.channelEventListener != null) { NettyRemotingClient.this @@ -1213,8 +1220,7 @@ public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exc @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { final String remoteAddress = RemotingHelper.parseChannelRemoteAddr(ctx.channel()); - LOGGER.warn("NETTY CLIENT PIPELINE: exceptionCaught {}", remoteAddress); - LOGGER.warn("NETTY CLIENT PIPELINE: exceptionCaught exception.", cause); + LOGGER.warn("NETTY CLIENT PIPELINE: exceptionCaught channel[addr={}, id={}]", remoteAddress, ctx.channel().id(), cause); closeChannel(ctx.channel()); if (NettyRemotingClient.this.channelEventListener != null) { NettyRemotingClient.this.putNettyEvent(new NettyEvent(NettyEventType.EXCEPTION, remoteAddress, ctx.channel())); diff --git a/remoting/src/main/java/org/apache/rocketmq/remoting/netty/NettyRemotingServer.java b/remoting/src/main/java/org/apache/rocketmq/remoting/netty/NettyRemotingServer.java index 51f8b85009e..cbf25c23c60 100644 --- a/remoting/src/main/java/org/apache/rocketmq/remoting/netty/NettyRemotingServer.java +++ b/remoting/src/main/java/org/apache/rocketmq/remoting/netty/NettyRemotingServer.java @@ -270,8 +270,9 @@ public void run(Timeout timeout) { */ protected ChannelPipeline configChannel(SocketChannel ch) { return ch.pipeline() - .addLast(defaultEventExecutorGroup, HANDSHAKE_HANDLER_NAME, new HandshakeHandler()) - .addLast(defaultEventExecutorGroup, + .addLast(nettyServerConfig.isServerNettyWorkerGroupEnable() ? defaultEventExecutorGroup : null, + HANDSHAKE_HANDLER_NAME, new HandshakeHandler()) + .addLast(nettyServerConfig.isServerNettyWorkerGroupEnable() ? 
defaultEventExecutorGroup : null, encoder, new NettyDecoder(), distributionHandler, @@ -782,16 +783,16 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception private void handleWithMessage(HAProxyMessage msg, Channel channel) { try { if (StringUtils.isNotBlank(msg.sourceAddress())) { - channel.attr(AttributeKeys.PROXY_PROTOCOL_ADDR).set(msg.sourceAddress()); + RemotingHelper.setPropertyToAttr(channel, AttributeKeys.PROXY_PROTOCOL_ADDR, msg.sourceAddress()); } if (msg.sourcePort() > 0) { - channel.attr(AttributeKeys.PROXY_PROTOCOL_PORT).set(String.valueOf(msg.sourcePort())); + RemotingHelper.setPropertyToAttr(channel, AttributeKeys.PROXY_PROTOCOL_PORT, String.valueOf(msg.sourcePort())); } if (StringUtils.isNotBlank(msg.destinationAddress())) { - channel.attr(AttributeKeys.PROXY_PROTOCOL_SERVER_ADDR).set(msg.destinationAddress()); + RemotingHelper.setPropertyToAttr(channel, AttributeKeys.PROXY_PROTOCOL_SERVER_ADDR, msg.destinationAddress()); } if (msg.destinationPort() > 0) { - channel.attr(AttributeKeys.PROXY_PROTOCOL_SERVER_PORT).set(String.valueOf(msg.destinationPort())); + RemotingHelper.setPropertyToAttr(channel, AttributeKeys.PROXY_PROTOCOL_SERVER_PORT, String.valueOf(msg.destinationPort())); } if (CollectionUtils.isNotEmpty(msg.tlvs())) { msg.tlvs().forEach(tlv -> { @@ -811,6 +812,6 @@ protected void handleHAProxyTLV(HAProxyTLV tlv, Channel channel) { } AttributeKey key = AttributeKeys.valueOf( HAProxyConstants.PROXY_PROTOCOL_TLV_PREFIX + String.format("%02x", tlv.typeByteValue())); - channel.attr(key).set(new String(valueBytes, CharsetUtil.UTF_8)); + RemotingHelper.setPropertyToAttr(channel, key, new String(valueBytes, CharsetUtil.UTF_8)); } } diff --git a/remoting/src/main/java/org/apache/rocketmq/remoting/netty/NettyServerConfig.java b/remoting/src/main/java/org/apache/rocketmq/remoting/netty/NettyServerConfig.java index 6564404b920..664dee8371c 100644 --- a/remoting/src/main/java/org/apache/rocketmq/remoting/netty/NettyServerConfig.java +++ b/remoting/src/main/java/org/apache/rocketmq/remoting/netty/NettyServerConfig.java @@ -36,6 +36,7 @@ public class NettyServerConfig implements Cloneable { private int writeBufferHighWaterMark = NettySystemConfig.writeBufferHighWaterMark; private int writeBufferLowWaterMark = NettySystemConfig.writeBufferLowWaterMark; private int serverSocketBacklog = NettySystemConfig.socketBacklog; + private boolean serverNettyWorkerGroupEnable = true; private boolean serverPooledByteBufAllocatorEnable = true; private boolean enableShutdownGracefully = false; @@ -175,6 +176,14 @@ public void setWriteBufferHighWaterMark(int writeBufferHighWaterMark) { this.writeBufferHighWaterMark = writeBufferHighWaterMark; } + public boolean isServerNettyWorkerGroupEnable() { + return serverNettyWorkerGroupEnable; + } + + public void setServerNettyWorkerGroupEnable(boolean serverNettyWorkerGroupEnable) { + this.serverNettyWorkerGroupEnable = serverNettyWorkerGroupEnable; + } + public boolean isEnableShutdownGracefully() { return enableShutdownGracefully; } diff --git a/remoting/src/main/java/org/apache/rocketmq/remoting/protocol/ForbiddenType.java b/remoting/src/main/java/org/apache/rocketmq/remoting/protocol/ForbiddenType.java index 0701dc57fc5..7c561f5721a 100644 --- a/remoting/src/main/java/org/apache/rocketmq/remoting/protocol/ForbiddenType.java +++ b/remoting/src/main/java/org/apache/rocketmq/remoting/protocol/ForbiddenType.java @@ -37,11 +37,11 @@ public interface ForbiddenType { */ int TOPIC_FORBIDDEN = 3; /** - * 4=forbidden by brocasting 
mode + * 4=forbidden by broadcasting mode */ int BROADCASTING_DISABLE_FORBIDDEN = 4; /** - * 5=forbidden for a substription(group with a topic) + * 5=forbidden for a subscription(group with a topic) */ int SUBSCRIPTION_FORBIDDEN = 5; diff --git a/remoting/src/main/java/org/apache/rocketmq/remoting/protocol/RequestCode.java b/remoting/src/main/java/org/apache/rocketmq/remoting/protocol/RequestCode.java index f45ff6fa484..cfc5cc22785 100644 --- a/remoting/src/main/java/org/apache/rocketmq/remoting/protocol/RequestCode.java +++ b/remoting/src/main/java/org/apache/rocketmq/remoting/protocol/RequestCode.java @@ -217,6 +217,7 @@ public class RequestCode { public static final int GET_SUBSCRIPTIONGROUP_CONFIG = 352; public static final int UPDATE_AND_GET_GROUP_FORBIDDEN = 353; + public static final int CHECK_ROCKSDB_CQ_WRITE_PROGRESS = 354; public static final int LITE_PULL_MESSAGE = 361; diff --git a/remoting/src/main/java/org/apache/rocketmq/remoting/protocol/body/CheckRocksdbCqWriteProgressResponseBody.java b/remoting/src/main/java/org/apache/rocketmq/remoting/protocol/body/CheckRocksdbCqWriteProgressResponseBody.java new file mode 100644 index 00000000000..76719ac1a24 --- /dev/null +++ b/remoting/src/main/java/org/apache/rocketmq/remoting/protocol/body/CheckRocksdbCqWriteProgressResponseBody.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.rocketmq.remoting.protocol.body; + +import org.apache.rocketmq.remoting.protocol.RemotingSerializable; + +public class CheckRocksdbCqWriteProgressResponseBody extends RemotingSerializable { + + String diffResult; + + public String getDiffResult() { + return diffResult; + } + + public void setDiffResult(String diffResult) { + this.diffResult = diffResult; + } + + +} diff --git a/remoting/src/main/java/org/apache/rocketmq/remoting/protocol/header/CheckRocksdbCqWriteProgressRequestHeader.java b/remoting/src/main/java/org/apache/rocketmq/remoting/protocol/header/CheckRocksdbCqWriteProgressRequestHeader.java new file mode 100644 index 00000000000..fee158b4976 --- /dev/null +++ b/remoting/src/main/java/org/apache/rocketmq/remoting/protocol/header/CheckRocksdbCqWriteProgressRequestHeader.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.rocketmq.remoting.protocol.header; + +import org.apache.rocketmq.common.action.Action; +import org.apache.rocketmq.common.action.RocketMQAction; +import org.apache.rocketmq.common.resource.ResourceType; +import org.apache.rocketmq.common.resource.RocketMQResource; +import org.apache.rocketmq.remoting.CommandCustomHeader; +import org.apache.rocketmq.remoting.annotation.CFNotNull; +import org.apache.rocketmq.remoting.exception.RemotingCommandException; +import org.apache.rocketmq.remoting.protocol.RequestCode; + +@RocketMQAction(value = RequestCode.CHECK_ROCKSDB_CQ_WRITE_PROGRESS, action = Action.GET) +public class CheckRocksdbCqWriteProgressRequestHeader implements CommandCustomHeader { + + @CFNotNull + @RocketMQResource(ResourceType.TOPIC) + private String topic; + + @Override + public void checkFields() throws RemotingCommandException { + + } + + public String getTopic() { + return topic; + } + + public void setTopic(String topic) { + this.topic = topic; + } +} diff --git a/remoting/src/main/java/org/apache/rocketmq/remoting/protocol/header/ResetOffsetRequestHeader.java b/remoting/src/main/java/org/apache/rocketmq/remoting/protocol/header/ResetOffsetRequestHeader.java index de9432ca515..f72fe57136c 100644 --- a/remoting/src/main/java/org/apache/rocketmq/remoting/protocol/header/ResetOffsetRequestHeader.java +++ b/remoting/src/main/java/org/apache/rocketmq/remoting/protocol/header/ResetOffsetRequestHeader.java @@ -31,11 +31,11 @@ public class ResetOffsetRequestHeader extends TopicQueueRequestHeader { @CFNotNull @RocketMQResource(ResourceType.GROUP) - private String topic; + private String group; @CFNotNull @RocketMQResource(ResourceType.TOPIC) - private String group; + private String topic; private int queueId = -1; diff --git a/remoting/src/main/java/org/apache/rocketmq/remoting/rpc/RpcClientImpl.java b/remoting/src/main/java/org/apache/rocketmq/remoting/rpc/RpcClientImpl.java index bca2d79d995..c8b404dd696 100644 --- a/remoting/src/main/java/org/apache/rocketmq/remoting/rpc/RpcClientImpl.java +++ b/remoting/src/main/java/org/apache/rocketmq/remoting/rpc/RpcClientImpl.java @@ -174,6 +174,7 @@ public void operationSucceed(RemotingCommand response) { PullMessageResponseHeader responseHeader = (PullMessageResponseHeader) response.decodeCommandCustomHeader(PullMessageResponseHeader.class); rpcResponsePromise.setSuccess(new RpcResponse(response.getCode(), responseHeader, response.getBody())); + break; default: RpcResponse rpcResponse = new RpcResponse(new RpcException(response.getCode(), "unexpected remote response code")); rpcResponsePromise.setSuccess(rpcResponse); diff --git a/srvutil/pom.xml b/srvutil/pom.xml index 562a5ea2a33..f6c5b3f54d6 100644 --- a/srvutil/pom.xml +++ b/srvutil/pom.xml @@ -19,7 +19,7 @@ org.apache.rocketmq rocketmq-all - 5.3.1-SNAPSHOT + 5.3.2-SNAPSHOT 4.0.0 diff --git a/store/pom.xml b/store/pom.xml index 6de01626772..d49de5ae267 100644 --- a/store/pom.xml +++ b/store/pom.xml @@ -19,7 +19,7 @@ org.apache.rocketmq rocketmq-all - 5.3.1-SNAPSHOT + 5.3.2-SNAPSHOT 4.0.0 diff --git 
a/store/src/main/java/org/apache/rocketmq/store/AllocateMappedFileService.java b/store/src/main/java/org/apache/rocketmq/store/AllocateMappedFileService.java index 3dbc274ef00..d9cd602a65c 100644 --- a/store/src/main/java/org/apache/rocketmq/store/AllocateMappedFileService.java +++ b/store/src/main/java/org/apache/rocketmq/store/AllocateMappedFileService.java @@ -132,7 +132,7 @@ public void shutdown() { super.shutdown(true); for (AllocateRequest req : this.requestTable.values()) { if (req.mappedFile != null) { - log.info("delete pre allocated maped file, {}", req.mappedFile.getFileName()); + log.info("delete pre allocated mapped file, {}", req.mappedFile.getFileName()); req.mappedFile.destroy(1000); } } diff --git a/store/src/main/java/org/apache/rocketmq/store/CommitLog.java b/store/src/main/java/org/apache/rocketmq/store/CommitLog.java index f707d8fbd87..972e71aadd8 100644 --- a/store/src/main/java/org/apache/rocketmq/store/CommitLog.java +++ b/store/src/main/java/org/apache/rocketmq/store/CommitLog.java @@ -61,6 +61,7 @@ import org.apache.rocketmq.store.ha.HAService; import org.apache.rocketmq.store.ha.autoswitch.AutoSwitchHAService; import org.apache.rocketmq.store.logfile.MappedFile; +import org.apache.rocketmq.store.queue.MultiDispatchUtils; import org.apache.rocketmq.store.util.LibC; import org.rocksdb.RocksDBException; @@ -1834,12 +1835,13 @@ class DefaultAppendMessageCallback implements AppendMessageCallback { private static final int END_FILE_MIN_BLANK_LENGTH = 4 + 4; // Store the message content private final ByteBuffer msgStoreItemMemory; - private final int crc32ReservedLength = enabledAppendPropCRC ? CommitLog.CRC32_RESERVED_LEN : 0; + private final int crc32ReservedLength; private final MessageStoreConfig messageStoreConfig; DefaultAppendMessageCallback(MessageStoreConfig messageStoreConfig) { this.msgStoreItemMemory = ByteBuffer.allocate(END_FILE_MIN_BLANK_LENGTH); this.messageStoreConfig = messageStoreConfig; + this.crc32ReservedLength = messageStoreConfig.isEnabledAppendPropCRC() ? CommitLog.CRC32_RESERVED_LEN : 0; } public AppendMessageResult handlePropertiesForLmqMsg(ByteBuffer preEncodeBuffer, @@ -1902,7 +1904,7 @@ public AppendMessageResult doAppend(final long fileFromOffset, final ByteBuffer // STORETIMESTAMP + STOREHOSTADDRESS + OFFSET
ByteBuffer preEncodeBuffer = msgInner.getEncodedBuff(); - boolean isMultiDispatchMsg = messageStoreConfig.isEnableMultiDispatch() && CommitLog.isMultiDispatchMsg(msgInner); + final boolean isMultiDispatchMsg = CommitLog.isMultiDispatchMsg(messageStoreConfig, msgInner); if (isMultiDispatchMsg) { AppendMessageResult appendMessageResult = handlePropertiesForLmqMsg(preEncodeBuffer, msgInner); if (appendMessageResult != null) { @@ -2243,8 +2245,9 @@ public FlushManager getFlushManager() { return flushManager; } - public static boolean isMultiDispatchMsg(MessageExtBrokerInner msg) { - return StringUtils.isNoneBlank(msg.getProperty(MessageConst.PROPERTY_INNER_MULTI_DISPATCH)) && !msg.getTopic().startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX); + public static boolean isMultiDispatchMsg(MessageStoreConfig messageStoreConfig, MessageExtBrokerInner msg) { + return StringUtils.isNotBlank(msg.getProperty(MessageConst.PROPERTY_INNER_MULTI_DISPATCH)) && + MultiDispatchUtils.isNeedHandleMultiDispatch(messageStoreConfig, msg.getTopic()); } private boolean isCloseReadAhead() { diff --git a/store/src/main/java/org/apache/rocketmq/store/DefaultMessageStore.java b/store/src/main/java/org/apache/rocketmq/store/DefaultMessageStore.java index f159c31a7be..8b46c7f5ce4 100644 --- a/store/src/main/java/org/apache/rocketmq/store/DefaultMessageStore.java +++ b/store/src/main/java/org/apache/rocketmq/store/DefaultMessageStore.java @@ -163,11 +163,13 @@ public class DefaultMessageStore implements MessageStore { private volatile boolean shutdown = true; protected boolean notifyMessageArriveInBatch = false; - private StoreCheckpoint storeCheckpoint; + protected StoreCheckpoint storeCheckpoint; private TimerMessageStore timerMessageStore; private final LinkedList dispatcherList; + private RocksDBMessageStore rocksDBMessageStore; + private RandomAccessFile lockFile; private FileLock lock; @@ -354,12 +356,7 @@ public boolean load() { } if (result) { - this.storeCheckpoint = - new StoreCheckpoint( - StorePathConfigHelper.getStoreCheckpoint(this.messageStoreConfig.getStorePathRootDir())); - this.masterFlushedOffset = this.storeCheckpoint.getMasterFlushedOffset(); - setConfirmOffset(this.storeCheckpoint.getConfirmPhyOffset()); - + loadCheckPoint(); result = this.indexService.load(lastExitOK); this.recover(lastExitOK); LOGGER.info("message store recover end, and the max phy offset = {}", this.getMaxPhyOffset()); @@ -381,6 +378,14 @@ public boolean load() { return result; } + public void loadCheckPoint() throws IOException { + this.storeCheckpoint = + new StoreCheckpoint( + StorePathConfigHelper.getStoreCheckpoint(this.messageStoreConfig.getStorePathRootDir())); + this.masterFlushedOffset = this.storeCheckpoint.getMasterFlushedOffset(); + setConfirmOffset(this.storeCheckpoint.getConfirmPhyOffset()); + } + /** * @throws Exception */ @@ -511,6 +516,10 @@ public void shutdown() { this.compactionService.shutdown(); } + if (messageStoreConfig.isRocksdbCQDoubleWriteEnable()) { + this.rocksDBMessageStore.consumeQueueStore.shutdown(); + } + this.flushConsumeQueueService.shutdown(); this.allocateMappedFileService.shutdown(); this.storeCheckpoint.flush(); @@ -985,7 +994,7 @@ public long getMaxOffsetInQueue(String topic, int queueId) { @Override public long getMaxOffsetInQueue(String topic, int queueId, boolean committed) { if (committed) { - ConsumeQueueInterface logic = this.findConsumeQueue(topic, queueId); + ConsumeQueueInterface logic = this.getConsumeQueue(topic, queueId); if (logic != null) { return logic.getMaxOffsetInQueue(); } @@ 
-1021,7 +1030,7 @@ public void setTimerMessageStore(TimerMessageStore timerMessageStore) { @Override public long getCommitLogOffsetInQueue(String topic, int queueId, long consumeQueueOffset) { - ConsumeQueueInterface consumeQueue = findConsumeQueue(topic, queueId); + ConsumeQueueInterface consumeQueue = getConsumeQueue(topic, queueId); if (consumeQueue != null) { CqUnit cqUnit = consumeQueue.get(consumeQueueOffset); if (cqUnit != null) { @@ -1157,7 +1166,7 @@ public boolean getLastMappedFile(long startOffset) { @Override public long getEarliestMessageTime(String topic, int queueId) { - ConsumeQueueInterface logicQueue = this.findConsumeQueue(topic, queueId); + ConsumeQueueInterface logicQueue = this.getConsumeQueue(topic, queueId); if (logicQueue != null) { Pair pair = logicQueue.getEarliestUnitAndStoreTime(); if (pair != null && pair.getObject2() != null) { @@ -1189,7 +1198,7 @@ public long getEarliestMessageTime() { @Override public long getMessageStoreTimeStamp(String topic, int queueId, long consumeQueueOffset) { - ConsumeQueueInterface logicQueue = this.findConsumeQueue(topic, queueId); + ConsumeQueueInterface logicQueue = this.getConsumeQueue(topic, queueId); if (logicQueue != null) { Pair pair = logicQueue.getCqUnitAndStoreTime(consumeQueueOffset); if (pair != null && pair.getObject2() != null) { @@ -1207,12 +1216,12 @@ public CompletableFuture getMessageStoreTimeStampAsync(String topic, int q @Override public long getMessageTotalInQueue(String topic, int queueId) { - ConsumeQueueInterface logicQueue = this.findConsumeQueue(topic, queueId); + ConsumeQueueInterface logicQueue = this.getConsumeQueue(topic, queueId); if (logicQueue != null) { return logicQueue.getMessageTotalInQueue(); } - return -1; + return 0; } @Override @@ -1496,7 +1505,7 @@ public boolean checkInDiskByConsumeOffset(final String topic, final int queueId, final long maxOffsetPy = this.commitLog.getMaxOffset(); - ConsumeQueueInterface consumeQueue = findConsumeQueue(topic, queueId); + ConsumeQueueInterface consumeQueue = getConsumeQueue(topic, queueId); if (consumeQueue != null) { CqUnit cqUnit = consumeQueue.get(consumeOffset); @@ -1512,7 +1521,7 @@ public boolean checkInDiskByConsumeOffset(final String topic, final int queueId, @Override public boolean checkInMemByConsumeOffset(final String topic, final int queueId, long consumeOffset, int batchSize) { - ConsumeQueueInterface consumeQueue = findConsumeQueue(topic, queueId); + ConsumeQueueInterface consumeQueue = getConsumeQueue(topic, queueId); if (consumeQueue != null) { CqUnit firstCQItem = consumeQueue.get(consumeOffset); if (firstCQItem == null) { @@ -3251,6 +3260,17 @@ public HARuntimeInfo getHARuntimeInfo() { } } + public void enableRocksdbCQWrite() { + try { + RocksDBMessageStore store = new RocksDBMessageStore(this.messageStoreConfig, this.brokerStatsManager, this.messageArrivingListener, this.brokerConfig, this.topicConfigTable); + this.rocksDBMessageStore = store; + store.loadAndStartConsumerServiceOnly(); + addDispatcher(store.getDispatcherBuildRocksdbConsumeQueue()); + } catch (Exception e) { + LOGGER.error("enableRocksdbCqWrite error", e); + } + } + public int getMaxDelayLevel() { return maxDelayLevel; } @@ -3338,4 +3358,12 @@ public boolean isTransientStorePoolEnable() { public long getReputFromOffset() { return this.reputMessageService.getReputFromOffset(); } + + public RocksDBMessageStore getRocksDBMessageStore() { + return this.rocksDBMessageStore; + } + + public ConsumeQueueStoreInterface getConsumeQueueStore() { + return consumeQueueStore; + } } 
diff --git a/store/src/main/java/org/apache/rocketmq/store/MessageExtEncoder.java b/store/src/main/java/org/apache/rocketmq/store/MessageExtEncoder.java index 20e9a652b7e..5c74918d9e6 100644 --- a/store/src/main/java/org/apache/rocketmq/store/MessageExtEncoder.java +++ b/store/src/main/java/org/apache/rocketmq/store/MessageExtEncoder.java @@ -175,7 +175,7 @@ public PutMessageResult encodeWithoutProperties(MessageExtBrokerInner msgInner) public PutMessageResult encode(MessageExtBrokerInner msgInner) { this.byteBuf.clear(); - if (messageStoreConfig.isEnableMultiDispatch() && CommitLog.isMultiDispatchMsg(msgInner)) { + if (CommitLog.isMultiDispatchMsg(messageStoreConfig, msgInner)) { return encodeWithoutProperties(msgInner); } diff --git a/store/src/main/java/org/apache/rocketmq/store/RocksDBMessageStore.java b/store/src/main/java/org/apache/rocketmq/store/RocksDBMessageStore.java index 6141b778bf7..21f8d45c9d9 100644 --- a/store/src/main/java/org/apache/rocketmq/store/RocksDBMessageStore.java +++ b/store/src/main/java/org/apache/rocketmq/store/RocksDBMessageStore.java @@ -16,16 +16,16 @@ */ package org.apache.rocketmq.store; +import io.opentelemetry.api.common.AttributesBuilder; +import io.opentelemetry.api.metrics.Meter; import java.io.IOException; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.function.Supplier; - -import io.opentelemetry.api.common.AttributesBuilder; -import io.opentelemetry.api.metrics.Meter; import org.apache.rocketmq.common.BrokerConfig; import org.apache.rocketmq.common.TopicConfig; import org.apache.rocketmq.common.UtilAll; +import org.apache.rocketmq.common.sysflag.MessageSysFlag; import org.apache.rocketmq.store.config.MessageStoreConfig; import org.apache.rocketmq.store.config.StorePathConfigHelper; import org.apache.rocketmq.store.metrics.DefaultStoreMetricsManager; @@ -39,6 +39,8 @@ public class RocksDBMessageStore extends DefaultMessageStore { + private CommitLogDispatcherBuildRocksdbConsumeQueue dispatcherBuildRocksdbConsumeQueue; + public RocksDBMessageStore(final MessageStoreConfig messageStoreConfig, final BrokerStatsManager brokerStatsManager, final MessageArrivingListener messageArrivingListener, final BrokerConfig brokerConfig, final ConcurrentMap topicConfigTable) throws IOException { @@ -166,16 +168,46 @@ public void run() { } } - @Override - public long estimateMessageCount(String topic, int queueId, long from, long to, MessageFilter filter) { - // todo - return 0; - } - @Override public void initMetrics(Meter meter, Supplier attributesBuilderSupplier) { DefaultStoreMetricsManager.init(meter, attributesBuilderSupplier, this); // Also add some metrics for rocksdb's monitoring. 
RocksDBStoreMetricsManager.init(meter, attributesBuilderSupplier, this); } + + public CommitLogDispatcherBuildRocksdbConsumeQueue getDispatcherBuildRocksdbConsumeQueue() { + return dispatcherBuildRocksdbConsumeQueue; + } + + class CommitLogDispatcherBuildRocksdbConsumeQueue implements CommitLogDispatcher { + @Override + public void dispatch(DispatchRequest request) throws RocksDBException { + final int tranType = MessageSysFlag.getTransactionValue(request.getSysFlag()); + switch (tranType) { + case MessageSysFlag.TRANSACTION_NOT_TYPE: + case MessageSysFlag.TRANSACTION_COMMIT_TYPE: + putMessagePositionInfo(request); + break; + case MessageSysFlag.TRANSACTION_PREPARED_TYPE: + case MessageSysFlag.TRANSACTION_ROLLBACK_TYPE: + break; + } + } + } + + public void loadAndStartConsumerServiceOnly() { + try { + this.dispatcherBuildRocksdbConsumeQueue = new CommitLogDispatcherBuildRocksdbConsumeQueue(); + boolean loadResult = this.consumeQueueStore.load(); + if (!loadResult) { + throw new RuntimeException("load consume queue failed"); + } + super.loadCheckPoint(); + this.consumeQueueStore.start(); + } catch (Exception e) { + ERROR_LOG.error("loadAndStartConsumerServiceOnly error", e); + throw new RuntimeException(e); + } + } + } diff --git a/store/src/main/java/org/apache/rocketmq/store/config/MessageStoreConfig.java b/store/src/main/java/org/apache/rocketmq/store/config/MessageStoreConfig.java index 0b45d92418e..5195868e0f1 100644 --- a/store/src/main/java/org/apache/rocketmq/store/config/MessageStoreConfig.java +++ b/store/src/main/java/org/apache/rocketmq/store/config/MessageStoreConfig.java @@ -424,6 +424,53 @@ public class MessageStoreConfig { private boolean putConsumeQueueDataByFileChannel = true; + private boolean transferOffsetJsonToRocksdb = false; + + private boolean rocksdbCQDoubleWriteEnable = false; + + private int batchWriteKvCqSize = 16; + + /** + * If ConsumeQueueStore is RocksDB based, this option is to configure bottom-most tier compression type. + * The following values are valid: + *

+ * <ul> + * <li>snappy</li> + * <li>z</li> + * <li>bzip2</li> + * <li>lz4</li> + * <li>lz4hc</li> + * <li>xpress</li> + * <li>zstd</li> + * </ul>
+ * + * LZ4 is the recommended one. + */ + private String bottomMostCompressionTypeForConsumeQueueStore = "zstd"; + + public int getBatchWriteKvCqSize() { + return batchWriteKvCqSize; + } + + public void setBatchWriteKvCqSize(int batchWriteKvCqSize) { + this.batchWriteKvCqSize = batchWriteKvCqSize; + } + + public boolean isRocksdbCQDoubleWriteEnable() { + return rocksdbCQDoubleWriteEnable; + } + + public void setRocksdbCQDoubleWriteEnable(boolean rocksdbWriteEnable) { + this.rocksdbCQDoubleWriteEnable = rocksdbWriteEnable; + } + + public boolean isTransferOffsetJsonToRocksdb() { + return transferOffsetJsonToRocksdb; + } + + public void setTransferOffsetJsonToRocksdb(boolean transferOffsetJsonToRocksdb) { + this.transferOffsetJsonToRocksdb = transferOffsetJsonToRocksdb; + } + public boolean isEnabledAppendPropCRC() { return enabledAppendPropCRC; } @@ -1854,4 +1901,11 @@ public void setTransferMetadataJsonToRocksdb(boolean transferMetadataJsonToRocks this.transferMetadataJsonToRocksdb = transferMetadataJsonToRocksdb; } + public String getBottomMostCompressionTypeForConsumeQueueStore() { + return bottomMostCompressionTypeForConsumeQueueStore; + } + + public void setBottomMostCompressionTypeForConsumeQueueStore(String bottomMostCompressionTypeForConsumeQueueStore) { + this.bottomMostCompressionTypeForConsumeQueueStore = bottomMostCompressionTypeForConsumeQueueStore; + } } diff --git a/store/src/main/java/org/apache/rocketmq/store/plugin/AbstractPluginMessageStore.java b/store/src/main/java/org/apache/rocketmq/store/plugin/AbstractPluginMessageStore.java index 2f2ce981257..2401257c306 100644 --- a/store/src/main/java/org/apache/rocketmq/store/plugin/AbstractPluginMessageStore.java +++ b/store/src/main/java/org/apache/rocketmq/store/plugin/AbstractPluginMessageStore.java @@ -661,4 +661,8 @@ public void recoverTopicQueueTable() { public void notifyMessageArriveIfNecessary(DispatchRequest dispatchRequest) { next.notifyMessageArriveIfNecessary(dispatchRequest); } + + public MessageStore getNext() { + return next; + } } diff --git a/store/src/main/java/org/apache/rocketmq/store/queue/CqUnit.java b/store/src/main/java/org/apache/rocketmq/store/queue/CqUnit.java index b8865fd9195..34f5cb142b6 100644 --- a/store/src/main/java/org/apache/rocketmq/store/queue/CqUnit.java +++ b/store/src/main/java/org/apache/rocketmq/store/queue/CqUnit.java @@ -109,6 +109,7 @@ public String toString() { ", size=" + size + ", pos=" + pos + ", batchNum=" + batchNum + + ", tagsCode=" + tagsCode + ", compactedOffset=" + compactedOffset + '}'; } diff --git a/store/src/main/java/org/apache/rocketmq/store/queue/RocksDBConsumeQueue.java b/store/src/main/java/org/apache/rocketmq/store/queue/RocksDBConsumeQueue.java index 5a981bb4df1..83ba7bebad0 100644 --- a/store/src/main/java/org/apache/rocketmq/store/queue/RocksDBConsumeQueue.java +++ b/store/src/main/java/org/apache/rocketmq/store/queue/RocksDBConsumeQueue.java @@ -18,7 +18,6 @@ import java.nio.ByteBuffer; import java.util.List; - import org.apache.rocketmq.common.BoundaryType; import org.apache.rocketmq.common.Pair; import org.apache.rocketmq.common.attribute.CQType; @@ -223,10 +222,47 @@ public void increaseQueueOffset(QueueOffsetOperator queueOffsetOperator, Message @Override public long estimateMessageCount(long from, long to, MessageFilter filter) { - // todo - return 0; + // Check from and to offset validity + Pair fromUnit = getCqUnitAndStoreTime(from); + if (fromUnit == null) { + return -1; + } + + if (from >= to) { + return -1; + } + + if (to > getMaxOffsetInQueue()) 
{ + to = getMaxOffsetInQueue(); + } + + int maxSampleSize = messageStore.getMessageStoreConfig().getMaxConsumeQueueScan(); + int sampleSize = to - from > maxSampleSize ? maxSampleSize : (int) (to - from); + + int matchThreshold = messageStore.getMessageStoreConfig().getSampleCountThreshold(); + int matchSize = 0; + + for (int i = 0; i < sampleSize; i++) { + long index = from + i; + Pair pair = getCqUnitAndStoreTime(index); + if (pair == null) { + continue; + } + CqUnit cqUnit = pair.getObject1(); + if (filter.isMatchedByConsumeQueue(cqUnit.getTagsCode(), cqUnit.getCqExtUnit())) { + matchSize++; + // if matchSize is plenty, early exit estimate + if (matchSize > matchThreshold) { + sampleSize = i; + break; + } + } + } + // Make sure the second half is a floating point number, otherwise it will be truncated to 0 + return sampleSize == 0 ? 0 : (long) ((to - from) * (matchSize / (sampleSize * 1.0))); } + @Override public long getMinOffsetInQueue() { return this.messageStore.getMinOffsetInQueue(this.topic, this.queueId); @@ -311,7 +347,7 @@ public CqUnit getEarliestUnit() { public CqUnit getLatestUnit() { try { long maxOffset = this.messageStore.getQueueStore().getMaxOffsetInQueue(topic, queueId); - return get(maxOffset); + return get(maxOffset > 0 ? maxOffset - 1 : maxOffset); } catch (RocksDBException e) { ERROR_LOG.error("getLatestUnit Failed. topic: {}, queueId: {}, {}", topic, queueId, e.getMessage()); } diff --git a/store/src/main/java/org/apache/rocketmq/store/queue/RocksDBConsumeQueueStore.java b/store/src/main/java/org/apache/rocketmq/store/queue/RocksDBConsumeQueueStore.java index 3c6b91ec018..17b845d8176 100644 --- a/store/src/main/java/org/apache/rocketmq/store/queue/RocksDBConsumeQueueStore.java +++ b/store/src/main/java/org/apache/rocketmq/store/queue/RocksDBConsumeQueueStore.java @@ -28,7 +28,6 @@ import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; - import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.StringUtils; import org.apache.rocketmq.common.BoundaryType; @@ -56,7 +55,7 @@ public class RocksDBConsumeQueueStore extends AbstractConsumeQueueStore { public static final byte CTRL_1 = '\u0001'; public static final byte CTRL_2 = '\u0002'; - private static final int BATCH_SIZE = 16; + private final int batchSize; public static final int MAX_KEY_LEN = 300; private final ScheduledExecutorService scheduledExecutorService; @@ -82,15 +81,16 @@ public RocksDBConsumeQueueStore(DefaultMessageStore messageStore) { super(messageStore); this.storePath = StorePathConfigHelper.getStorePathConsumeQueue(messageStoreConfig.getStorePathRootDir()); - this.rocksDBStorage = new ConsumeQueueRocksDBStorage(messageStore, storePath, 4); + this.rocksDBStorage = new ConsumeQueueRocksDBStorage(messageStore, storePath); this.rocksDBConsumeQueueTable = new RocksDBConsumeQueueTable(rocksDBStorage, messageStore); this.rocksDBConsumeQueueOffsetTable = new RocksDBConsumeQueueOffsetTable(rocksDBConsumeQueueTable, rocksDBStorage, messageStore); this.writeBatch = new WriteBatch(); - this.bufferDRList = new ArrayList(BATCH_SIZE); - this.cqBBPairList = new ArrayList(BATCH_SIZE); - this.offsetBBPairList = new ArrayList(BATCH_SIZE); - for (int i = 0; i < BATCH_SIZE; i++) { + this.batchSize = messageStoreConfig.getBatchWriteKvCqSize(); + this.bufferDRList = new ArrayList<>(batchSize); + this.cqBBPairList = new ArrayList<>(batchSize); + this.offsetBBPairList = new ArrayList<>(batchSize); + for (int i = 0; i < batchSize; i++) { 
this.cqBBPairList.add(RocksDBConsumeQueueTable.getCQByteBufferPair()); this.offsetBBPairList.add(RocksDBConsumeQueueOffsetTable.getOffsetByteBufferPair()); } @@ -164,9 +164,10 @@ private boolean shutdownInner() { @Override public void putMessagePositionInfoWrapper(DispatchRequest request) throws RocksDBException { - if (request == null || this.bufferDRList.size() >= BATCH_SIZE) { + if (request == null || this.bufferDRList.size() >= batchSize) { putMessagePosition(); } + if (request != null) { this.bufferDRList.add(request); } diff --git a/store/src/main/java/org/apache/rocketmq/store/queue/RocksDBConsumeQueueTable.java b/store/src/main/java/org/apache/rocketmq/store/queue/RocksDBConsumeQueueTable.java index c7d35fa8c0c..194bd4cca5f 100644 --- a/store/src/main/java/org/apache/rocketmq/store/queue/RocksDBConsumeQueueTable.java +++ b/store/src/main/java/org/apache/rocketmq/store/queue/RocksDBConsumeQueueTable.java @@ -185,6 +185,39 @@ public long binarySearchInCQByTime(String topic, int queueId, long high, long lo long result = -1L; long targetOffset = -1L, leftOffset = -1L, rightOffset = -1L; long ceiling = high, floor = low; + // Handle the following corner cases first: + // 1. store time of (high) < timestamp + ByteBuffer buffer = getCQInKV(topic, queueId, ceiling); + if (buffer != null) { + long storeTime = buffer.getLong(MSG_STORE_TIME_SIZE_OFFSET); + if (storeTime < timestamp) { + switch (boundaryType) { + case LOWER: + return ceiling + 1; + case UPPER: + return ceiling; + default: + log.warn("Unknown boundary type"); + break; + } + } + } + // 2. store time of (low) > timestamp + buffer = getCQInKV(topic, queueId, floor); + if (buffer != null) { + long storeTime = buffer.getLong(MSG_STORE_TIME_SIZE_OFFSET); + if (storeTime > timestamp) { + switch (boundaryType) { + case LOWER: + return floor; + case UPPER: + return 0; + default: + log.warn("Unknown boundary type"); + break; + } + } + } while (high >= low) { long midOffset = low + ((high - low) >>> 1); ByteBuffer byteBuffer = getCQInKV(topic, queueId, midOffset); diff --git a/store/src/main/java/org/apache/rocketmq/store/rocksdb/ConsumeQueueRocksDBStorage.java b/store/src/main/java/org/apache/rocketmq/store/rocksdb/ConsumeQueueRocksDBStorage.java index 362684560c8..b343a5b4b50 100644 --- a/store/src/main/java/org/apache/rocketmq/store/rocksdb/ConsumeQueueRocksDBStorage.java +++ b/store/src/main/java/org/apache/rocketmq/store/rocksdb/ConsumeQueueRocksDBStorage.java @@ -16,53 +16,45 @@ */ package org.apache.rocketmq.store.rocksdb; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import org.apache.rocketmq.common.UtilAll; import org.apache.rocketmq.common.config.AbstractRocksDBStorage; -import org.apache.rocketmq.common.utils.DataConverter; import org.apache.rocketmq.store.MessageStore; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.ColumnFamilyOptions; -import org.rocksdb.CompactRangeOptions; import org.rocksdb.ReadOptions; import org.rocksdb.RocksDB; import org.rocksdb.RocksDBException; import org.rocksdb.RocksIterator; import org.rocksdb.WriteBatch; -import org.rocksdb.WriteOptions; public class ConsumeQueueRocksDBStorage extends AbstractRocksDBStorage { + + public static final byte[] OFFSET_COLUMN_FAMILY = "offset".getBytes(StandardCharsets.UTF_8); + private final MessageStore messageStore; private volatile ColumnFamilyHandle offsetCFHandle; - public ConsumeQueueRocksDBStorage(final MessageStore messageStore, final String dbPath, final int 
prefixLen) { + public ConsumeQueueRocksDBStorage(final MessageStore messageStore, final String dbPath) { + super(dbPath); this.messageStore = messageStore; - this.dbPath = dbPath; this.readOnly = false; } - private void initOptions() { + protected void initOptions() { this.options = RocksDBOptionsFactory.createDBOptions(); + super.initOptions(); + } - this.writeOptions = new WriteOptions(); - this.writeOptions.setSync(false); - this.writeOptions.setDisableWAL(true); - this.writeOptions.setNoSlowdown(true); - + @Override + protected void initTotalOrderReadOptions() { this.totalOrderReadOptions = new ReadOptions(); this.totalOrderReadOptions.setPrefixSameAsStart(false); this.totalOrderReadOptions.setTotalOrderSeek(false); - - this.compactRangeOptions = new CompactRangeOptions(); - this.compactRangeOptions.setBottommostLevelCompaction(CompactRangeOptions.BottommostLevelCompaction.kForce); - this.compactRangeOptions.setAllowWriteStall(true); - this.compactRangeOptions.setExclusiveManualCompaction(false); - this.compactRangeOptions.setChangeLevel(true); - this.compactRangeOptions.setTargetLevel(-1); - this.compactRangeOptions.setMaxSubcompactions(4); } @Override @@ -72,7 +64,7 @@ protected boolean postLoad() { initOptions(); - final List cfDescriptors = new ArrayList(); + final List cfDescriptors = new ArrayList<>(); ColumnFamilyOptions cqCfOptions = RocksDBOptionsFactory.createCQCFOptions(this.messageStore); this.cfOptions.add(cqCfOptions); @@ -80,11 +72,8 @@ protected boolean postLoad() { ColumnFamilyOptions offsetCfOptions = RocksDBOptionsFactory.createOffsetCFOptions(); this.cfOptions.add(offsetCfOptions); - cfDescriptors.add(new ColumnFamilyDescriptor("offset".getBytes(DataConverter.CHARSET_UTF8), offsetCfOptions)); - - final List cfHandles = new ArrayList(); - open(cfDescriptors, cfHandles); - + cfDescriptors.add(new ColumnFamilyDescriptor(OFFSET_COLUMN_FAMILY, offsetCfOptions)); + open(cfDescriptors); this.defaultCFHandle = cfHandles.get(0); this.offsetCFHandle = cfHandles.get(1); } catch (final Exception e) { @@ -130,4 +119,4 @@ public RocksIterator seekOffsetCF() { public ColumnFamilyHandle getOffsetCFHandle() { return this.offsetCFHandle; } -} \ No newline at end of file +} diff --git a/store/src/main/java/org/apache/rocketmq/store/rocksdb/RocksDBOptionsFactory.java b/store/src/main/java/org/apache/rocketmq/store/rocksdb/RocksDBOptionsFactory.java index a3a99d3346c..d373ba6249c 100644 --- a/store/src/main/java/org/apache/rocketmq/store/rocksdb/RocksDBOptionsFactory.java +++ b/store/src/main/java/org/apache/rocketmq/store/rocksdb/RocksDBOptionsFactory.java @@ -16,7 +16,7 @@ */ package org.apache.rocketmq.store.rocksdb; -import org.apache.rocketmq.common.config.ConfigRocksDBStorage; +import org.apache.rocketmq.common.config.ConfigHelper; import org.apache.rocketmq.store.MessageStore; import org.rocksdb.BlockBasedTableConfig; import org.rocksdb.BloomFilter; @@ -65,13 +65,16 @@ public static ColumnFamilyOptions createCQCFOptions(final MessageStore messageSt setMaxMergeWidth(Integer.MAX_VALUE). setStopStyle(CompactionStopStyle.CompactionStopStyleTotalSize). setCompressionSizePercent(-1); + String bottomMostCompressionTypeOpt = messageStore.getMessageStoreConfig() + .getBottomMostCompressionTypeForConsumeQueueStore(); + CompressionType bottomMostCompressionType = CompressionType.getCompressionType(bottomMostCompressionTypeOpt); return columnFamilyOptions.setMaxWriteBufferNumber(4). setWriteBufferSize(128 * SizeUnit.MB). setMinWriteBufferNumberToMerge(1). 
setTableFormatConfig(blockBasedTableConfig). setMemTableConfig(new SkipListMemTableConfig()). setCompressionType(CompressionType.LZ4_COMPRESSION). - setBottommostCompressionType(CompressionType.ZSTD_COMPRESSION). + setBottommostCompressionType(bottomMostCompressionType). setNumLevels(7). setCompactionStyle(CompactionStyle.UNIVERSAL). setCompactionOptionsUniversal(compactionOption). @@ -134,7 +137,7 @@ public static DBOptions createDBOptions() { Statistics statistics = new Statistics(); statistics.setStatsLevel(StatsLevel.EXCEPT_DETAILED_TIMERS); return options. - setDbLogDir(ConfigRocksDBStorage.getDBLogDir()). + setDbLogDir(ConfigHelper.getDBLogDir()). setInfoLogLevel(InfoLogLevel.INFO_LEVEL). setWalRecoveryMode(WALRecoveryMode.PointInTimeRecovery). setManualWalFlush(true). @@ -144,9 +147,9 @@ public static DBOptions createDBOptions() { setCreateIfMissing(true). setCreateMissingColumnFamilies(true). setMaxOpenFiles(-1). - setMaxLogFileSize(1 * SizeUnit.GB). + setMaxLogFileSize(SizeUnit.GB). setKeepLogFileNum(5). - setMaxManifestFileSize(1 * SizeUnit.GB). + setMaxManifestFileSize(SizeUnit.GB). setAllowConcurrentMemtableWrite(false). setStatistics(statistics). setAtomicFlush(true). diff --git a/store/src/main/java/org/apache/rocketmq/store/util/PerfCounter.java b/store/src/main/java/org/apache/rocketmq/store/util/PerfCounter.java index e2a55d63994..99649398a83 100644 --- a/store/src/main/java/org/apache/rocketmq/store/util/PerfCounter.java +++ b/store/src/main/java/org/apache/rocketmq/store/util/PerfCounter.java @@ -356,7 +356,7 @@ public void run() { } } catch (Exception e) { - logger.error("{} get unknown errror", getServiceName(), e); + logger.error("{} get unknown error", getServiceName(), e); try { Thread.sleep(1000); } catch (Throwable ignored) { diff --git a/store/src/test/java/org/apache/rocketmq/store/queue/ConsumeQueueTest.java b/store/src/test/java/org/apache/rocketmq/store/queue/ConsumeQueueTest.java index c3c8be52ddd..bf3b1eeca83 100644 --- a/store/src/test/java/org/apache/rocketmq/store/queue/ConsumeQueueTest.java +++ b/store/src/test/java/org/apache/rocketmq/store/queue/ConsumeQueueTest.java @@ -22,6 +22,7 @@ import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import org.apache.rocketmq.common.BrokerConfig; +import org.apache.rocketmq.common.MixAll; import org.apache.rocketmq.common.UtilAll; import org.apache.rocketmq.common.attribute.CQType; import org.apache.rocketmq.common.message.MessageDecoder; @@ -31,6 +32,7 @@ import org.apache.rocketmq.store.DispatchRequest; import org.apache.rocketmq.store.MessageFilter; import org.apache.rocketmq.store.MessageStore; +import org.apache.rocketmq.store.RocksDBMessageStore; import org.apache.rocketmq.store.config.MessageStoreConfig; import org.apache.rocketmq.store.stats.BrokerStatsManager; import org.junit.Assert; @@ -84,7 +86,26 @@ messageStoreConfig, new BrokerStatsManager(brokerConfig), return master; } - protected void putMsg(DefaultMessageStore messageStore) throws Exception { + protected RocksDBMessageStore genRocksdbMessageStore() throws Exception { + MessageStoreConfig messageStoreConfig = buildStoreConfig( + COMMIT_LOG_FILE_SIZE, CQ_FILE_SIZE, true, CQ_EXT_FILE_SIZE + ); + + BrokerConfig brokerConfig = new BrokerConfig(); + + RocksDBMessageStore master = new RocksDBMessageStore( + messageStoreConfig, new BrokerStatsManager(brokerConfig), + (topic, queueId, logicOffset, tagsCode, msgStoreTime, filterBitMap, properties) -> { + }, brokerConfig, new ConcurrentHashMap<>()); + + assertThat(master.load()).isTrue(); 
+ + master.start(); + + return master; + } + + protected void putMsg(MessageStore messageStore) { int totalMsgs = 200; for (int i = 0; i < totalMsgs; i++) { MessageExtBrokerInner message = buildMessage(); @@ -184,9 +205,33 @@ public void testIterator() throws Exception { @Test public void testEstimateMessageCountInEmptyConsumeQueue() { - DefaultMessageStore master = null; + DefaultMessageStore messageStore = null; + try { + messageStore = gen(); + doTestEstimateMessageCountInEmptyConsumeQueue(messageStore); + } catch (Exception e) { + e.printStackTrace(); + assertThat(Boolean.FALSE).isTrue(); + } + } + + @Test + public void testEstimateRocksdbMessageCountInEmptyConsumeQueue() { + if (notExecuted()) { + return; + } + DefaultMessageStore messageStore = null; + try { + messageStore = genRocksdbMessageStore(); + doTestEstimateMessageCountInEmptyConsumeQueue(messageStore); + } catch (Exception e) { + e.printStackTrace(); + assertThat(Boolean.FALSE).isTrue(); + } + } + + public void doTestEstimateMessageCountInEmptyConsumeQueue(MessageStore master) { try { - master = gen(); ConsumeQueueInterface consumeQueue = master.findConsumeQueue(TOPIC, QUEUE_ID); MessageFilter filter = new MessageFilter() { @Override @@ -219,16 +264,34 @@ public boolean isMatchedByCommitLog(ByteBuffer msgBuffer, Map pr } } + @Test + public void testEstimateRocksdbMessageCount() { + if (notExecuted()) { + return; + } + DefaultMessageStore messageStore = null; + try { + messageStore = genRocksdbMessageStore(); + doTestEstimateMessageCount(messageStore); + } catch (Exception e) { + e.printStackTrace(); + assertThat(Boolean.FALSE).isTrue(); + } + } + @Test public void testEstimateMessageCount() { DefaultMessageStore messageStore = null; try { messageStore = gen(); + doTestEstimateMessageCount(messageStore); } catch (Exception e) { e.printStackTrace(); assertThat(Boolean.FALSE).isTrue(); } + } + public void doTestEstimateMessageCount(MessageStore messageStore) { try { try { putMsg(messageStore); @@ -265,15 +328,34 @@ public boolean isMatchedByCommitLog(ByteBuffer msgBuffer, Map pr } } + @Test + public void testEstimateRocksdbMessageCountSample() { + if (notExecuted()) { + return; + } + DefaultMessageStore messageStore = null; + try { + messageStore = genRocksdbMessageStore(); + doTestEstimateMessageCountSample(messageStore); + } catch (Exception e) { + e.printStackTrace(); + assertThat(Boolean.FALSE).isTrue(); + } + } + @Test public void testEstimateMessageCountSample() { DefaultMessageStore messageStore = null; try { messageStore = gen(); + doTestEstimateMessageCountSample(messageStore); } catch (Exception e) { e.printStackTrace(); assertThat(Boolean.FALSE).isTrue(); } + } + + public void doTestEstimateMessageCountSample(MessageStore messageStore) { try { try { @@ -303,4 +385,8 @@ public boolean isMatchedByCommitLog(ByteBuffer msgBuffer, Map pr UtilAll.deleteFile(new File(STORE_PATH)); } } + + private boolean notExecuted() { + return MixAll.isMac(); + } } diff --git a/store/src/test/java/org/apache/rocketmq/store/queue/RocksDBConsumeQueueTableTest.java b/store/src/test/java/org/apache/rocketmq/store/queue/RocksDBConsumeQueueTableTest.java new file mode 100644 index 00000000000..d06b6da2fbd --- /dev/null +++ b/store/src/test/java/org/apache/rocketmq/store/queue/RocksDBConsumeQueueTableTest.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.rocketmq.store.queue; + +import org.apache.rocketmq.common.BoundaryType; +import org.apache.rocketmq.common.MixAll; +import org.apache.rocketmq.store.DefaultMessageStore; +import org.apache.rocketmq.store.rocksdb.ConsumeQueueRocksDBStorage; +import org.junit.Test; +import org.mockito.stubbing.Answer; +import org.rocksdb.RocksDBException; + +import java.nio.ByteBuffer; + +import static org.apache.rocketmq.store.queue.RocksDBConsumeQueueTable.CQ_UNIT_SIZE; +import static org.junit.Assert.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; + +public class RocksDBConsumeQueueTableTest { + + @Test + public void testBinarySearchInCQByTime() throws RocksDBException { + if (MixAll.isMac()) { + return; + } + ConsumeQueueRocksDBStorage rocksDBStorage = mock(ConsumeQueueRocksDBStorage.class); + DefaultMessageStore store = mock(DefaultMessageStore.class); + RocksDBConsumeQueueTable table = new RocksDBConsumeQueueTable(rocksDBStorage, store); + doAnswer((Answer) mock -> { + /* + * queueOffset timestamp + * 100 1000 + * 200 2000 + * 201 2010 + * 1000 10000 + */ + byte[] keyBytes = mock.getArgument(0); + ByteBuffer keyBuffer = ByteBuffer.wrap(keyBytes); + int len = keyBuffer.getInt(0); + long offset = keyBuffer.getLong(4 + 1 + len + 1 + 4 + 1); + long phyOffset = offset; + long timestamp = offset * 10; + final ByteBuffer byteBuffer = ByteBuffer.allocate(CQ_UNIT_SIZE); + byteBuffer.putLong(phyOffset); + byteBuffer.putInt(1); + byteBuffer.putLong(0); + byteBuffer.putLong(timestamp); + return byteBuffer.array(); + }).when(rocksDBStorage).getCQ(any()); + assertEquals(1001, table.binarySearchInCQByTime("topic", 0, 1000, 100, 20000, 0, BoundaryType.LOWER)); + assertEquals(1000, table.binarySearchInCQByTime("topic", 0, 1000, 100, 20000, 0, BoundaryType.UPPER)); + assertEquals(100, table.binarySearchInCQByTime("topic", 0, 1000, 100, 1, 0, BoundaryType.LOWER)); + assertEquals(0, table.binarySearchInCQByTime("topic", 0, 1000, 100, 1, 0, BoundaryType.UPPER)); + assertEquals(201, table.binarySearchInCQByTime("topic", 0, 1000, 100, 2001, 0, BoundaryType.LOWER)); + assertEquals(200, table.binarySearchInCQByTime("topic", 0, 1000, 100, 2001, 0, BoundaryType.UPPER)); + assertEquals(200, table.binarySearchInCQByTime("topic", 0, 1000, 100, 2000, 0, BoundaryType.LOWER)); + assertEquals(200, table.binarySearchInCQByTime("topic", 0, 1000, 100, 2000, 0, BoundaryType.UPPER)); + } +} \ No newline at end of file diff --git a/store/src/test/java/org/apache/rocketmq/store/rocksdb/RocksDBOptionsFactoryTest.java b/store/src/test/java/org/apache/rocketmq/store/rocksdb/RocksDBOptionsFactoryTest.java new file mode 100644 index 00000000000..1d7273968f6 --- /dev/null +++ b/store/src/test/java/org/apache/rocketmq/store/rocksdb/RocksDBOptionsFactoryTest.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or 
more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.rocketmq.store.rocksdb; + +import org.apache.rocketmq.store.config.MessageStoreConfig; +import org.junit.Assert; +import org.junit.Test; +import org.rocksdb.CompressionType; + +public class RocksDBOptionsFactoryTest { + + @Test + public void testBottomMostCompressionType() { + MessageStoreConfig config = new MessageStoreConfig(); + Assert.assertEquals(CompressionType.ZSTD_COMPRESSION, + CompressionType.getCompressionType(config.getBottomMostCompressionTypeForConsumeQueueStore())); + Assert.assertEquals(CompressionType.LZ4_COMPRESSION, CompressionType.getCompressionType("lz4")); + } +} diff --git a/test/pom.xml b/test/pom.xml index df380a0b604..801a10301eb 100644 --- a/test/pom.xml +++ b/test/pom.xml @@ -20,7 +20,7 @@ rocketmq-all org.apache.rocketmq - 5.3.1-SNAPSHOT + 5.3.2-SNAPSHOT 4.0.0 diff --git a/test/src/main/java/org/apache/rocketmq/test/client/rmq/RMQBroadCastConsumer.java b/test/src/main/java/org/apache/rocketmq/test/client/rmq/RMQBroadCastConsumer.java index 2a596197441..7ac5ec39786 100644 --- a/test/src/main/java/org/apache/rocketmq/test/client/rmq/RMQBroadCastConsumer.java +++ b/test/src/main/java/org/apache/rocketmq/test/client/rmq/RMQBroadCastConsumer.java @@ -26,8 +26,8 @@ public class RMQBroadCastConsumer extends RMQNormalConsumer { private static Logger logger = LoggerFactory.getLogger(RMQBroadCastConsumer.class); public RMQBroadCastConsumer(String nsAddr, String topic, String subExpression, - String consumerGroup, AbstractListener listner) { - super(nsAddr, topic, subExpression, consumerGroup, listner); + String consumerGroup, AbstractListener listener) { + super(nsAddr, topic, subExpression, consumerGroup, listener); } @Override diff --git a/test/src/main/java/org/apache/rocketmq/test/clientinterface/AbstractMQConsumer.java b/test/src/main/java/org/apache/rocketmq/test/clientinterface/AbstractMQConsumer.java index 5681ecc841a..22193bb4ba9 100644 --- a/test/src/main/java/org/apache/rocketmq/test/clientinterface/AbstractMQConsumer.java +++ b/test/src/main/java/org/apache/rocketmq/test/clientinterface/AbstractMQConsumer.java @@ -69,8 +69,8 @@ public AbstractListener getListener() { return listener; } - public void setListener(AbstractListener listner) { - this.listener = listner; + public void setListener(AbstractListener listener) { + this.listener = listener; } public String getNsAddr() { diff --git a/test/src/test/java/org/apache/rocketmq/test/base/BaseConf.java b/test/src/test/java/org/apache/rocketmq/test/base/BaseConf.java index b64cda33420..472e106ce35 100644 --- a/test/src/test/java/org/apache/rocketmq/test/base/BaseConf.java +++ b/test/src/test/java/org/apache/rocketmq/test/base/BaseConf.java @@ -100,8 +100,8 @@ public class BaseConf { brokerController2.getBrokerConfig().getListenPort()); brokerController3 = 
IntegrationTestBase.createAndStartBroker(NAMESRV_ADDR); - log.debug("Broker {} started, listening: {}", brokerController2.getBrokerConfig().getBrokerName(), - brokerController2.getBrokerConfig().getListenPort()); + log.debug("Broker {} started, listening: {}", brokerController3.getBrokerConfig().getBrokerName(), + brokerController3.getBrokerConfig().getListenPort()); CLUSTER_NAME = brokerController1.getBrokerConfig().getBrokerClusterName(); BROKER1_NAME = brokerController1.getBrokerConfig().getBrokerName(); diff --git a/test/src/test/java/org/apache/rocketmq/test/base/IntegrationTestBase.java b/test/src/test/java/org/apache/rocketmq/test/base/IntegrationTestBase.java index 2217936929c..fde991ad13d 100644 --- a/test/src/test/java/org/apache/rocketmq/test/base/IntegrationTestBase.java +++ b/test/src/test/java/org/apache/rocketmq/test/base/IntegrationTestBase.java @@ -136,6 +136,8 @@ public static BrokerController createAndStartBroker(String nsAddr) { brokerConfig.setNamesrvAddr(nsAddr); brokerConfig.setEnablePropertyFilter(true); brokerConfig.setEnableCalcFilterBitMap(true); + brokerConfig.setAppendAckAsync(true); + brokerConfig.setAppendCkAsync(true); storeConfig.setEnableConsumeQueueExt(true); brokerConfig.setLoadBalancePollNameServerInterval(500); storeConfig.setStorePathRootDir(baseDir); diff --git a/test/src/test/java/org/apache/rocketmq/test/client/consumer/balance/NormalMsgDynamicBalanceIT.java b/test/src/test/java/org/apache/rocketmq/test/client/consumer/balance/NormalMsgDynamicBalanceIT.java index 684b718ae5d..7408a092c4b 100644 --- a/test/src/test/java/org/apache/rocketmq/test/client/consumer/balance/NormalMsgDynamicBalanceIT.java +++ b/test/src/test/java/org/apache/rocketmq/test/client/consumer/balance/NormalMsgDynamicBalanceIT.java @@ -96,6 +96,8 @@ public void test3ConsumerAndCrashOne() { MQWait.waitConsumeAll(CONSUME_TIME, producer.getAllMsgBody(), consumer1.getListener(), consumer2.getListener(), consumer3.getListener()); consumer3.shutdown(); + TestUtils.waitForSeconds(WAIT_TIME); + producer.clearMsg(); consumer1.clearMsg(); consumer2.clearMsg(); diff --git a/test/src/test/java/org/apache/rocketmq/test/route/CreateAndUpdateTopicIT.java b/test/src/test/java/org/apache/rocketmq/test/route/CreateAndUpdateTopicIT.java index 9004b91db39..9e9afb1ed2c 100644 --- a/test/src/test/java/org/apache/rocketmq/test/route/CreateAndUpdateTopicIT.java +++ b/test/src/test/java/org/apache/rocketmq/test/route/CreateAndUpdateTopicIT.java @@ -17,13 +17,16 @@ package org.apache.rocketmq.test.route; +import java.util.concurrent.TimeUnit; import org.apache.rocketmq.common.TopicConfig; import org.apache.rocketmq.remoting.protocol.route.TopicRouteData; import org.apache.rocketmq.test.base.BaseConf; import org.apache.rocketmq.test.util.MQAdminTestUtils; +import org.junit.Ignore; import org.junit.Test; import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; public class CreateAndUpdateTopicIT extends BaseConf { @@ -47,6 +50,8 @@ public void testCreateOrUpdateTopic_EnableSingleTopicRegistration() { } + // Temporarily ignore the fact that this test cannot pass in the integration test pipeline due to unknown reasons + @Ignore @Test public void testDeleteTopicFromNameSrvWithBrokerRegistration() { namesrvController.getNamesrvConfig().setDeleteTopicWithBrokerRegistration(true); @@ -60,11 +65,9 @@ public void testDeleteTopicFromNameSrvWithBrokerRegistration() { boolean createResult = MQAdminTestUtils.createTopic(NAMESRV_ADDR, CLUSTER_NAME, testTopic1, 8, null); 
assertThat(createResult).isTrue(); - createResult = MQAdminTestUtils.createTopic(NAMESRV_ADDR, CLUSTER_NAME, testTopic2, 8, null); assertThat(createResult).isTrue(); - TopicRouteData route = MQAdminTestUtils.examineTopicRouteInfo(NAMESRV_ADDR, testTopic2); assertThat(route.getBrokerDatas()).hasSize(3); @@ -73,11 +76,13 @@ public void testDeleteTopicFromNameSrvWithBrokerRegistration() { // Deletion is lazy, trigger broker registration brokerController1.registerBrokerAll(false, false, true); - // The route info of testTopic2 will be removed from broker1 after the registration - route = MQAdminTestUtils.examineTopicRouteInfo(NAMESRV_ADDR, testTopic2); - assertThat(route.getBrokerDatas()).hasSize(2); - assertThat(route.getQueueDatas().get(0).getBrokerName()).isEqualTo(BROKER2_NAME); - assertThat(route.getQueueDatas().get(1).getBrokerName()).isEqualTo(BROKER3_NAME); + await().atMost(10, TimeUnit.SECONDS).until(() -> { + // The route info of testTopic2 will be removed from broker1 after the registration + TopicRouteData finalRoute = MQAdminTestUtils.examineTopicRouteInfo(NAMESRV_ADDR, testTopic2); + return finalRoute.getBrokerDatas().size() == 2 + && finalRoute.getQueueDatas().get(0).getBrokerName().equals(BROKER2_NAME) + && finalRoute.getQueueDatas().get(1).getBrokerName().equals(BROKER3_NAME); + }); brokerController1.getBrokerConfig().setEnableSingleTopicRegister(false); brokerController2.getBrokerConfig().setEnableSingleTopicRegister(false); diff --git a/tieredstore/pom.xml b/tieredstore/pom.xml index 96f042da21b..4d9af208187 100644 --- a/tieredstore/pom.xml +++ b/tieredstore/pom.xml @@ -19,7 +19,7 @@ org.apache.rocketmq rocketmq-all - 5.3.1-SNAPSHOT + 5.3.2-SNAPSHOT 4.0.0 diff --git a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/TieredMessageStore.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/TieredMessageStore.java index 7b63e16696e..0e3ede871c3 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/TieredMessageStore.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/TieredMessageStore.java @@ -180,9 +180,15 @@ public boolean fetchFromCurrentStore(String topic, int queueId, long offset, int } // determine whether tiered storage path conditions are met - if (storageLevel.check(MessageStoreConfig.TieredStorageLevel.NOT_IN_DISK) - && !next.checkInStoreByConsumeOffset(topic, queueId, offset)) { - return true; + if (storageLevel.check(MessageStoreConfig.TieredStorageLevel.NOT_IN_DISK)) { + // return true to read from tiered storage if the CommitLog is empty + if (next != null && next.getCommitLog() != null && + next.getCommitLog().getMinOffset() < 0L) { + return true; + } + if (!next.checkInStoreByConsumeOffset(topic, queueId, offset)) { + return true; + } } if (storageLevel.check(MessageStoreConfig.TieredStorageLevel.NOT_IN_MEM) @@ -208,10 +214,10 @@ public CompletableFuture getMessageAsync(String group, String } if (fetchFromCurrentStore(topic, queueId, offset, maxMsgNums)) { - log.trace("GetMessageAsync from current store, " + + log.trace("GetMessageAsync from remote store, " + "topic: {}, queue: {}, offset: {}, maxCount: {}", topic, queueId, offset, maxMsgNums); } else { - log.trace("GetMessageAsync from remote store, " + + log.trace("GetMessageAsync from next store, " + "topic: {}, queue: {}, offset: {}, maxCount: {}", topic, queueId, offset, maxMsgNums); return next.getMessageAsync(group, topic, queueId, offset, maxMsgNums, messageFilter); } diff --git 
a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/index/IndexStoreService.java b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/index/IndexStoreService.java index 020b9f3b068..0db5dc5c4c5 100644 --- a/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/index/IndexStoreService.java +++ b/tieredstore/src/main/java/org/apache/rocketmq/tieredstore/index/IndexStoreService.java @@ -42,8 +42,6 @@ import org.apache.rocketmq.store.logfile.MappedFile; import org.apache.rocketmq.tieredstore.MessageStoreConfig; import org.apache.rocketmq.tieredstore.common.AppendResult; -import org.apache.rocketmq.tieredstore.exception.TieredStoreErrorCode; -import org.apache.rocketmq.tieredstore.exception.TieredStoreException; import org.apache.rocketmq.tieredstore.file.FlatAppendFile; import org.apache.rocketmq.tieredstore.file.FlatFileFactory; import org.apache.rocketmq.tieredstore.provider.FileSegment; @@ -271,23 +269,23 @@ public CompletableFuture> queryAsync( public void forceUpload() { try { readWriteLock.writeLock().lock(); - if (this.currentWriteFile == null) { - log.warn("IndexStoreService no need force upload current write file"); - return; - } - // note: current file has been shutdown before - IndexStoreFile lastFile = new IndexStoreFile(storeConfig, currentWriteFile.getTimestamp()); - if (this.doCompactThenUploadFile(lastFile)) { - this.setCompactTimestamp(lastFile.getTimestamp()); - } else { - throw new TieredStoreException( - TieredStoreErrorCode.UNKNOWN, "IndexStoreService force compact current file error"); + while (true) { + Map.Entry entry = + this.timeStoreTable.higherEntry(this.compactTimestamp.get()); + if (entry == null) { + break; + } + if (this.doCompactThenUploadFile(entry.getValue())) { + this.setCompactTimestamp(entry.getValue().getTimestamp()); + // The total number of files will not too much, prevent io too fast. 
+ TimeUnit.MILLISECONDS.sleep(50); + } } } catch (Exception e) { log.error("IndexStoreService force upload error", e); throw new RuntimeException(e); } finally { - readWriteLock.writeLock().lock(); + readWriteLock.writeLock().unlock(); } } @@ -393,19 +391,13 @@ protected IndexFile getNextSealedFile() { @Override public void shutdown() { super.shutdown(); - readWriteLock.writeLock().lock(); - try { - for (Map.Entry entry : timeStoreTable.entrySet()) { - entry.getValue().shutdown(); - } - if (!autoCreateNewFile) { - this.forceUpload(); + // Wait index service upload then clear time store table + while (!this.timeStoreTable.isEmpty()) { + try { + TimeUnit.MILLISECONDS.sleep(50); + } catch (InterruptedException e) { + throw new RuntimeException(e); } - this.timeStoreTable.clear(); - } catch (Exception e) { - log.error("IndexStoreService shutdown error", e); - } finally { - readWriteLock.writeLock().unlock(); } } @@ -424,6 +416,18 @@ public void run() { } this.waitForRunning(TimeUnit.SECONDS.toMillis(10)); } + readWriteLock.writeLock().lock(); + try { + if (autoCreateNewFile) { + this.forceUpload(); + } + this.timeStoreTable.forEach((timestamp, file) -> file.shutdown()); + this.timeStoreTable.clear(); + } catch (Exception e) { + log.error("IndexStoreService shutdown error", e); + } finally { + readWriteLock.writeLock().unlock(); + } log.info(this.getServiceName() + " service shutdown"); } } diff --git a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/index/IndexStoreServiceTest.java b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/index/IndexStoreServiceTest.java index fb563f7c6c2..83b407e73ba 100644 --- a/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/index/IndexStoreServiceTest.java +++ b/tieredstore/src/test/java/org/apache/rocketmq/tieredstore/index/IndexStoreServiceTest.java @@ -120,7 +120,7 @@ public void doConvertOldFormatTest() throws IOException { indexService = new IndexStoreService(fileAllocator, filePath); indexService.start(); ConcurrentSkipListMap timeStoreTable = indexService.getTimeStoreTable(); - Assert.assertEquals(1, timeStoreTable.size()); + Assert.assertEquals(2, timeStoreTable.size()); Assert.assertEquals(Long.valueOf(timestamp), timeStoreTable.firstKey()); mappedFile.destroy(10 * 1000); } @@ -232,7 +232,7 @@ public void restartServiceTest() throws InterruptedException { indexService = new IndexStoreService(fileAllocator, filePath); indexService.start(); Assert.assertEquals(timestamp, indexService.getTimeStoreTable().firstKey().longValue()); - Assert.assertEquals(2, indexService.getTimeStoreTable().size()); + Assert.assertEquals(4, indexService.getTimeStoreTable().size()); Assert.assertEquals(IndexFile.IndexStatusEnum.UPLOAD, indexService.getTimeStoreTable().firstEntry().getValue().getFileStatus()); } diff --git a/tools/pom.xml b/tools/pom.xml index ee459dfd95a..ab740bd8a70 100644 --- a/tools/pom.xml +++ b/tools/pom.xml @@ -19,7 +19,7 @@ org.apache.rocketmq rocketmq-all - 5.3.1-SNAPSHOT + 5.3.2-SNAPSHOT 4.0.0 diff --git a/tools/src/main/java/org/apache/rocketmq/tools/admin/DefaultMQAdminExt.java b/tools/src/main/java/org/apache/rocketmq/tools/admin/DefaultMQAdminExt.java index 6ebee1d0dd1..3686bf2644b 100644 --- a/tools/src/main/java/org/apache/rocketmq/tools/admin/DefaultMQAdminExt.java +++ b/tools/src/main/java/org/apache/rocketmq/tools/admin/DefaultMQAdminExt.java @@ -52,6 +52,7 @@ import org.apache.rocketmq.remoting.protocol.body.ConsumeStatsList; import org.apache.rocketmq.remoting.protocol.body.ConsumerConnection; import 
org.apache.rocketmq.remoting.protocol.body.ConsumerRunningInfo; +import org.apache.rocketmq.remoting.protocol.body.CheckRocksdbCqWriteProgressResponseBody; import org.apache.rocketmq.remoting.protocol.body.EpochEntryCache; import org.apache.rocketmq.remoting.protocol.body.GroupList; import org.apache.rocketmq.remoting.protocol.body.HARuntimeInfo; @@ -771,6 +772,12 @@ public QueryConsumeQueueResponseBody queryConsumeQueue(String brokerAddr, String ); } + @Override + public CheckRocksdbCqWriteProgressResponseBody checkRocksdbCqWriteProgress(String brokerAddr, String topic) + throws InterruptedException, RemotingTimeoutException, RemotingSendRequestException, RemotingConnectException, MQClientException { + return this.defaultMQAdminExtImpl.checkRocksdbCqWriteProgress(brokerAddr, topic); + } + @Override public boolean resumeCheckHalfMessage(String topic, String msgId) diff --git a/tools/src/main/java/org/apache/rocketmq/tools/admin/DefaultMQAdminExtImpl.java b/tools/src/main/java/org/apache/rocketmq/tools/admin/DefaultMQAdminExtImpl.java index dc4d35e7049..883dcbe41d7 100644 --- a/tools/src/main/java/org/apache/rocketmq/tools/admin/DefaultMQAdminExtImpl.java +++ b/tools/src/main/java/org/apache/rocketmq/tools/admin/DefaultMQAdminExtImpl.java @@ -90,6 +90,7 @@ import org.apache.rocketmq.remoting.protocol.body.ConsumeStatsList; import org.apache.rocketmq.remoting.protocol.body.ConsumerConnection; import org.apache.rocketmq.remoting.protocol.body.ConsumerRunningInfo; +import org.apache.rocketmq.remoting.protocol.body.CheckRocksdbCqWriteProgressResponseBody; import org.apache.rocketmq.remoting.protocol.body.EpochEntryCache; import org.apache.rocketmq.remoting.protocol.body.GroupList; import org.apache.rocketmq.remoting.protocol.body.HARuntimeInfo; @@ -1817,6 +1818,12 @@ public QueryConsumeQueueResponseBody queryConsumeQueue(String brokerAddr, String return this.mqClientInstance.getMQClientAPIImpl().queryConsumeQueue(brokerAddr, topic, queueId, index, count, consumerGroup, timeoutMillis); } + @Override + public CheckRocksdbCqWriteProgressResponseBody checkRocksdbCqWriteProgress(String brokerAddr, String topic) + throws InterruptedException, RemotingTimeoutException, RemotingSendRequestException, RemotingConnectException, MQClientException { + return this.mqClientInstance.getMQClientAPIImpl().checkRocksdbCqWriteProgress(brokerAddr, topic, timeoutMillis); + } + @Override public boolean resumeCheckHalfMessage(final String topic, final String msgId) throws RemotingException, MQClientException, InterruptedException, MQBrokerException { diff --git a/tools/src/main/java/org/apache/rocketmq/tools/admin/MQAdminExt.java b/tools/src/main/java/org/apache/rocketmq/tools/admin/MQAdminExt.java index ff78f22c704..09204ab7be2 100644 --- a/tools/src/main/java/org/apache/rocketmq/tools/admin/MQAdminExt.java +++ b/tools/src/main/java/org/apache/rocketmq/tools/admin/MQAdminExt.java @@ -48,6 +48,7 @@ import org.apache.rocketmq.remoting.protocol.body.ConsumeStatsList; import org.apache.rocketmq.remoting.protocol.body.ConsumerConnection; import org.apache.rocketmq.remoting.protocol.body.ConsumerRunningInfo; +import org.apache.rocketmq.remoting.protocol.body.CheckRocksdbCqWriteProgressResponseBody; import org.apache.rocketmq.remoting.protocol.body.EpochEntryCache; import org.apache.rocketmq.remoting.protocol.body.GroupList; import org.apache.rocketmq.remoting.protocol.body.HARuntimeInfo; @@ -148,6 +149,8 @@ ConsumeStats examineConsumeStats( final String consumerGroup) throws RemotingException, MQClientException, 
InterruptedException, MQBrokerException; + CheckRocksdbCqWriteProgressResponseBody checkRocksdbCqWriteProgress(String brokerAddr, String topic) throws InterruptedException, RemotingTimeoutException, RemotingSendRequestException, RemotingConnectException, MQClientException; + ConsumeStats examineConsumeStats(final String consumerGroup, final String topic) throws RemotingException, MQClientException, InterruptedException, MQBrokerException; diff --git a/tools/src/main/java/org/apache/rocketmq/tools/command/MQAdminStartup.java b/tools/src/main/java/org/apache/rocketmq/tools/command/MQAdminStartup.java index 43e4259c4e1..313a777ce4f 100644 --- a/tools/src/main/java/org/apache/rocketmq/tools/command/MQAdminStartup.java +++ b/tools/src/main/java/org/apache/rocketmq/tools/command/MQAdminStartup.java @@ -104,6 +104,7 @@ import org.apache.rocketmq.tools.command.offset.ResetOffsetByTimeCommand; import org.apache.rocketmq.tools.command.offset.SkipAccumulationSubCommand; import org.apache.rocketmq.tools.command.producer.ProducerSubCommand; +import org.apache.rocketmq.tools.command.queue.CheckRocksdbCqWriteProgressCommand; import org.apache.rocketmq.tools.command.queue.QueryConsumeQueueCommand; import org.apache.rocketmq.tools.command.stats.StatsAllSubCommand; import org.apache.rocketmq.tools.command.topic.AllocateMQSubCommand; @@ -304,6 +305,7 @@ public static void initCommand() { initCommand(new ListAclSubCommand()); initCommand(new CopyAclsSubCommand()); initCommand(new RocksDBConfigToJsonCommand()); + initCommand(new CheckRocksdbCqWriteProgressCommand()); } private static void printHelp() { diff --git a/tools/src/main/java/org/apache/rocketmq/tools/command/export/ExportMetadataInRocksDBCommand.java b/tools/src/main/java/org/apache/rocketmq/tools/command/export/ExportMetadataInRocksDBCommand.java index 1ecb1fa2cd9..c466490b8a8 100644 --- a/tools/src/main/java/org/apache/rocketmq/tools/command/export/ExportMetadataInRocksDBCommand.java +++ b/tools/src/main/java/org/apache/rocketmq/tools/command/export/ExportMetadataInRocksDBCommand.java @@ -14,6 +14,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ + package org.apache.rocketmq.tools.command.export; import com.alibaba.fastjson.JSONObject; @@ -77,6 +78,7 @@ public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) t } String configType = commandLine.getOptionValue("configType").trim().toLowerCase(); + path += "/" + configType; boolean jsonEnable = false; if (commandLine.hasOption("jsonEnable")) { @@ -86,7 +88,7 @@ public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) t ConfigRocksDBStorage kvStore = new ConfigRocksDBStorage(path, true /* readOnly */); if (!kvStore.start()) { - System.out.print("RocksDB load error, path=" + path + "\n"); + System.out.printf("RocksDB load error, path=%s\n" , path); return; } diff --git a/tools/src/main/java/org/apache/rocketmq/tools/command/message/PrintMessageSubCommand.java b/tools/src/main/java/org/apache/rocketmq/tools/command/message/PrintMessageSubCommand.java index bb82f5079e5..97e101d813c 100644 --- a/tools/src/main/java/org/apache/rocketmq/tools/command/message/PrintMessageSubCommand.java +++ b/tools/src/main/java/org/apache/rocketmq/tools/command/message/PrintMessageSubCommand.java @@ -24,6 +24,7 @@ import org.apache.commons.cli.Options; import org.apache.rocketmq.client.consumer.DefaultMQPullConsumer; import org.apache.rocketmq.client.consumer.PullResult; +import org.apache.rocketmq.client.impl.FindBrokerResult; import org.apache.rocketmq.common.MixAll; import org.apache.rocketmq.common.UtilAll; import org.apache.rocketmq.common.message.MessageExt; @@ -97,6 +98,12 @@ public Options buildCommandlineOptions(Options options) { opt.setRequired(false); options.addOption(opt); + opt = + new Option("l", "lmqParentTopic", true, + "Lmq parent topic, lmq is used to find the route."); + opt.setRequired(false); + options.addOption(opt); + return options; } @@ -113,11 +120,20 @@ public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) t String subExpression = !commandLine.hasOption('s') ? "*" : commandLine.getOptionValue('s').trim(); + String lmqParentTopic = + !commandLine.hasOption('l') ? 
null : commandLine.getOptionValue('l').trim(); + boolean printBody = !commandLine.hasOption('d') || Boolean.parseBoolean(commandLine.getOptionValue('d').trim()); consumer.start(); - Set mqs = consumer.fetchSubscribeMessageQueues(topic); + Set mqs; + if (lmqParentTopic != null) { + mqs = consumer.fetchSubscribeMessageQueues(lmqParentTopic); + mqs.forEach(mq -> mq.setTopic(topic)); + } else { + mqs = consumer.fetchSubscribeMessageQueues(topic); + } for (MessageQueue mq : mqs) { long minOffset = consumer.minOffset(mq); long maxOffset = consumer.maxOffset(mq); @@ -139,6 +155,7 @@ public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) t READQ: for (long offset = minOffset; offset < maxOffset; ) { try { + fillBrokerAddrIfNotExist(consumer, mq, lmqParentTopic); PullResult pullResult = consumer.pull(mq, subExpression, offset, 32); offset = pullResult.getNextBeginOffset(); switch (pullResult.getPullStatus()) { @@ -167,4 +184,17 @@ public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) t consumer.shutdown(); } } + + public void fillBrokerAddrIfNotExist(DefaultMQPullConsumer defaultMQPullConsumer, MessageQueue messageQueue, + String routeTopic) { + + FindBrokerResult findBrokerResult = defaultMQPullConsumer.getDefaultMQPullConsumerImpl().getRebalanceImpl().getmQClientFactory() + .findBrokerAddressInSubscribe(messageQueue.getBrokerName(), 0, false); + if (findBrokerResult == null) { + // use lmq parent topic to fill up broker addr table + defaultMQPullConsumer.getDefaultMQPullConsumerImpl().getRebalanceImpl().getmQClientFactory() + .updateTopicRouteInfoFromNameServer(routeTopic); + } + + } } diff --git a/tools/src/main/java/org/apache/rocketmq/tools/command/metadata/RocksDBConfigToJsonCommand.java b/tools/src/main/java/org/apache/rocketmq/tools/command/metadata/RocksDBConfigToJsonCommand.java index 1d81287ac7d..f2803b0cbb3 100644 --- a/tools/src/main/java/org/apache/rocketmq/tools/command/metadata/RocksDBConfigToJsonCommand.java +++ b/tools/src/main/java/org/apache/rocketmq/tools/command/metadata/RocksDBConfigToJsonCommand.java @@ -17,7 +17,6 @@ package org.apache.rocketmq.tools.command.metadata; -import com.alibaba.fastjson.JSON; import com.alibaba.fastjson.JSONObject; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.Option; @@ -33,10 +32,13 @@ import java.io.File; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; public class RocksDBConfigToJsonCommand implements SubCommand { private static final String TOPICS_JSON_CONFIG = "topics"; private static final String SUBSCRIPTION_GROUP_JSON_CONFIG = "subscriptionGroups"; + private static final String CONSUMER_OFFSETS_JSON_CONFIG = "consumerOffsets"; @Override public String commandName() { @@ -45,7 +47,7 @@ public String commandName() { @Override public String commandDesc() { - return "Convert RocksDB kv config (topics/subscriptionGroups) to json"; + return "Convert RocksDB kv config (topics/subscriptionGroups/consumerOffsets) to json"; } @Override @@ -56,7 +58,7 @@ public Options buildCommandlineOptions(Options options) { options.addOption(pathOption); Option configTypeOption = new Option("t", "configType", true, "Name of kv config, e.g. 
" + - "topics/subscriptionGroups"); + "topics/subscriptionGroups/consumerOffsets"); configTypeOption.setRequired(true); options.addOption(configTypeOption); @@ -71,19 +73,21 @@ public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) t return; } - String configType = commandLine.getOptionValue("configType").trim().toLowerCase(); + String configType = commandLine.getOptionValue("configType").trim(); if (!path.endsWith("/")) { path += "/"; } path += configType; - + if (CONSUMER_OFFSETS_JSON_CONFIG.equalsIgnoreCase(configType)) { + printConsumerOffsets(path); + return; + } ConfigRocksDBStorage configRocksDBStorage = new ConfigRocksDBStorage(path, true); configRocksDBStorage.start(); RocksIterator iterator = configRocksDBStorage.iterator(); - try { final Map configMap = new HashMap<>(); - final Map configTable = new HashMap<>(); + final JSONObject configTable = new JSONObject(); iterator.seekToFirst(); while (iterator.isValid()) { final byte[] key = iterator.key(); @@ -95,14 +99,16 @@ public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) t iterator.next(); } byte[] kvDataVersion = configRocksDBStorage.getKvDataVersion(); - configMap.put("dataVersion", - JSONObject.parseObject(new String(kvDataVersion, DataConverter.CHARSET_UTF8))); + if (kvDataVersion != null) { + configMap.put("dataVersion", + JSONObject.parseObject(new String(kvDataVersion, DataConverter.CHARSET_UTF8))); + } - if (TOPICS_JSON_CONFIG.toLowerCase().equals(configType)) { - configMap.put("topicConfigTable", JSON.parseObject(JSONObject.toJSONString(configTable))); + if (TOPICS_JSON_CONFIG.equalsIgnoreCase(configType)) { + configMap.put("topicConfigTable", configTable); } - if (SUBSCRIPTION_GROUP_JSON_CONFIG.toLowerCase().equals(configType)) { - configMap.put("subscriptionGroupTable", JSON.parseObject(JSONObject.toJSONString(configTable))); + if (SUBSCRIPTION_GROUP_JSON_CONFIG.equalsIgnoreCase(configType)) { + configMap.put("subscriptionGroupTable", configTable); } System.out.print(JSONObject.toJSONString(configMap, true) + "\n"); } catch (Exception e) { @@ -111,4 +117,42 @@ public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) t configRocksDBStorage.shutdown(); } } + + private void printConsumerOffsets(String path) { + ConfigRocksDBStorage configRocksDBStorage = new ConfigRocksDBStorage(path, true); + configRocksDBStorage.start(); + RocksIterator iterator = configRocksDBStorage.iterator(); + try { + final Map configMap = new HashMap<>(); + final JSONObject configTable = new JSONObject(); + iterator.seekToFirst(); + while (iterator.isValid()) { + final byte[] key = iterator.key(); + final byte[] value = iterator.value(); + final String name = new String(key, DataConverter.CHARSET_UTF8); + final String config = new String(value, DataConverter.CHARSET_UTF8); + final RocksDBOffsetSerializeWrapper jsonObject = JSONObject.parseObject(config, RocksDBOffsetSerializeWrapper.class); + configTable.put(name, jsonObject.getOffsetTable()); + iterator.next(); + } + configMap.put("offsetTable", configTable); + System.out.print(JSONObject.toJSONString(configMap, true) + "\n"); + } catch (Exception e) { + System.out.print("Error occurred while converting RocksDB kv config to json, " + "configType=consumerOffsets, " + e.getMessage() + "\n"); + } finally { + configRocksDBStorage.shutdown(); + } + } + + static class RocksDBOffsetSerializeWrapper { + private ConcurrentMap offsetTable = new ConcurrentHashMap<>(16); + + public ConcurrentMap getOffsetTable() { + return offsetTable; + 
} + + public void setOffsetTable(ConcurrentMap offsetTable) { + this.offsetTable = offsetTable; + } + } } \ No newline at end of file diff --git a/tools/src/main/java/org/apache/rocketmq/tools/command/queue/CheckRocksdbCqWriteProgressCommand.java b/tools/src/main/java/org/apache/rocketmq/tools/command/queue/CheckRocksdbCqWriteProgressCommand.java new file mode 100644 index 00000000000..d18a24ee1dc --- /dev/null +++ b/tools/src/main/java/org/apache/rocketmq/tools/command/queue/CheckRocksdbCqWriteProgressCommand.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.rocketmq.tools.command.queue; + +import java.util.Map; +import java.util.Set; +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.Option; +import org.apache.commons.cli.Options; +import org.apache.commons.lang3.StringUtils; +import org.apache.rocketmq.remoting.RPCHook; +import org.apache.rocketmq.remoting.protocol.body.CheckRocksdbCqWriteProgressResponseBody; +import org.apache.rocketmq.remoting.protocol.body.ClusterInfo; +import org.apache.rocketmq.remoting.protocol.route.BrokerData; +import org.apache.rocketmq.tools.admin.DefaultMQAdminExt; +import org.apache.rocketmq.tools.command.SubCommand; + +public class CheckRocksdbCqWriteProgressCommand implements SubCommand { + + @Override + public String commandName() { + return "checkRocksdbCqWriteProgress"; + } + + @Override + public String commandDesc() { + return "check if rocksdb cq is same as file cq"; + } + + @Override + public Options buildCommandlineOptions(Options options) { + Option opt = new Option("c", "cluster", true, "cluster name"); + opt.setRequired(true); + options.addOption(opt); + + opt = new Option("n", "nameserverAddr", true, "nameserverAddr"); + opt.setRequired(true); + options.addOption(opt); + + opt = new Option("t", "topic", true, "topic name"); + opt.setRequired(false); + options.addOption(opt); + return options; + } + + @Override + public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) { + DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook); + + defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis())); + defaultMQAdminExt.setNamesrvAddr(StringUtils.trim(commandLine.getOptionValue('n'))); + String clusterName = commandLine.hasOption('c') ? commandLine.getOptionValue('c').trim() : ""; + String topic = commandLine.hasOption('t') ? 
commandLine.getOptionValue('t').trim() : ""; + + try { + defaultMQAdminExt.start(); + ClusterInfo clusterInfo = defaultMQAdminExt.examineBrokerClusterInfo(); + Map<String, Set<String>> clusterAddrTable = clusterInfo.getClusterAddrTable(); + Map<String, BrokerData> brokerAddrTable = clusterInfo.getBrokerAddrTable(); + if (clusterAddrTable.get(clusterName) == null) { + System.out.print("clusterAddrTable is empty"); + return; + } + for (Map.Entry<String, BrokerData> entry : brokerAddrTable.entrySet()) { + String brokerName = entry.getKey(); + BrokerData brokerData = entry.getValue(); + String brokerAddr = brokerData.getBrokerAddrs().get(0L); + CheckRocksdbCqWriteProgressResponseBody body = defaultMQAdminExt.checkRocksdbCqWriteProgress(brokerAddr, topic); + if (StringUtils.isNotBlank(topic)) { + System.out.print(body.getDiffResult()); + } else { + System.out.print(brokerName + " | " + brokerAddr + " | \n" + body.getDiffResult()); + } + } + + } catch (Exception e) { + throw new RuntimeException(this.getClass().getSimpleName() + " command failed", e); + } finally { + defaultMQAdminExt.shutdown(); + } + } +}
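
Editor's note (not part of the patch): a minimal sketch of how the checkRocksdbCqWriteProgress admin API added above in MQAdminExt/DefaultMQAdminExt could be driven programmatically, outside the new mqadmin sub-command registered in MQAdminStartup. The name server address, broker address, and topic below are placeholder assumptions, not values from the patch.

import org.apache.rocketmq.remoting.protocol.body.CheckRocksdbCqWriteProgressResponseBody;
import org.apache.rocketmq.tools.admin.DefaultMQAdminExt;

public class CheckRocksdbCqWriteProgressExample {
    public static void main(String[] args) throws Exception {
        DefaultMQAdminExt adminExt = new DefaultMQAdminExt();
        adminExt.setNamesrvAddr("127.0.0.1:9876"); // assumed local name server address
        adminExt.start();
        try {
            // Ask one broker (assumed address/topic) to compare its RocksDB consume queue
            // with the file-based consume queue and print the diff result, mirroring what
            // the new sub-command does per broker in the cluster.
            CheckRocksdbCqWriteProgressResponseBody body =
                adminExt.checkRocksdbCqWriteProgress("127.0.0.1:10911", "TopicTest");
            System.out.println(body.getDiffResult());
        } finally {
            adminExt.shutdown();
        }
    }
}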